Dataset schema (one record per source file; ⌀ marks nullable fields):
- hexsha: string, length 40
- size: int64, 3 – 1.03M
- ext: string, 10 distinct values
- lang: string, 1 distinct value (Python)
- max_stars_repo_path: string, length 3 – 972
- max_stars_repo_name: string, length 6 – 130
- max_stars_repo_head_hexsha: string, length 40 – 78
- max_stars_repo_licenses: list, 1 – 10 entries
- max_stars_count: int64, 1 – 191k ⌀
- max_stars_repo_stars_event_min_datetime: string, length 24 ⌀
- max_stars_repo_stars_event_max_datetime: string, length 24 ⌀
- max_issues_repo_path: string, length 3 – 972
- max_issues_repo_name: string, length 6 – 130
- max_issues_repo_head_hexsha: string, length 40 – 78
- max_issues_repo_licenses: list, 1 – 10 entries
- max_issues_count: int64, 1 – 116k ⌀
- max_issues_repo_issues_event_min_datetime: string, length 24 ⌀
- max_issues_repo_issues_event_max_datetime: string, length 24 ⌀
- max_forks_repo_path: string, length 3 – 972
- max_forks_repo_name: string, length 6 – 130
- max_forks_repo_head_hexsha: string, length 40 – 78
- max_forks_repo_licenses: list, 1 – 10 entries
- max_forks_count: int64, 1 – 105k ⌀
- max_forks_repo_forks_event_min_datetime: string, length 24 ⌀
- max_forks_repo_forks_event_max_datetime: string, length 24 ⌀
- content: string, length 3 – 1.03M
- avg_line_length: float64, 1.13 – 941k
- max_line_length: int64, 2 – 941k
- alphanum_fraction: float64, 0 – 1

Each record below lists this metadata (hexsha/size/ext/lang and the max_stars, max_issues, max_forks fields), followed by the file content and a closing line with avg_line_length, max_line_length and alphanum_fraction.
hexsha: 3f228e2a2cc32627b2b7f41aaf38967a78547e66 | size: 694 | ext: py | lang: Python
max_stars: path=setup.py | repo=surajpandey11/Tweetpy | head=785ee31b3ed58a7ad5c13cc9fea0080d93eba0b6 | licenses=["MIT"] | count=null | events: null – null
max_issues: path=setup.py | repo=surajpandey11/Tweetpy | head=785ee31b3ed58a7ad5c13cc9fea0080d93eba0b6 | licenses=["MIT"] | count=null | events: null – null
max_forks: path=setup.py | repo=surajpandey11/Tweetpy | head=785ee31b3ed58a7ad5c13cc9fea0080d93eba0b6 | licenses=["MIT"] | count=null | events: null – null
import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="Tweet_Assignment", # Replace with your own username
version="0.0.1",
author="Suraj Pandey",
author_email="soorajhjcms@gmail.com",
description="Assignment On Python package",
long_description=long_description,
long_description_content_type="text/markdown",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
package_dir={"": "src"},
packages=setuptools.find_packages(where="src"),
python_requires=">=3.6",
)
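
A brief, hedged aside on the src layout this setup.py relies on: `package_dir={"": "src"}` tells setuptools that importable packages live under `src/`, and `find_packages(where="src")` discovers them there. The sketch below only illustrates that discovery; the `tweetpy` package name is an invented placeholder.

```python
# Run from the project root next to setup.py; prints the packages found under src/,
# e.g. ['tweetpy'] for a src/tweetpy/__init__.py tree (the package name is hypothetical).
import setuptools

print(setuptools.find_packages(where="src"))
```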
avg_line_length: 30.173913 | max_line_length: 61 | alphanum_fraction: 0.665706

hexsha: 59e0165f60ff7471921590624d8f25300c2abc70 | size: 1,023 | ext: py | lang: Python
max_stars: path=workers/arch_space/util/SGDR.py | repo=arberzela/EfficientNAS | head=cf62e0ba4c0e8a509f6701e4989dec72a42b5202 | licenses=["MIT"] | count=51 | events: 2018-07-24T03:14:00.000Z – 2021-12-23T10:30:07.000Z
max_issues: path=workers/arch_space/util/SGDR.py | repo=automl/EfficientNAS | head=b5e9c8bf40ea29bb410d272efed1aeb25056c119 | licenses=["MIT"] | count=1 | events: 2018-12-22T14:08:18.000Z – 2018-12-27T20:51:13.000Z
max_forks: path=workers/arch_space/util/SGDR.py | repo=arberzela/EfficientNAS | head=cf62e0ba4c0e8a509f6701e4989dec72a42b5202 | licenses=["MIT"] | count=6 | events: 2018-08-03T14:30:34.000Z – 2020-02-25T02:53:05.000Z
import math
from torch.optim.lr_scheduler import _LRScheduler
class CosineAnnealingRestartsLR(_LRScheduler):
def __init__(self, optimizer, num_batches, T_max, T_mul=1, eta_min=0, last_epoch=-1):
'''
Here last_epoch actually means last_step since the
learning rate is decayed after each batch step.
'''
self.T_max = T_max
self.T_mul = T_mul
self.eta_min = eta_min
self.num_batches = num_batches
super(CosineAnnealingRestartsLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
'''
        Overrides the existing get_lr() of the parent class.
'''
if self.last_epoch == self.num_batches * self.T_max:
self.T_max = self.T_max * self.T_mul
self.last_epoch = 0
return [self.eta_min + (base_lr - self.eta_min) *
(1 + math.cos(math.pi * self.last_epoch / (self.T_max * self.num_batches))) / 2
for base_lr in self.base_lrs]
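
A minimal usage sketch for the scheduler above, assuming one `scheduler.step()` per batch as the docstring suggests; the toy model, learning rate, and batch counts are invented for illustration.

```python
import torch

model = torch.nn.Linear(10, 2)                        # toy model, illustration only
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
num_batches = 100                                     # assumed batches per epoch
scheduler = CosineAnnealingRestartsLR(optimizer, num_batches=num_batches,
                                      T_max=10, T_mul=2, eta_min=1e-5)

for epoch in range(30):
    for _ in range(num_batches):
        optimizer.step()                              # forward/backward omitted
        scheduler.step()                              # decay the LR after every batch step
```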
avg_line_length: 36.535714 | max_line_length: 103 | alphanum_fraction: 0.622678

hexsha: c79db67177fffea628502f8a7fc9b31516d96ff1 | size: 1,856 | ext: py | lang: Python
max_stars: path=tests/assignment/test_models.py | repo=DerPate/OpenSlides | head=2733a47d315fec9b8f3cb746fd5f3739be225d65 | licenses=["MIT"] | count=1 | events: 2015-03-22T02:07:23.000Z – 2015-03-22T02:07:23.000Z
max_issues: path=tests/assignment/test_models.py | repo=frauenknecht/OpenSlides | head=6521d6b095bca33dc0c5f09f59067551800ea1e3 | licenses=["MIT"] | count=null | events: null – null
max_forks: path=tests/assignment/test_models.py | repo=frauenknecht/OpenSlides | head=6521d6b095bca33dc0c5f09f59067551800ea1e3 | licenses=["MIT"] | count=null | events: null – null
# -*- coding: utf-8 -*-
from django.test.client import Client
from openslides.agenda.models import Item, Speaker
from openslides.assignment.models import Assignment
from openslides.participant.models import User
from openslides.utils.test import TestCase
class AssignmentModelTest(TestCase):
def setUp(self):
# Admin
self.admin = User.objects.get(pk=1)
self.admin_client = Client()
self.admin_client.login(username='admin', password='admin')
def test_delete_with_related_item(self):
assignment = Assignment.objects.create(name='assignment_name_fgdhensbch34zfu1284ds', posts=1)
response = self.admin_client.get('/assignment/1/agenda/')
self.assertRedirects(response, '/agenda/')
self.assertEqual(Item.objects.get(pk=1).get_title(), 'assignment_name_fgdhensbch34zfu1284ds')
assignment.delete()
self.assertTrue(Item.objects.filter(pk=1).exists())
def test_begin_speach(self):
assignment = Assignment.objects.create(name='test_assignment_gjbnchs4620sdfhjfsksj1', posts=1)
item = Item.objects.create(content_object=assignment)
person_1 = User.objects.create(username='user_1_bnhdjgd8747djcbjd8fg')
person_2 = User.objects.create(username='user_2_qmlkohid6qvx5q0fbmh9')
person_3 = User.objects.create(username='user_3_nbjf74jf9bjag219ou96')
assignment.run(person_1, person_1)
assignment.run(person_2, person_2)
assignment.run(person_3, person_3)
Speaker.objects.add(person_1, item)
self.assertEqual(item.speaker_set.count(), 1)
assignment.gen_poll()
self.assertTrue(item.speaker_set.filter(person=person_1).exists())
self.assertTrue(item.speaker_set.filter(person=person_2).exists())
self.assertTrue(item.speaker_set.filter(person=person_3).exists())
avg_line_length: 44.190476 | max_line_length: 102 | alphanum_fraction: 0.724677

hexsha: 0cb7efc39648e641dce040773cdfd4d73d1b5d5b | size: 3,766 | ext: py | lang: Python
max_stars: path=classy_vision/hooks/__init__.py | repo=Sandbox3aster/ClassyVision | head=b499a892cb62702e2a07a27bd374379d4d83e7d9 | licenses=["MIT"] | count=1 | events: 2021-04-09T08:32:22.000Z – 2021-04-09T08:32:22.000Z
max_issues: path=classy_vision/hooks/__init__.py | repo=tullie/ClassyVision-1 | head=b6202d6323431203997039a6768762811cb7215f | licenses=["MIT"] | count=null | events: null – null
max_forks: path=classy_vision/hooks/__init__.py | repo=tullie/ClassyVision-1 | head=b6202d6323431203997039a6768762811cb7215f | licenses=["MIT"] | count=null | events: null – null
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
from pathlib import Path
from typing import Any, Dict, List
from classy_vision.generic.registry_utils import import_all_modules
from .constants import ClassyHookFunctions # isort:skip
from .classy_hook import ClassyHook # isort:skip
FILE_ROOT = Path(__file__).parent
HOOK_REGISTRY = {}
HOOK_CLASS_NAMES = set()
def register_hook(name):
"""Registers a :class:`ClassyHook` subclass.
This decorator allows Classy Vision to instantiate a subclass of
:class:`ClassyHook` from a configuration file, even if the class
itself is not part of the base Classy Vision framework. To use it,
apply this decorator to a ClassyHook subclass, like this:
.. code-block:: python
@register_hook('custom_hook')
class CustomHook(ClassyHook):
...
To instantiate a hook from a configuration file, see
:func:`build_hook`.
"""
def register_hook_cls(cls):
if name in HOOK_REGISTRY:
raise ValueError("Cannot register duplicate hook ({})".format(name))
if not issubclass(cls, ClassyHook):
raise ValueError(
"Hook ({}: {}) must extend ClassyHook".format(name, cls.__name__)
)
if cls.__name__ in HOOK_CLASS_NAMES:
raise ValueError(
"Cannot register hook with duplicate class name ({})".format(
cls.__name__
)
)
HOOK_REGISTRY[name] = cls
HOOK_CLASS_NAMES.add(cls.__name__)
return cls
return register_hook_cls
def build_hooks(hook_configs: List[Dict[str, Any]]):
return [build_hook(config) for config in hook_configs]
def build_hook(hook_config: Dict[str, Any]):
"""Builds a ClassyHook from a config.
This assumes a 'name' key in the config which is used to determine
what hook class to instantiate. For instance, a config `{"name":
"my_hook", "foo": "bar"}` will find a class that was registered as
"my_hook" (see :func:`register_hook`) and call .from_config on
it."""
assert hook_config["name"] in HOOK_REGISTRY, (
"Unregistered hook. Did you make sure to use the register_hook decorator "
"AND import the hook file before calling this function??"
)
hook_config = copy.deepcopy(hook_config)
hook_name = hook_config.pop("name")
return HOOK_REGISTRY[hook_name].from_config(hook_config)
# automatically import any Python files in the hooks/ directory
import_all_modules(FILE_ROOT, "classy_vision.hooks")
from .checkpoint_hook import CheckpointHook # isort:skip
from .torchscript_hook import TorchscriptHook # isort:skip
from .exponential_moving_average_model_hook import ( # isort:skip
ExponentialMovingAverageModelHook,
)
from .loss_lr_meter_logging_hook import LossLrMeterLoggingHook # isort:skip
from .model_complexity_hook import ModelComplexityHook # isort:skip
from .model_tensorboard_hook import ModelTensorboardHook # isort:skip
from .profiler_hook import ProfilerHook # isort:skip
from .progress_bar_hook import ProgressBarHook # isort:skip
from .tensorboard_plot_hook import TensorboardPlotHook # isort:skip
from .visdom_hook import VisdomHook # isort:skip
__all__ = [
"build_hooks",
"build_hook",
"register_hook",
"CheckpointHook",
"ClassyHook",
"ClassyHookFunctions",
"ExponentialMovingAverageModelHook",
"LossLrMeterLoggingHook",
"TensorboardPlotHook",
"TorchscriptHook",
"ModelComplexityHook",
"ModelTensorboardHook",
"ProfilerHook",
"ProgressBarHook",
"VisdomHook",
]
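
A hedged sketch of the register/build round-trip the docstrings above describe. The hook name "my_counter_hook", the CounterHook class, and its "verbose" option are invented; a real hook would also implement ClassyHook's event callbacks, which are omitted here.

```python
from classy_vision.hooks import ClassyHook, register_hook, HOOK_REGISTRY

@register_hook("my_counter_hook")
class CounterHook(ClassyHook):
    """Toy hook registered under a config name (event callbacks omitted in this sketch)."""

    def __init__(self, verbose=False):
        super().__init__()
        self.verbose = verbose

    @classmethod
    def from_config(cls, config):
        # build_hook({"name": "my_counter_hook", "verbose": True}) would end up here
        return cls(verbose=config.get("verbose", False))

# The decorator has added the class to the registry under its config name.
assert HOOK_REGISTRY["my_counter_hook"] is CounterHook
```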
avg_line_length: 32.188034 | max_line_length: 82 | alphanum_fraction: 0.709506

hexsha: c6ee83e72881090dc3aa3c16056f368a3f32eba1 | size: 382 | ext: py | lang: Python
max_stars: path=songpay/mainapp/migrations/0003_arrangement_slug.py | repo=malfin/SongPay | head=998a0e72410cd5484069edc79e09bf0e50d87aa2 | licenses=["Apache-2.0"] | count=null | events: null – null
max_issues: path=songpay/mainapp/migrations/0003_arrangement_slug.py | repo=malfin/SongPay | head=998a0e72410cd5484069edc79e09bf0e50d87aa2 | licenses=["Apache-2.0"] | count=null | events: null – null
max_forks: path=songpay/mainapp/migrations/0003_arrangement_slug.py | repo=malfin/SongPay | head=998a0e72410cd5484069edc79e09bf0e50d87aa2 | licenses=["Apache-2.0"] | count=null | events: null – null
# Generated by Django 3.2.9 on 2021-12-01 06:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0002_arrangement_text'),
]
operations = [
migrations.AddField(
model_name='arrangement',
name='slug',
field=models.SlugField(blank=True),
),
]
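
For context, a hedged sketch of the model state that would produce the AddField operation above, as it might appear in the project's mainapp/models.py. Only the `slug` field is implied by this migration; the `text` field's shape is assumed from the 0002_arrangement_text dependency, and any other fields are omitted.

```python
from django.db import models

class Arrangement(models.Model):
    text = models.TextField(blank=True)   # assumed shape of the field added in 0002
    slug = models.SlugField(blank=True)   # the field added by 0003_arrangement_slug
```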
avg_line_length: 20.105263 | max_line_length: 47 | alphanum_fraction: 0.594241

hexsha: f7c8cc1ecf0217ccf65a2bca851feff4821078cf | size: 3,609 | ext: py | lang: Python
max_stars: path=src/losses/loss_functions.py | repo=MehdiZouitine/Learning-Disentangled-Representations-via-Mutual-Information-Estimation | head=52952aff647a33b749b709cd7f0c3cd059c66b54 | licenses=["MIT"] | count=25 | events: 2021-08-03T11:49:34.000Z – 2022-03-27T13:57:18.000Z
max_issues: path=src/losses/loss_functions.py | repo=MehdiZouitine/Learning-Disentangled-Representations-via-Mutual-Information-Estimation | head=52952aff647a33b749b709cd7f0c3cd059c66b54 | licenses=["MIT"] | count=null | events: null – null
max_forks: path=src/losses/loss_functions.py | repo=MehdiZouitine/Learning-Disentangled-Representations-via-Mutual-Information-Estimation | head=52952aff647a33b749b709cd7f0c3cd059c66b54 | licenses=["MIT"] | count=3 | events: 2021-12-14T01:17:21.000Z – 2022-01-30T03:47:46.000Z
import torch
import torch.nn as nn
import torch.nn.functional as F
from src.utils.custom_typing import GanLossOutput, Tuple
class ClassifLoss(nn.Module):
"""Classifier loss"""
@staticmethod
def accuracy(y_pred, target):
return torch.sum(y_pred == target).float().mean()
def __init__(self):
super().__init__()
self.cross_entropy = nn.CrossEntropyLoss()
def __call__(
self, y_pred: torch.Tensor, target: torch.Tensor
) -> Tuple[float, float]:
"""Compute cross entropy loss
Args:
y_pred (torch.Tensor): Classifier prediction
target (torch.Tensor): Ground truth
Returns:
Tuple[float, float]: Error and accuracy over the current batch
"""
batch_size = y_pred.size(0)
classif_error = self.cross_entropy(
F.softmax(y_pred, dim=1), target.long()
).mean()
accuracy = self.accuracy(y_pred=torch.argmax(y_pred, dim=1), target=target)
return classif_error, accuracy / batch_size
class DJSLoss(nn.Module):
"""Jensen Shannon Divergence loss"""
def __init__(self) -> None:
super().__init__()
def __call__(self, T: torch.Tensor, T_prime: torch.Tensor) -> float:
"""Estimator of the Jensen Shannon Divergence see paper equation (2)
Args:
            T (torch.Tensor): Statistics network estimation from the marginal distribution P(x)P(z)
            T_prime (torch.Tensor): Statistics network estimation from the joint distribution P(xz)
Returns:
float: DJS estimation value
"""
joint_expectation = (-F.softplus(-T)).mean()
marginal_expectation = F.softplus(T_prime).mean()
mutual_info = joint_expectation - marginal_expectation
return -mutual_info
class DiscriminatorLoss(nn.Module):
"""Basic discriminator GAN loss """
def __init__(self) -> None:
super().__init__()
def __call__(self, real_logits: torch.Tensor, fake_logits: torch.Tensor) -> float:
"""Discriminator loss gan
Args:
real_logits (torch.Tensor): Sample from the real distribution here from P(Sx)P(Ex)
fake_logits (torch.Tensor): Sample from the fake (generated) distribution here from P(SxEx)
Returns:
float: Discriminator loss value
"""
# Discriminator should predict real logits as logits from the real distribution
discriminator_real = F.binary_cross_entropy_with_logits(
input=real_logits, target=torch.ones_like(real_logits)
)
# Discriminator should predict fake logits as logits from the generated distribution
discriminator_fake = F.binary_cross_entropy_with_logits(
input=fake_logits, target=torch.zeros_like(fake_logits)
)
discriminator_loss = discriminator_real.mean() + discriminator_fake.mean()
return discriminator_loss
class GeneratorLoss(nn.Module):
"""Basic generator GAN loss """
def __init__(self) -> None:
super().__init__()
def __call__(self, fake_logits: torch.Tensor) -> float:
"""Generator loss
Args:
fake_logits (torch.Tensor): Sample from the fake (generated) distribution here from P(SxEx)
Returns:
float: Generator loss value
"""
# Discriminator should generate fake logits that fool the discriminator
generator_loss = F.binary_cross_entropy_with_logits(
input=fake_logits, target=torch.ones_like(fake_logits)
)
return generator_loss
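
A minimal sketch exercising the losses above on random tensors; the batch size and score shapes are assumptions chosen for illustration, not requirements of the code.

```python
import torch

scores_a = torch.randn(32, 1)    # statistics network outputs for one batch pairing
scores_b = torch.randn(32, 1)    # outputs for the other pairing (see the DJSLoss docstring)
djs_loss = DJSLoss()(T=scores_a, T_prime=scores_b)            # scalar to minimize

real_logits = torch.randn(32, 1)
fake_logits = torch.randn(32, 1)
d_loss = DiscriminatorLoss()(real_logits=real_logits, fake_logits=fake_logits)
g_loss = GeneratorLoss()(fake_logits=fake_logits)

logits = torch.randn(32, 10)                                  # 10-class toy prediction
targets = torch.randint(0, 10, (32,))
ce_loss, accuracy = ClassifLoss()(y_pred=logits, target=targets)
```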
avg_line_length: 31.938053 | max_line_length: 103 | alphanum_fraction: 0.650596

hexsha: 57a1d4d99dc867e0964b2e07155cc1ebb729f50a | size: 7,404 | ext: py | lang: Python
max_stars: path=tests/test_add_option_enqueue.py | repo=mamh-mixed/loguru | head=22396695b9d8be9a46eca371279df7c427b27f61 | licenses=["MIT"] | count=null | events: null – null
max_issues: path=tests/test_add_option_enqueue.py | repo=mamh-mixed/loguru | head=22396695b9d8be9a46eca371279df7c427b27f61 | licenses=["MIT"] | count=null | events: null – null
max_forks: path=tests/test_add_option_enqueue.py | repo=mamh-mixed/loguru | head=22396695b9d8be9a46eca371279df7c427b27f61 | licenses=["MIT"] | count=null | events: null – null
import pickle
import re
import sys
import time
import pytest
from loguru import logger
from .conftest import default_threading_excepthook
class NotPicklable:
def __getstate__(self):
raise pickle.PicklingError("You shall not serialize me!")
def __setstate__(self, state):
pass
class NotUnpicklable:
def __getstate__(self):
return "..."
def __setstate__(self, state):
raise pickle.UnpicklingError("You shall not de-serialize me!")
class NotWritable:
def write(self, message):
if "fail" in message.record["extra"]:
raise RuntimeError("You asked me to fail...")
print(message, end="")
def test_enqueue():
x = []
def sink(message):
time.sleep(0.1)
x.append(message)
logger.add(sink, format="{message}", enqueue=True)
logger.debug("Test")
assert len(x) == 0
logger.complete()
assert len(x) == 1
assert x[0] == "Test\n"
def test_enqueue_with_exception():
x = []
def sink(message):
time.sleep(0.1)
x.append(message)
logger.add(sink, format="{message}", enqueue=True)
try:
1 / 0
except ZeroDivisionError:
logger.exception("Error")
assert len(x) == 0
logger.complete()
assert len(x) == 1
lines = x[0].splitlines()
assert lines[0] == "Error"
assert lines[-1] == "ZeroDivisionError: division by zero"
def test_caught_exception_queue_put(writer, capsys):
logger.add(writer, enqueue=True, catch=True, format="{message}")
logger.info("It's fine")
logger.bind(broken=NotPicklable()).info("Bye bye...")
logger.info("It's fine again")
logger.remove()
out, err = capsys.readouterr()
lines = err.strip().splitlines()
assert writer.read() == "It's fine\nIt's fine again\n"
assert out == ""
assert lines[0] == "--- Logging error in Loguru Handler #0 ---"
assert re.match(r"Record was: \{.*Bye bye.*\}", lines[1])
assert lines[-2].endswith("PicklingError: You shall not serialize me!")
assert lines[-1] == "--- End of logging error ---"
def test_caught_exception_queue_get(writer, capsys):
logger.add(writer, enqueue=True, catch=True, format="{message}")
logger.info("It's fine")
logger.bind(broken=NotUnpicklable()).info("Bye bye...")
logger.info("It's fine again")
logger.remove()
out, err = capsys.readouterr()
lines = err.strip().splitlines()
assert writer.read() == "It's fine\nIt's fine again\n"
assert out == ""
assert lines[0] == "--- Logging error in Loguru Handler #0 ---"
assert lines[1] == "Record was: None"
assert lines[-2].endswith("UnpicklingError: You shall not de-serialize me!")
assert lines[-1] == "--- End of logging error ---"
def test_caught_exception_sink_write(capsys):
logger.add(NotWritable(), enqueue=True, catch=True, format="{message}")
logger.info("It's fine")
logger.bind(fail=True).info("Bye bye...")
logger.info("It's fine again")
logger.remove()
out, err = capsys.readouterr()
lines = err.strip().splitlines()
assert out == "It's fine\nIt's fine again\n"
assert lines[0] == "--- Logging error in Loguru Handler #0 ---"
assert re.match(r"Record was: \{.*Bye bye.*\}", lines[1])
assert lines[-2] == "RuntimeError: You asked me to fail..."
assert lines[-1] == "--- End of logging error ---"
def test_not_caught_exception_queue_put(writer, capsys):
logger.add(writer, enqueue=True, catch=False, format="{message}")
logger.info("It's fine")
with pytest.raises(pickle.PicklingError, match=r"You shall not serialize me!"):
logger.bind(broken=NotPicklable()).info("Bye bye...")
logger.remove()
out, err = capsys.readouterr()
assert writer.read() == "It's fine\n"
assert out == ""
assert err == ""
def test_not_caught_exception_queue_get(writer, capsys):
logger.add(writer, enqueue=True, catch=False, format="{message}")
with default_threading_excepthook():
logger.info("It's fine")
logger.bind(broken=NotUnpicklable()).info("Bye bye...")
logger.info("It's not fine")
logger.remove()
out, err = capsys.readouterr()
lines = err.strip().splitlines()
assert writer.read() == "It's fine\n"
assert out == ""
assert lines[0].startswith("Exception")
assert lines[-1].endswith("UnpicklingError: You shall not de-serialize me!")
def test_not_caught_exception_sink_write(capsys):
logger.add(NotWritable(), enqueue=True, catch=False, format="{message}")
with default_threading_excepthook():
logger.info("It's fine")
logger.bind(fail=True).info("Bye bye...")
logger.info("It's not fine")
logger.remove()
out, err = capsys.readouterr()
lines = err.strip().splitlines()
assert out == "It's fine\n"
assert lines[0].startswith("Exception")
assert lines[-1] == "RuntimeError: You asked me to fail..."
def test_not_caught_exception_sink_write_then_complete(capsys):
logger.add(NotWritable(), enqueue=True, catch=False, format="{message}")
with default_threading_excepthook():
logger.bind(fail=True).info("Bye bye...")
logger.complete()
logger.remove()
out, err = capsys.readouterr()
lines = err.strip().splitlines()
assert out == ""
assert lines[0].startswith("Exception")
assert lines[-1] == "RuntimeError: You asked me to fail..."
def test_not_caught_exception_queue_get_then_complete(writer, capsys):
logger.add(writer, enqueue=True, catch=False, format="{message}")
with default_threading_excepthook():
logger.bind(broken=NotUnpicklable()).info("Bye bye...")
logger.complete()
logger.remove()
out, err = capsys.readouterr()
lines = err.strip().splitlines()
assert writer.read() == ""
assert out == ""
assert lines[0].startswith("Exception")
assert lines[-1].endswith("UnpicklingError: You shall not de-serialize me!")
def test_wait_for_all_messages_enqueued(capsys):
def slow_sink(message):
time.sleep(0.01)
sys.stderr.write(message)
logger.add(slow_sink, enqueue=True, catch=False, format="{message}")
for i in range(10):
logger.info(i)
logger.complete()
out, err = capsys.readouterr()
assert out == ""
assert err == "".join("%d\n" % i for i in range(10))
def test_logging_not_picklable_exception():
exception = None
def sink(message):
nonlocal exception
exception = message.record["exception"]
logger.add(sink, enqueue=True, catch=False)
try:
raise ValueError(NotPicklable())
except Exception:
logger.exception("Oups")
logger.remove()
type_, value, traceback_ = exception
assert type_ is ValueError
assert value is None
assert traceback_ is None
@pytest.mark.skip(reason="No way to safely deserialize exception yet")
def test_logging_not_unpicklable_exception():
exception = None
def sink(message):
nonlocal exception
exception = message.record["exception"]
logger.add(sink, enqueue=True, catch=False)
try:
raise ValueError(NotUnpicklable())
except Exception:
logger.exception("Oups")
logger.remove()
type_, value, traceback_ = exception
assert type_ is ValueError
assert value is None
assert traceback_ is None
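
Outside of pytest, the enqueue workflow these tests exercise boils down to the sketch below; the sink is a toy stand-in for a slow or thread-unsafe handler.

```python
from loguru import logger

def slow_sink(message):
    print(message, end="")

handler_id = logger.add(slow_sink, format="{message}", enqueue=True, catch=False)
logger.info("handled by a background worker")
logger.complete()             # block until every queued message has been processed
logger.remove(handler_id)
```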
avg_line_length: 27.626866 | max_line_length: 83 | alphanum_fraction: 0.646542

hexsha: edf636d77197bb51ea4c137595f5c16605d5dc8a | size: 1,423 | ext: py | lang: Python
max_stars: path=lib/taurus/qt/qtgui/display/__init__.py | repo=MikeFalowski/taurus | head=ef041bf35dd847caf08a7efbe072f4020d35522e | licenses=["CC-BY-3.0"] | count=1 | events: 2016-10-19T13:54:08.000Z – 2016-10-19T13:54:08.000Z
max_issues: path=lib/taurus/qt/qtgui/display/__init__.py | repo=MikeFalowski/taurus | head=ef041bf35dd847caf08a7efbe072f4020d35522e | licenses=["CC-BY-3.0"] | count=27 | events: 2016-05-25T08:56:58.000Z – 2019-01-21T09:18:08.000Z
max_forks: path=lib/taurus/qt/qtgui/display/__init__.py | repo=MikeFalowski/taurus | head=ef041bf35dd847caf08a7efbe072f4020d35522e | licenses=["CC-BY-3.0"] | count=8 | events: 2015-07-24T09:16:50.000Z – 2018-06-12T12:33:59.000Z
#!/usr/bin/env python
#############################################################################
##
# This file is part of Taurus
##
# http://taurus-scada.org
##
# Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
# Taurus is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
##
# Taurus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
##
# You should have received a copy of the GNU Lesser General Public License
# along with Taurus. If not, see <http://www.gnu.org/licenses/>.
##
#############################################################################
"""This package contains a collection of taurus widgets designed to display taurus
information, typically in a read-only fashion (no user interaction is possible).
Examples of widgets that suit this description are labels, leds and LCDs"""
__docformat__ = 'restructuredtext'
from .qfallback import *
from .qpixmapwidget import *
from .qled import *
from .qlogo import *
from .qsevensegment import *
from .tauruslabel import *
from .taurusled import *
from .tauruslcd import *
avg_line_length: 35.575 | max_line_length: 82 | alphanum_fraction: 0.675334

hexsha: eee2549a02df9b3bc5626cd6985804a88a0525fb | size: 4,373 | ext: py | lang: Python
max_stars: path=sdks/python/appcenter_sdk/models/JavaScriptSolution.py | repo=Brantone/appcenter-sdks | head=eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | licenses=["MIT"] | count=null | events: null – null
max_issues: path=sdks/python/appcenter_sdk/models/JavaScriptSolution.py | repo=Brantone/appcenter-sdks | head=eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | licenses=["MIT"] | count=6 | events: 2019-10-23T06:38:53.000Z – 2022-01-22T07:57:58.000Z
max_forks: path=sdks/python/appcenter_sdk/models/JavaScriptSolution.py | repo=Brantone/appcenter-sdks | head=eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | licenses=["MIT"] | count=2 | events: 2019-10-23T06:31:05.000Z – 2021-08-21T17:32:47.000Z
# coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: benedetto.abbenanti@gmail.com
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
import pprint
import re # noqa: F401
import six
class JavaScriptSolution(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'package_json_path': 'string',
'react_native_version': 'string'
}
attribute_map = {
'package_json_path': 'package_json_path',
'react_native_version': 'react_native_version'
}
def __init__(self, package_json_path=None, react_native_version=None): # noqa: E501
"""JavaScriptSolution - a model defined in Swagger""" # noqa: E501
self._package_json_path = None
self._react_native_version = None
self.discriminator = None
if package_json_path is not None:
self.package_json_path = package_json_path
if react_native_version is not None:
self.react_native_version = react_native_version
@property
def package_json_path(self):
"""Gets the package_json_path of this JavaScriptSolution. # noqa: E501
The path to the detected package.json # noqa: E501
:return: The package_json_path of this JavaScriptSolution. # noqa: E501
:rtype: string
"""
return self._package_json_path
@package_json_path.setter
def package_json_path(self, package_json_path):
"""Sets the package_json_path of this JavaScriptSolution.
The path to the detected package.json # noqa: E501
:param package_json_path: The package_json_path of this JavaScriptSolution. # noqa: E501
:type: string
"""
self._package_json_path = package_json_path
@property
def react_native_version(self):
"""Gets the react_native_version of this JavaScriptSolution. # noqa: E501
Version of React Native from package.json files # noqa: E501
:return: The react_native_version of this JavaScriptSolution. # noqa: E501
:rtype: string
"""
return self._react_native_version
@react_native_version.setter
def react_native_version(self, react_native_version):
"""Sets the react_native_version of this JavaScriptSolution.
Version of React Native from package.json files # noqa: E501
:param react_native_version: The react_native_version of this JavaScriptSolution. # noqa: E501
:type: string
"""
self._react_native_version = react_native_version
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, JavaScriptSolution):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
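
A short sketch using the generated model above; the path and version values are placeholders.

```python
solution = JavaScriptSolution(
    package_json_path="app/package.json",
    react_native_version="0.61.5",
)
print(solution.to_dict())
# {'package_json_path': 'app/package.json', 'react_native_version': '0.61.5'}
print(solution == JavaScriptSolution(package_json_path="app/package.json",
                                     react_native_version="0.61.5"))   # True
```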
avg_line_length: 31.460432 | max_line_length: 103 | alphanum_fraction: 0.618568

hexsha: 3d94b6cbef6f76350d8120c7e449ba3b6153adb1 | size: 3,116 | ext: py | lang: Python
max_stars: path=src/scancode/help.py | repo=s4-2/scancode-toolkit | head=8931b42e2630b94d0cabc834dfb3c16f01f82321 | licenses=["Apache-2.0", "CC-BY-4.0"] | count=1,511 | events: 2015-07-01T15:29:03.000Z – 2022-03-30T13:40:05.000Z
max_issues: path=src/scancode/help.py | repo=s4-2/scancode-toolkit | head=8931b42e2630b94d0cabc834dfb3c16f01f82321 | licenses=["Apache-2.0", "CC-BY-4.0"] | count=2,695 | events: 2015-07-01T16:01:35.000Z – 2022-03-31T19:17:44.000Z
max_forks: path=src/scancode/help.py | repo=s4-2/scancode-toolkit | head=8931b42e2630b94d0cabc834dfb3c16f01f82321 | licenses=["Apache-2.0", "CC-BY-4.0"] | count=540 | events: 2015-07-01T15:08:19.000Z – 2022-03-31T12:13:11.000Z
#
# Copyright (c) nexB Inc. and others. All rights reserved.
# ScanCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/scancode-toolkit for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
# FIXME: the glob story is very weird!!!
examples_text = '''
Scancode command lines examples:
(Note for Windows: use '\\' back slash instead of '/' forward slash for paths.)
Scan a single file for copyrights. Print scan results to stdout as pretty JSON:
scancode --copyright samples/zlib/zlib.h --json-pp -
Scan a single file for licenses, print verbose progress to stderr as each
file is scanned. Save scan to a JSON file:
scancode --license --verbose samples/zlib/zlib.h --json licenses.json
Scan a directory explicitly for licenses and copyrights. Redirect JSON scan
results to a file:
scancode --license --copyright samples/zlib/ --json - > scan.json
Scan a directory while ignoring a single file. Scan for license, copyright and
package manifests. Use four parallel processes.
Print scan results to stdout as pretty formatted JSON.
scancode -lc --package --ignore README --processes 4 --json-pp - samples/
Scan a directory while ignoring all files with .txt extension.
Print scan results to stdout as pretty formatted JSON.
It is recommended to use quotes around glob patterns to prevent pattern
expansion by the shell:
scancode --json-pp - --ignore "*.txt" samples/
Special characters supported in GLOB pattern:
- * matches everything
- ? matches any single character
- [seq] matches any character in seq
- [!seq] matches any character not in seq
For a literal match, wrap the meta-characters in brackets.
For example, '[?]' matches the character '?'.
For details on GLOB patterns see https://en.wikipedia.org/wiki/Glob_(programming).
Note: Glob patterns cannot be applied to path as strings.
For example, this will not ignore "samples/JGroups/licenses".
scancode --json - --ignore "samples*licenses" samples/
Scan a directory while ignoring multiple files (or glob patterns).
Print the scan results to stdout as JSON:
scancode --json - --ignore README --ignore "*.txt" samples/
Scan a directory for licenses and copyrights. Save scan results to an
HTML file:
scancode --license --copyright --html scancode_result.html samples/zlib
'''
epilog_text = '''Examples (use --examples for more):
\b
Scan the 'samples' directory for licenses and copyrights.
Save scan results to the 'scancode_result.json' JSON file:
scancode --license --copyright --json-pp scancode_result.json samples
\b
Scan the 'samples' directory for licenses and package manifests. Print scan
results on screen as pretty-formatted JSON (using the special '-' FILE to print
to on screen/to stdout):
scancode --json-pp - --license --package samples
Note: when you run scancode, a progress bar is displayed with a counter of the
number of files processed. Use --verbose to display file-by-file progress.
'''
avg_line_length: 35.409091 | max_line_length: 82 | alphanum_fraction: 0.745828

hexsha: aa5ab7852c67c7f2ba8858fd3c7be2e5d7fb7da8 | size: 1,016 | ext: py | lang: Python
max_stars: path=2-do_deploy_web_static.py | repo=ralexrivero/AirBnB_clone_v3 | head=f6ae9107e8e1ccd53575bb82eb45f07379f480de | licenses=["MIT"] | count=null | events: null – null
max_issues: path=2-do_deploy_web_static.py | repo=ralexrivero/AirBnB_clone_v3 | head=f6ae9107e8e1ccd53575bb82eb45f07379f480de | licenses=["MIT"] | count=null | events: null – null
max_forks: path=2-do_deploy_web_static.py | repo=ralexrivero/AirBnB_clone_v3 | head=f6ae9107e8e1ccd53575bb82eb45f07379f480de | licenses=["MIT"] | count=1 | events: 2021-10-04T19:29:47.000Z – 2021-10-04T19:29:47.000Z
#!/usr/bin/python3
"""
Fabric script based on the file 1-pack_web_static.py that distributes an
archive to the web servers
"""
from fabric.api import put, run, env
from os.path import exists
env.hosts = ['142.44.167.228', '144.217.246.195']
def do_deploy(archive_path):
"""distributes an archive to the web servers"""
if exists(archive_path) is False:
return False
try:
file_n = archive_path.split("/")[-1]
no_ext = file_n.split(".")[0]
path = "/data/web_static/releases/"
put(archive_path, '/tmp/')
run('mkdir -p {}{}/'.format(path, no_ext))
run('tar -xzf /tmp/{} -C {}{}/'.format(file_n, path, no_ext))
run('rm /tmp/{}'.format(file_n))
run('mv {0}{1}/web_static/* {0}{1}/'.format(path, no_ext))
run('rm -rf {}{}/web_static'.format(path, no_ext))
run('rm -rf /data/web_static/current')
run('ln -s {}{}/ /data/web_static/current'.format(path, no_ext))
return True
    except Exception:
return False
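
A hedged usage sketch: Fabric 1.x tasks such as do_deploy() are normally driven from the shell, and the archive name below is a placeholder for the tarball produced by the companion 1-pack_web_static.py script.

```python
# Shell invocation (Fabric 1.x task syntax):
#   fab -f 2-do_deploy_web_static.py do_deploy:archive_path=versions/web_static_20211004193045.tgz
#
# Programmatic equivalent; requires SSH access to the hosts listed in env.hosts.
import importlib
from fabric.api import execute

deploy = importlib.import_module("2-do_deploy_web_static")   # module name starts with a digit
execute(deploy.do_deploy, archive_path="versions/web_static_20211004193045.tgz")
```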
avg_line_length: 32.774194 | max_line_length: 72 | alphanum_fraction: 0.599409

hexsha: 506c96ca206fd52cd77fbe64ffe85e5c6f24a890 | size: 3,442 | ext: py | lang: Python
max_stars: path=experiments/ashvin/pushervisual/reference_experiment_test.py | repo=Asap7772/rail-rl-franka-eval | head=4bf99072376828193d05b53cf83c7e8f4efbd3ba | licenses=["MIT"] | count=null | events: null – null
max_issues: path=experiments/ashvin/pushervisual/reference_experiment_test.py | repo=Asap7772/rail-rl-franka-eval | head=4bf99072376828193d05b53cf83c7e8f4efbd3ba | licenses=["MIT"] | count=null | events: null – null
max_forks: path=experiments/ashvin/pushervisual/reference_experiment_test.py | repo=Asap7772/rail-rl-franka-eval | head=4bf99072376828193d05b53cf83c7e8f4efbd3ba | licenses=["MIT"] | count=null | events: null – null
import railrl.misc.hyperparameter as hyp
from multiworld.envs.mujoco.cameras import sawyer_pusher_camera_upright_v0
from railrl.launchers.launcher_util import run_experiment
from railrl.torch.grill.launcher import grill_her_td3_full_experiment
from railrl.launchers.arglauncher import run_variants
# from torch import nn
if __name__ == "__main__":
variant = dict(
imsize=84,
init_camera=sawyer_pusher_camera_upright_v0,
env_id='SawyerPushNIPS-v0',
grill_variant=dict(
save_video=True,
save_video_period=100,
qf_kwargs=dict(
hidden_sizes=[400, 300],
),
policy_kwargs=dict(
hidden_sizes=[400, 300],
),
algo_kwargs=dict(
base_kwargs=dict(
num_epochs=505,
num_steps_per_epoch=1000,
num_steps_per_eval=1000,
min_num_steps_before_training=4000,
batch_size=128,
max_path_length=100,
discount=0.99,
num_updates_per_env_step=4,
collection_mode='online-parallel',
parallel_env_params=dict(
num_workers=1,
),
reward_scale=1,
),
her_kwargs=dict(),
td3_kwargs=dict(
tau=1e-2,
),
),
replay_buffer_kwargs=dict(
max_size=int(1e6),
fraction_goals_are_rollout_goals=0.5,
fraction_resampled_goals_are_env_goals=0.5,
),
algorithm='RIG-HER-TD3',
normalize=False,
render=False,
exploration_noise=0.5,
exploration_type='ou',
training_mode='train',
testing_mode='test',
reward_params=dict(
type='latent_distance',
),
observation_key='latent_observation',
desired_goal_key='latent_desired_goal',
vae_wrapped_env_kwargs=dict(
sample_from_true_prior=True,
)
),
train_vae_variant=dict(
vae_path=None,
representation_size=16,
beta=2.5,
num_epochs=10,
dump_skew_debug_plots=False,
decoder_activation='sigmoid',
generate_vae_dataset_kwargs=dict(
test_p=.9,
N=10,
oracle_dataset_using_set_to_goal=True,
use_cached=False,
vae_dataset_specific_kwargs=dict(
),
show=False,
),
vae_kwargs=dict(
input_channels=3,
),
algo_kwargs=dict(
do_scatterplot=False,
use_linear_dynamics=False,
is_auto_encoder=False,
batch_size=64,
lr=1e-3,
),
save_period=100,
),
)
search_space = {
'seedid': range(5),
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
variants = []
for variant in sweeper.iterate_hyperparameters():
variants.append(variant)
run_variants(grill_her_td3_full_experiment, variants, run_id=0)
avg_line_length: 31.87037 | max_line_length: 74 | alphanum_fraction: 0.524985

hexsha: 94cc81c3084a725e8fb1ef6bfd314473ace63b72 | size: 13,262 | ext: py | lang: Python
max_stars: path=speech_recognition/sample_models.py | repo=gotamist/nlp | head=49ae117d2f1ef60c3c523696a4af61d816bf0469 | licenses=["MIT"] | count=null | events: null – null
max_issues: path=speech_recognition/sample_models.py | repo=gotamist/nlp | head=49ae117d2f1ef60c3c523696a4af61d816bf0469 | licenses=["MIT"] | count=null | events: null – null
max_forks: path=speech_recognition/sample_models.py | repo=gotamist/nlp | head=49ae117d2f1ef60c3c523696a4af61d816bf0469 | licenses=["MIT"] | count=1 | events: 2020-04-27T12:44:42.000Z – 2020-04-27T12:44:42.000Z
from keras import backend as K
from keras.models import Model
from keras.layers import (BatchNormalization, Conv1D, Dense, Input,
TimeDistributed, Activation, Bidirectional, SimpleRNN, GRU, Dropout, MaxPooling1D)
def simple_rnn_model(input_dim, output_dim=29):
""" Build a recurrent network for speech
"""
# Main acoustic input
input_data = Input(name='the_input', shape=(None, input_dim))
# Add recurrent layer
simp_rnn = GRU(output_dim, return_sequences=True,
implementation=2, name='rnn')(input_data)
# Add softmax activation layer
y_pred = Activation('softmax', name='softmax')(simp_rnn)
# Specify the model
model = Model(inputs=input_data, outputs=y_pred)
model.output_length = lambda x: x
print(model.summary())
return model
def rnn_model(input_dim, units, activation, output_dim=29):
""" Build a recurrent network for speech
"""
# Main acoustic input
input_data = Input(name='the_input', shape=(None, input_dim))
# Add recurrent layer
simp_rnn = GRU(units, activation=activation,
return_sequences=True, implementation=2, name='rnn')(input_data)
# TODO: Add batch normalization
bn_rnn = BatchNormalization()(simp_rnn)
# TODO: Add a TimeDistributed(Dense(output_dim)) layer
time_dense = TimeDistributed(Dense(output_dim))(bn_rnn)
# Add softmax activation layer
y_pred = Activation('softmax', name='softmax')(time_dense)
# Specify the model
model = Model(inputs=input_data, outputs=y_pred)
model.output_length = lambda x: x
print(model.summary())
return model
def cnn_rnn_model(input_dim, filters, kernel_size, conv_stride,
conv_border_mode, units, output_dim=29, drop_fraction = 0.2):
""" Build a recurrent + convolutional network for speech
"""
# Main acoustic input
input_data = Input(name='the_input', shape=(None, input_dim))
# Add convolutional layer
conv_1d = Conv1D(filters, kernel_size,
strides=conv_stride,
padding=conv_border_mode,
activation='relu',
name='conv1d')(input_data)
# Add batch normalization
bn_cnn = BatchNormalization(name='bn_conv_1d')(conv_1d)
bn_cnn = Dropout(drop_fraction)(bn_cnn)
# Add a recurrent layer
simp_rnn = SimpleRNN(units, activation='relu',
return_sequences=True, implementation=2, name='rnn')(bn_cnn)
# TODO: Add batch normalization
bn_rnn = BatchNormalization(name='bn_simp_rnn')(simp_rnn)
bn_rnn = Dropout(drop_fraction)(bn_rnn)
# TODO: Add a TimeDistributed(Dense(output_dim)) layer
time_dense = TimeDistributed(Dense(output_dim))(bn_rnn)
# Add softmax activation layer
y_pred = Activation('softmax', name='softmax')(time_dense)
# Specify the model
model = Model(inputs=input_data, outputs=y_pred)
model.output_length = lambda x: cnn_output_length(
x, kernel_size, conv_border_mode, conv_stride)
print(model.summary())
return model
def cnn_output_length(input_length, filter_size, border_mode, stride,
dilation=1):
""" Compute the length of the output sequence after 1D convolution along
time. Note that this function is in line with the function used in
Convolution1D class from Keras.
Params:
input_length (int): Length of the input sequence.
filter_size (int): Width of the convolution kernel.
border_mode (str): Only support `same` or `valid`.
stride (int): Stride size used in 1D convolution.
dilation (int)
"""
if input_length is None:
return None
assert border_mode in {'same', 'valid'}
dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
if border_mode == 'same':
output_length = input_length
elif border_mode == 'valid':
output_length = input_length - dilated_filter_size + 1
return (output_length + stride - 1) // stride
def deep_rnn_model(input_dim, units, recur_layers, output_dim=29):
""" Build a deep recurrent network for speech
"""
# Do this without dropout to study the effect of dropout later
# Main acoustic input
input_data = Input(name='the_input', shape=(None, input_dim))
# TODO: Add recurrent layers, each with batch normalization
# simp_rnn = GRU(units, activation=activation,
# return_sequences=True, implementation=2, name='rnn')(input_data)
# TODO: Add batch normalization
# bn_rnn = BatchNormalization()(simp_rnn)
# cells = [ GRU(output_dim) for _ in recur_layers ]
# rnn_layer = RNN(cells)(input_data)
recur = input_data
for cellnum in range(recur_layers):
recur = GRU( units, activation='relu', return_sequences=True, implementation=2, name='rnn_'+str(cellnum))(recur)
recur = BatchNormalization()(recur)
# TODO: Add a TimeDistributed(Dense(output_dim)) layer
time_dense = TimeDistributed(Dense(output_dim))(recur)
# Add softmax activation layer
y_pred = Activation('softmax', name='softmax')(time_dense)
# Specify the model
model = Model(inputs=input_data, outputs=y_pred)
model.output_length = lambda x: x
print(model.summary())
return model
def bidirectional_rnn_model(input_dim, units, output_dim=29, drop_fraction=0.0):
""" Build a bidirectional recurrent network for speech
"""
# Main acoustic input
input_data = Input(name='the_input', shape=(None, input_dim))
# TODO: Add bidirectional recurrent layer
bidir_rnn = Bidirectional( GRU( units, activation='relu',
return_sequences=True, implementation=2,dropout=drop_fraction,
recurrent_dropout=drop_fraction ) )(input_data)
# TODO: Add a TimeDistributed(Dense(output_dim)) layer
time_dense = TimeDistributed(Dense(output_dim))(bidir_rnn)
# Add softmax activation layer
y_pred = Activation('softmax', name='softmax')(time_dense)
# Specify the model
model = Model(inputs=input_data, outputs=y_pred)
model.output_length = lambda x: x
print(model.summary())
return model
def deep_bd_rnn_model(input_dim, units, recur_layers=2, output_dim=29, drop_fraction=0.0):
""" Build a deepbidirectional recurrent network for speech
"""
# Main acoustic input
input_data = Input(name='the_input', shape=(None, input_dim))
# TODO: Add bidirectional recurrent layer
recur = input_data
for cellnum in range(recur_layers):
recur = Bidirectional( GRU( units, activation='relu', return_sequences=True, dropout=drop_fraction, recurrent_dropout=drop_fraction) )(recur)
recur = BatchNormalization()(recur)
# TODO: Add a TimeDistributed(Dense(output_dim)) layer
time_dense = TimeDistributed(Dense(output_dim))( recur )
# Add softmax activation layer
y_pred = Activation('softmax', name='softmax')(time_dense)
# Specify the model
model = Model(inputs=input_data, outputs=y_pred)
model.output_length = lambda x: x
print(model.summary())
return model
#cnn_rnn_model(input_dim, filters, kernel_size, conv_stride, conv_border_mode, units, output_dim=29):
#deep_bd_rnn_model(input_dim, units, recur_layers=2, output_dim=29):
def opt_model(input_dim, filters, kernel_size, conv_stride, conv_border_mode, units, recur_layers=2, output_dim=29):
#only 1 convolutional layer
#pool_size=1,
""" Build a deep network for speech
"""
# pool_strides =1
dilation_rate = 1
# Main acoustic input
input_data = Input(name='the_input', shape=(None, input_dim))
# TODO: Specify the layers in your network
# Add convolutional layer with BN and Dropout
drop_fraction = 0.2
conv1 = Conv1D(filters, kernel_size,
strides=conv_stride,
padding=conv_border_mode,
activation='relu',
dilation_rate=dilation_rate,
name='conv1d_1')(input_data)
conv1 = BatchNormalization()(conv1)
conv1 = Dropout(drop_fraction)(conv1)
#In the maxpoling layer below, I have used kernel_size from the conv layer as the pooling size.
# conv1 = MaxPooling1D(pool_size=kernel_size,strides=pool_strides, padding='same')(conv1) #default padding is 'valid' (strides any less would make the sequence length<222)
# Add a second convolutional layer with BN and Dropout
# conv2 = Conv1D(2*filters, kernel_size,
# strides=conv_stride,
# padding=conv_border_mode,
# activation='relu',
# dilation_rate=dilation_rate,
# name='conv1d_2')(conv1)
# conv2 = BatchNormalization()(conv2)
# conv2 = Dropout(drop_fraction)(conv2)
# conv2 = MaxPooling1D(pool_size=kernel_size,strides=pool_strides, padding='same')(conv2) #default padding is 'valid' (strides any less would make the sequence length<222)
# Add multilayer RNN with dropout
recur = conv1
for cellnum in range(recur_layers):
        recur = Bidirectional( GRU( units, activation='relu', return_sequences=True, dropout = drop_fraction, recurrent_dropout = drop_fraction) )(recur) #dropout was dropout_U and recurrent_dropout was dropout_W
recur = BatchNormalization()(recur)
# recur = MaxPooling1D( pool_size=kernel_size, strides=pool_strides, padding='same' )( recur ) #default padding is 'valid' (strides any less would make the sequence length<222)
# Add a TimeDistributed(Dense(output_dim)) layer
time_dense = TimeDistributed(Dense(output_dim))( recur )
# time_dense = Dropout(drop_fraction)(time_dense)
# Add softmax activation layer
y_pred = Activation('softmax', name='softmax')(time_dense)
# Specify the model
model = Model(inputs=input_data, outputs=y_pred)
# Specify model.output_length
model.output_length = lambda x: cnn_output_length(
x, kernel_size, conv_border_mode, conv_stride)
# model.output_length = lambda x: cnn_pooling_output_length(
# x, kernel_size, conv_border_mode, conv_stride, pool_size=pool_size, num_cnn=2, num_pool=1)
# model.output_length = lambda x: cnn_output_length( cnn_output_length(
# x, kernel_size, conv_border_mode, conv_stride, dilation=dilation_rate), kernel_size, conv_border_mode, conv_stride, dilation=dilation_rate )
print(model.summary())
return model
def final_model(input_dim, filters, kernel_size, conv_stride, conv_border_mode, units, num_cnn=1, recur_layers=2, output_dim=29, drop_fraction = 0.2):
# two convolutional layers
""" Build a deep network for speech
"""
assert num_cnn<3, "Only supports one or two CNN layers"
dilation_rate = 1
# Main acoustic input
input_data = Input(name='the_input', shape=(None, input_dim))
# TODO: Specify the layers in your network
# Add convolutional layer with BN and Dropout
conv = Conv1D(filters, kernel_size,
strides=conv_stride,
padding=conv_border_mode,
activation='relu',
dilation_rate=dilation_rate,
name='conv1d_1')(input_data)
conv = BatchNormalization()(conv)
conv = Dropout(drop_fraction)(conv)
#more convolutional layers as needed
if num_cnn==2:
conv = Conv1D( 2*filters, kernel_size,
strides=conv_stride,
padding=conv_border_mode,
activation='relu',
dilation_rate=dilation_rate,
name='conv1d_2')(conv)
conv = BatchNormalization()(conv)
conv = Dropout(drop_fraction)(conv)
# conv2 = MaxPooling1D(pool_size=kernel_size,strides=pool_strides, padding='same')(conv2) #default padding is 'valid' (strides any less would make the sequence length<222)
# Add multilayer RNN with dropout
recur = conv
for cellnum in range(recur_layers):
        recur = Bidirectional( GRU( units, activation='relu', return_sequences=True, dropout = drop_fraction, recurrent_dropout = drop_fraction) )(recur) #dropout was dropout_U and recurrent_dropout was dropout_W
recur = BatchNormalization()(recur)
# recur = MaxPooling1D( pool_size=kernel_size, strides=pool_strides, padding='same' )( recur ) #default padding is 'valid' (strides any less would make the sequence length<222)
# Add a TimeDistributed(Dense(output_dim)) layer
time_dense = TimeDistributed(Dense(output_dim))( recur )
# Add softmax activation layer
y_pred = Activation('softmax', name='softmax')(time_dense)
# Specify the model
model = Model(inputs=input_data, outputs=y_pred)
# Specify model.output_length
if num_cnn==1:
model.output_length = lambda x: cnn_output_length(
x, kernel_size, conv_border_mode, conv_stride)
if num_cnn==2:
model.output_length = lambda x: cnn_output_length( cnn_output_length(
x, kernel_size, conv_border_mode, conv_stride, dilation=dilation_rate), kernel_size, conv_border_mode, conv_stride, dilation=dilation_rate )
return model
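
A worked example for cnn_output_length() above, followed by a model build; the spectrogram dimension (161) and the CNN hyper-parameters are typical placeholders rather than values mandated by this file.

```python
# 'valid' convolution, kernel 11, stride 2 over 1000 frames:
# floor((1000 - 11 + 1 + 2 - 1) / 2) = 495 output steps; 'same' keeps ceil(1000 / 2) = 500.
print(cnn_output_length(1000, filter_size=11, border_mode='valid', stride=2))   # 495
print(cnn_output_length(1000, filter_size=11, border_mode='same', stride=2))    # 500

model = cnn_rnn_model(input_dim=161, filters=200, kernel_size=11,
                      conv_stride=2, conv_border_mode='valid', units=200)
print(model.output_length(1000))                                                # 495 again
```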
avg_line_length: 43.198697 | max_line_length: 211 | alphanum_fraction: 0.681044

hexsha: 7ab737d01af75fbe5a88a063b89a34f1b9121aaa | size: 2,081 | ext: py | lang: Python
max_stars: path=convert_msmarco_doc_to_t5_format.py | repo=justinyeh1995/docTTTTTquery | head=3c5c5bb07cd8e235092efbf9db7064c1a6654801 | licenses=["Apache-2.0"] | count=190 | events: 2019-11-28T19:36:02.000Z – 2022-03-22T17:21:43.000Z
max_issues: path=convert_msmarco_doc_to_t5_format.py | repo=justinyeh1995/docTTTTTquery | head=3c5c5bb07cd8e235092efbf9db7064c1a6654801 | licenses=["Apache-2.0"] | count=45 | events: 2019-12-06T22:33:24.000Z – 2022-02-10T04:14:51.000Z
max_forks: path=convert_msmarco_doc_to_t5_format.py | repo=justinyeh1995/docTTTTTquery | head=3c5c5bb07cd8e235092efbf9db7064c1a6654801 | licenses=["Apache-2.0"] | count=29 | events: 2019-12-05T21:50:41.000Z – 2022-03-16T12:55:20.000Z
import argparse
import re
import spacy
from tqdm import tqdm
def load_corpus(path):
print('Loading corpus...')
corpus = {}
for line in tqdm(open(path)):
doc_id, doc_url, doc_title, doc_text = line.split('\t')
doc_text = doc_text.strip()
corpus[doc_id] = (doc_title, doc_text)
return corpus
parser = argparse.ArgumentParser(
description='Create T5-formatted tsv file from MS MARCO Document Ranking '
'dataset.')
parser.add_argument('--corpus_path', required=True, default='', help='')
parser.add_argument('--output_passage_texts_path', required=True, default='',
help='')
parser.add_argument('--output_passage_doc_ids_path', required=True, default='',
help='')
parser.add_argument('--stride', type=int, default=5, help='')
parser.add_argument('--max_length', type=int, default=10, help='')
args = parser.parse_args()
nlp = spacy.blank("en")
nlp.add_pipe(nlp.create_pipe("sentencizer"))
corpus = load_corpus(path=args.corpus_path)
n_passages = 0
n_no_passages = 0
with open(args.output_passage_texts_path, 'w') as fout_passage_texts, \
open(args.output_passage_doc_ids_path, 'w') as fout_passage_doc_ids:
for doc_id, (doc_title, doc_text) in tqdm(corpus.items(), total=len(corpus)):
doc = nlp(doc_text[:10000])
sentences = [sent.string.strip() for sent in doc.sents]
if not sentences:
n_no_passages += 1
for i in range(0, len(sentences), args.stride):
segment = ' '.join(sentences[i:i + args.max_length])
segment = doc_title + ' ' + segment
# Remove starting #'s as T5 skips those lines by default.
segment = re.sub(r'^#*', '', segment)
fout_passage_doc_ids.write(f'{doc_id}\n')
fout_passage_texts.write(f'{segment}\n')
n_passages += 1
if i + args.max_length >= len(sentences):
break
print(f'Wrote {n_passages} passages from {len(corpus)} docs.')
print(f'There were {n_no_passages} docs without passages.')
print('Done!')
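
A hedged usage note: the script above is driven from the command line; the file names here are placeholders for the MS MARCO document-ranking corpus TSV (doc_id, url, title, text per line) and the two output files it writes.

```python
# python convert_msmarco_doc_to_t5_format.py \
#     --corpus_path msmarco-docs.tsv \
#     --output_passage_texts_path passage_texts.txt \
#     --output_passage_doc_ids_path passage_doc_ids.txt \
#     --stride 5 --max_length 10
#
# Each document is sentence-split with spaCy, windowed into passages of up to
# max_length sentences taken every stride sentences, and prefixed with its title.
```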
avg_line_length: 33.564516 | max_line_length: 81 | alphanum_fraction: 0.641999

hexsha: 6cb446dd71c8b9c55e8a0d648668448e032c1214 | size: 11,044 | ext: py | lang: Python
max_stars: path=tensorflow/python/keras/layers/kernelized.py | repo=abhaikollara/tensorflow | head=4f96df3659696990cb34d0ad07dc67843c4225a9 | licenses=["Apache-2.0"] | count=848 | events: 2019-12-03T00:16:17.000Z – 2022-03-31T22:53:17.000Z
max_issues: path=tensorflow/python/keras/layers/kernelized.py | repo=abhaikollara/tensorflow | head=4f96df3659696990cb34d0ad07dc67843c4225a9 | licenses=["Apache-2.0"] | count=656 | events: 2019-12-03T00:48:46.000Z – 2022-03-31T18:41:54.000Z
max_forks: path=tensorflow/python/keras/layers/kernelized.py | repo=abhaikollara/tensorflow | head=4f96df3659696990cb34d0ad07dc67843c4225a9 | licenses=["Apache-2.0"] | count=506 | events: 2019-12-03T00:46:26.000Z – 2022-03-30T10:34:56.000Z
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras layers that implement explicit (approximate) kernel feature maps."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import initializers
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import input_spec
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn
_SUPPORTED_RBF_KERNEL_TYPES = ['gaussian', 'laplacian']
class RandomFourierFeatures(base_layer.Layer):
r"""Layer that maps its inputs using random Fourier features.
This layer implements a feature map \\(\phi: \mathbb{R}^d \rightarrow
\mathbb{R}^D\\) which approximates shift-invariant kernels. A kernel function
K(x, y) defined over \\(\mathbb{R}^d x \mathbb{R}^d\\) is shift-invariant if
K(x, y) = k(x-y) for some function defined over \\(\mathbb{R}^d\\). Many
popular Radial Basis Functions (in short RBF), including gaussian and
laplacian kernels are shift-invariant.
The layer approximates a (shift invariant) kernel K in the following sense:
up to a scaling factor, for all inputs \\(x, y \in \mathbb{R}^d\\)
\\(\phi(x)^T \cdot \phi(y) \approx K(x, y)\\)
The implementation of this layer is based on the following paper:
"Random Features for Large-Scale Kernel Machines" by Ali Rahimi and Ben Recht.
(link: https://people.eecs.berkeley.edu/~brecht/papers/07.rah.rec.nips.pdf)
The distribution from which the parameters of the random features map (layer)
are sampled, determines which shift-invariant kernel the layer approximates
(see paper for more details). The users can use the distribution of their
choice. Due to their popularity, the layer supports the out-of-the-box
approximation of the following RBF kernels:
- Gaussian: \\(K(x, y) = e^{-\frac{\|x-y\|_2^2}{2 \cdot scale^2}}\\)
- Laplacian: \\(K(x, y) = e^{-\frac{\|x-y\|_1}{scale}}\\)
NOTE: Unlike the map described in the paper and the scikit-learn
implementation, the output of this layer does not apply the sqrt(2/D)
normalization factor.
Usage for ML: Typically, this layer is used to "kernelize" linear models by
applying a non-linear transformation (this layer) to the input features and
then training a linear model on top of the transformed features. Depending on
the loss function of the linear model, the composition of this layer and the
linear model results to models that are equivalent (up to approximation) to
kernel SVMs (for hinge loss), kernel logistic regression (for logistic loss),
kernel linear regression (for squared loss) etc.
Example of building a kernel multinomial logistic regression model with
Gaussian kernel in keras:
```python
random_features_layer = RandomFourierFeatures(
output_dim=500,
kernel_initializer='gaussian',
scale=5.0,
...)
model = tf.keras.models.Sequential()
model.add(random_features_layer)
  model.add(tf.keras.layers.Dense(units=num_classes, activation='softmax'))
model.compile(
loss=tf.keras.losses.categorical_crossentropy, optimizer=..., metrics=...)
```
To use another kernel, replace the layer creation command with:
```python
random_features_layer = RandomFourierFeatures(
output_dim=500,
kernel_initializer=<my_initializer>,
scale=...,
...)
```
Arguments:
output_dim: Positive integer, the dimension of the layer's output, i.e., the
number of random features used to approximate the kernel.
kernel_initializer: Determines the distribution of the parameters of the
random features map (and therefore the kernel approximated by the layer).
It can be either a string or an instance of TensorFlow's Initializer
class. Currently only 'gaussian' and 'laplacian' are supported as string
initializers (case insensitive). Note that these parameters are not
trainable.
scale: For gaussian and laplacian kernels, this corresponds to a scaling
factor of the corresponding kernel approximated by the layer (see concrete
definitions above). When provided, it should be a positive float. If None,
the implementation chooses a default value (1.0 typically). Both the
approximation error of the kernel and the classification quality are
sensitive to this parameter. If trainable is set to True, this parameter
is learned end-to-end during training and the provided value serves as an
initialization value.
NOTE: When this layer is used to map the initial features and then the
transformed features are fed to a linear model, by making `scale`
trainable, the resulting optimization problem is no longer convex (even
if the loss function used by the linear model is convex).
    trainable: Whether the scaling parameter of the layer is trainable. Defaults
to False.
name: name for the RandomFourierFeatures layer.
Raises:
ValueError: if output_dim or stddev are not positive or if the provided
kernel_initializer is not supported.
"""
def __init__(self,
output_dim,
kernel_initializer='gaussian',
scale=None,
trainable=False,
name=None,
**kwargs):
if output_dim <= 0:
raise ValueError(
'`output_dim` should be a positive integer. Given: {}.'.format(
output_dim))
if isinstance(kernel_initializer, six.string_types):
if kernel_initializer.lower() not in _SUPPORTED_RBF_KERNEL_TYPES:
raise ValueError(
'Unsupported kernel type: \'{}\'. Supported kernel types: {}.'
.format(kernel_initializer, _SUPPORTED_RBF_KERNEL_TYPES))
if scale is not None and scale <= 0.0:
raise ValueError('When provided, `scale` should be a positive float. '
'Given: {}.'.format(scale))
super(RandomFourierFeatures, self).__init__(
trainable=trainable, name=name, **kwargs)
self.output_dim = output_dim
self.kernel_initializer = kernel_initializer
self.scale = scale
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
# TODO(sibyl-vie3Poto): Allow higher dimension inputs. Currently the input is expected
# to have shape [batch_size, dimension].
if input_shape.rank != 2:
raise ValueError(
'The rank of the input tensor should be 2. Got {} instead.'.format(
input_shape.ndims))
if input_shape.dims[1].value is None:
raise ValueError(
'The last dimension of the inputs to `RandomFourierFeatures` '
'should be defined. Found `None`.')
self.input_spec = input_spec.InputSpec(
ndim=2, axes={1: input_shape.dims[1].value})
input_dim = input_shape.dims[1].value
kernel_initializer = _get_random_features_initializer(
self.kernel_initializer, shape=(input_dim, self.output_dim))
unscaled_kernel = self.add_weight(
name='unscaled_random_features',
shape=(input_dim, self.output_dim),
dtype=dtypes.float32,
initializer=kernel_initializer,
trainable=False)
self.bias = self.add_weight(
name='random_features_bias',
shape=(self.output_dim,),
dtype=dtypes.float32,
initializer=init_ops.random_uniform_initializer(
minval=0.0, maxval=2 * np.pi, dtype=dtypes.float32),
trainable=False)
if self.scale is None:
self.scale = _get_default_scale(self.kernel_initializer, input_dim)
scale = self.add_weight(
name='random_features_scale',
shape=(1,),
dtype=dtypes.float32,
initializer=init_ops.constant_initializer(self.scale),
trainable=True,
constraint='NonNeg')
self.kernel = (1.0 / scale) * unscaled_kernel
super(RandomFourierFeatures, self).build(input_shape)
def call(self, inputs):
inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
inputs = gen_math_ops.cast(inputs, dtypes.float32)
outputs = gen_math_ops.mat_mul(inputs, self.kernel)
outputs = nn.bias_add(outputs, self.bias)
return gen_math_ops.cos(outputs)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
input_shape = input_shape.with_rank(2)
if input_shape.dims[-1].value is None:
raise ValueError(
'The innermost dimension of input shape must be defined. Given: %s' %
input_shape)
return input_shape[:-1].concatenate(self.output_dim)
def get_config(self):
kernel_initializer = self.kernel_initializer
if isinstance(self.kernel_initializer, init_ops.Initializer):
kernel_initializer = initializers.serialize(self.kernel_initializer)
config = {
'output_dim': self.output_dim,
'kernel_initializer': kernel_initializer,
'scale': self.scale,
}
base_config = super(RandomFourierFeatures, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def _get_random_features_initializer(initializer, shape):
"""Returns Initializer object for random features."""
def _get_cauchy_samples(loc, scale, shape):
probs = np.random.uniform(low=0., high=1., size=shape)
return loc + scale * np.tan(np.pi * (probs - 0.5))
random_features_initializer = initializer
if isinstance(initializer, six.string_types):
if initializer.lower() == 'gaussian':
random_features_initializer = init_ops.random_normal_initializer(
stddev=1.0)
elif initializer.lower() == 'laplacian':
random_features_initializer = init_ops.constant_initializer(
_get_cauchy_samples(loc=0.0, scale=1.0, shape=shape))
else:
raise ValueError(
'Unsupported kernel type: \'{}\'. Supported kernel types: {}.'.format(
random_features_initializer, _SUPPORTED_RBF_KERNEL_TYPES))
return random_features_initializer
def _get_default_scale(initializer, input_dim):
if (isinstance(initializer, six.string_types) and
initializer.lower() == 'gaussian'):
return np.sqrt(input_dim / 2.0)
return 1.0
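# A minimal NumPy sketch of the idea behind the layer above: random cosine
# features give a Monte Carlo approximation of the Gaussian RBF kernel
# K(x, y) = exp(-||x - y||^2 / (2 * scale^2)). The helper name `demo_rff` and
# its defaults are illustrative only and are not part of the layer's API; it
# reuses the module's existing `np` import.
def demo_rff(x, y, scale=1.0, output_dim=2000, seed=0):
  rng = np.random.default_rng(seed)
  w = rng.standard_normal((x.shape[0], output_dim))  # matches the 'gaussian' initializer
  b = rng.uniform(0.0, 2.0 * np.pi, output_dim)      # matches the random uniform bias
  zx = np.cos(x @ (w / scale) + b)
  zy = np.cos(y @ (w / scale) + b)
  # The 2/D factor restores the sqrt(2/D) normalization the layer omits.
  approx = (2.0 / output_dim) * (zx @ zy)
  exact = np.exp(-np.sum((x - y) ** 2) / (2.0 * scale ** 2))
  return approx, exact  # approximately equal, up to Monte Carlo error
# Example: demo_rff(np.ones(8), np.zeros(8), scale=2.0) returns two values close
# to exp(-1) ~= 0.368.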
| 42.640927
| 90
| 0.70853
|
e7854477c95fb2a26ca8885d8560b5e8a2b23209
| 1,492
|
py
|
Python
|
frameworks/Scala/lift-stateless/setup.py
|
idlewan/FrameworkBenchmarks
|
f187ec69752f369d84ef5a262efaef85c3a6a5ab
|
[
"BSD-3-Clause"
] | 4
|
2015-01-22T02:13:03.000Z
|
2018-06-13T12:02:46.000Z
|
frameworks/Scala/lift-stateless/setup.py
|
ratpack/FrameworkBenchmarks
|
81604309e46e382fe2ffb7970a87d728f20c8be6
|
[
"BSD-3-Clause"
] | null | null | null |
frameworks/Scala/lift-stateless/setup.py
|
ratpack/FrameworkBenchmarks
|
81604309e46e382fe2ffb7970a87d728f20c8be6
|
[
"BSD-3-Clause"
] | null | null | null |
import subprocess
import sys
import setup_util
import os
import time
def start(args, logfile, errfile):
setup_util.replace_text("lift-stateless/src/main/scala/Main.scala", "> \".*:3306", "> \"" + args.database_host + ":3306")
if os.name == 'nt':
subprocess.check_call('"..\\sbt\\sbt.bat" update assembly', shell=True, cwd="lift-stateless", stderr=errfile, stdout=logfile)
subprocess.Popen(".\\run.bat", shell=True, cwd="lift-stateless", stderr=errfile, stdout=logfile)
else:
subprocess.check_call("$FWROOT/sbt/sbt update assembly", shell=True, cwd="lift-stateless", stderr=errfile, stdout=logfile)
subprocess.Popen("./run", shell=True, cwd="lift-stateless", stderr=errfile, stdout=logfile)
return 0
def stop(logfile, errfile):
if os.name == 'nt':
subprocess.check_call("wmic process where \"CommandLine LIKE '%lift-stateless-assembly%'\" call terminate", stderr=errfile, stdout=logfile)
else:
p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
out, err = p.communicate()
for line in out.splitlines():
if 'lift-stateless-assembly' in line and 'java' in line:
pid = int(line.split(None, 2)[1])
os.kill(pid, 15)
  # Clean the build output; the target directory takes up a lot of disk space
if os.name == 'nt':
subprocess.check_call("del /f /s /q target", shell=True, cwd="lift-stateless", stderr=errfile, stdout=logfile)
else:
subprocess.check_call("rm -rf target", shell=True, cwd="lift-stateless", stderr=errfile, stdout=logfile)
return 0
| 41.444444
| 143
| 0.688338
|
eca8d198c4b732c3afc3dce76cd1e0c56f913df2
| 55,195
|
py
|
Python
|
canadapost/canadapost_lib/pickup.py
|
Purplship/purplship-carriers
|
dcd044320b86e9af5fe3ef15c36ebf7828b2851b
|
[
"MIT"
] | 2
|
2021-04-12T22:40:28.000Z
|
2021-04-21T18:28:31.000Z
|
canadapost/canadapost_lib/pickup.py
|
Purplship/purplship-carriers
|
dcd044320b86e9af5fe3ef15c36ebf7828b2851b
|
[
"MIT"
] | 2
|
2021-01-29T07:14:31.000Z
|
2021-02-18T18:29:23.000Z
|
canadapost/canadapost_lib/pickup.py
|
Purplship/purplship-carriers
|
dcd044320b86e9af5fe3ef15c36ebf7828b2851b
|
[
"MIT"
] | 3
|
2020-09-09T17:04:46.000Z
|
2021-03-05T00:32:32.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated Wed Jul 14 15:39:45 2021 by generateDS.py version 2.39.2.
# Python 3.8.6 (v3.8.6:db455296be, Sep 23 2020, 13:31:39) [Clang 6.0 (clang-600.0.57)]
#
# Command line options:
# ('--no-namespace-defs', '')
# ('-o', './canadapost_lib/pickup.py')
#
# Command line arguments:
# ./schemas/pickup.xsd
#
# Command line:
# /Users/danielkobina/Workspace/project/purplship-carriers/.venv/purplship-carriers/bin/generateDS --no-namespace-defs -o "./canadapost_lib/pickup.py" ./schemas/pickup.xsd
#
# Current working directory (os.getcwd()):
# canadapost
#
import sys
try:
ModulenotfoundExp_ = ModuleNotFoundError
except NameError:
ModulenotfoundExp_ = ImportError
from six.moves import zip_longest
import os
import re as re_
import base64
import datetime as datetime_
import decimal as decimal_
try:
from lxml import etree as etree_
except ModulenotfoundExp_ :
from xml.etree import ElementTree as etree_
Validate_simpletypes_ = True
SaveElementTreeNode = True
if sys.version_info.major == 2:
BaseStrType_ = basestring
else:
BaseStrType_ = str
def parsexml_(infile, parser=None, **kwargs):
if parser is None:
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
try:
parser = etree_.ETCompatXMLParser()
except AttributeError:
# fallback to xml.etree
parser = etree_.XMLParser()
try:
if isinstance(infile, os.PathLike):
infile = os.path.join(infile)
except AttributeError:
pass
doc = etree_.parse(infile, parser=parser, **kwargs)
return doc
def parsexmlstring_(instring, parser=None, **kwargs):
if parser is None:
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
try:
parser = etree_.ETCompatXMLParser()
except AttributeError:
# fallback to xml.etree
parser = etree_.XMLParser()
element = etree_.fromstring(instring, parser=parser, **kwargs)
return element
#
# Namespace prefix definition table (and other attributes, too)
#
# The module generatedsnamespaces, if it is importable, must contain
# a dictionary named GeneratedsNamespaceDefs. This Python dictionary
# should map element type names (strings) to XML schema namespace prefix
# definitions. The export method for any class for which there is
# a namespace prefix definition, will export that definition in the
# XML representation of that element. See the export method of
# any generated element type class for an example of the use of this
# table.
# A sample table is:
#
# # File: generatedsnamespaces.py
#
# GenerateDSNamespaceDefs = {
# "ElementtypeA": "http://www.xxx.com/namespaceA",
# "ElementtypeB": "http://www.xxx.com/namespaceB",
# }
#
# Additionally, the generatedsnamespaces module can contain a python
# dictionary named GenerateDSNamespaceTypePrefixes that associates element
# types with the namespace prefixes that are to be added to the
# "xsi:type" attribute value. See the _exportAttributes method of
# any generated element type and the generation of "xsi:type" for an
# example of the use of this table.
# An example table:
#
# # File: generatedsnamespaces.py
#
# GenerateDSNamespaceTypePrefixes = {
# "ElementtypeC": "aaa:",
# "ElementtypeD": "bbb:",
# }
#
try:
from generatedsnamespaces import GenerateDSNamespaceDefs as GenerateDSNamespaceDefs_
except ModulenotfoundExp_ :
GenerateDSNamespaceDefs_ = {}
try:
from generatedsnamespaces import GenerateDSNamespaceTypePrefixes as GenerateDSNamespaceTypePrefixes_
except ModulenotfoundExp_ :
GenerateDSNamespaceTypePrefixes_ = {}
#
# You can replace the following class definition by defining an
# importable module named "generatedscollector" containing a class
# named "GdsCollector". See the default class definition below for
# clues about the possible content of that class.
#
try:
from generatedscollector import GdsCollector as GdsCollector_
except ModulenotfoundExp_ :
class GdsCollector_(object):
def __init__(self, messages=None):
if messages is None:
self.messages = []
else:
self.messages = messages
def add_message(self, msg):
self.messages.append(msg)
def get_messages(self):
return self.messages
def clear_messages(self):
self.messages = []
def print_messages(self):
for msg in self.messages:
print("Warning: {}".format(msg))
def write_messages(self, outstream):
for msg in self.messages:
outstream.write("Warning: {}\n".format(msg))
#
# The super-class for enum types
#
try:
from enum import Enum
except ModulenotfoundExp_ :
Enum = object
#
# The root super-class for element type classes
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ModulenotfoundExp_ as exp:
try:
from generatedssupersuper import GeneratedsSuperSuper
except ModulenotfoundExp_ as exp:
class GeneratedsSuperSuper(object):
pass
class GeneratedsSuper(GeneratedsSuperSuper):
__hash__ = object.__hash__
tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$')
class _FixedOffsetTZ(datetime_.tzinfo):
def __init__(self, offset, name):
self.__offset = datetime_.timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return None
def gds_format_string(self, input_data, input_name=''):
return input_data
def gds_parse_string(self, input_data, node=None, input_name=''):
return input_data
def gds_validate_string(self, input_data, node=None, input_name=''):
if not input_data:
return ''
else:
return input_data
def gds_format_base64(self, input_data, input_name=''):
return base64.b64encode(input_data)
def gds_validate_base64(self, input_data, node=None, input_name=''):
return input_data
def gds_format_integer(self, input_data, input_name=''):
return '%d' % input_data
def gds_parse_integer(self, input_data, node=None, input_name=''):
try:
ival = int(input_data)
except (TypeError, ValueError) as exp:
raise_parse_error(node, 'Requires integer value: %s' % exp)
return ival
def gds_validate_integer(self, input_data, node=None, input_name=''):
try:
value = int(input_data)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires integer value')
return value
def gds_format_integer_list(self, input_data, input_name=''):
if len(input_data) > 0 and not isinstance(input_data[0], BaseStrType_):
input_data = [str(s) for s in input_data]
return '%s' % ' '.join(input_data)
def gds_validate_integer_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
int(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of integer values')
return values
def gds_format_float(self, input_data, input_name=''):
return ('%.15f' % input_data).rstrip('0')
def gds_parse_float(self, input_data, node=None, input_name=''):
try:
fval_ = float(input_data)
except (TypeError, ValueError) as exp:
raise_parse_error(node, 'Requires float or double value: %s' % exp)
return fval_
def gds_validate_float(self, input_data, node=None, input_name=''):
try:
value = float(input_data)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires float value')
return value
def gds_format_float_list(self, input_data, input_name=''):
if len(input_data) > 0 and not isinstance(input_data[0], BaseStrType_):
input_data = [str(s) for s in input_data]
return '%s' % ' '.join(input_data)
def gds_validate_float_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of float values')
return values
def gds_format_decimal(self, input_data, input_name=''):
return_value = '%s' % input_data
if '.' in return_value:
return_value = return_value.rstrip('0')
if return_value.endswith('.'):
return_value = return_value.rstrip('.')
return return_value
def gds_parse_decimal(self, input_data, node=None, input_name=''):
try:
decimal_value = decimal_.Decimal(input_data)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires decimal value')
return decimal_value
def gds_validate_decimal(self, input_data, node=None, input_name=''):
try:
value = decimal_.Decimal(input_data)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires decimal value')
return value
def gds_format_decimal_list(self, input_data, input_name=''):
if len(input_data) > 0 and not isinstance(input_data[0], BaseStrType_):
input_data = [str(s) for s in input_data]
return ' '.join([self.gds_format_decimal(item) for item in input_data])
def gds_validate_decimal_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
decimal_.Decimal(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of decimal values')
return values
def gds_format_double(self, input_data, input_name=''):
return '%s' % input_data
def gds_parse_double(self, input_data, node=None, input_name=''):
try:
fval_ = float(input_data)
except (TypeError, ValueError) as exp:
raise_parse_error(node, 'Requires double or float value: %s' % exp)
return fval_
def gds_validate_double(self, input_data, node=None, input_name=''):
try:
value = float(input_data)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires double or float value')
return value
def gds_format_double_list(self, input_data, input_name=''):
if len(input_data) > 0 and not isinstance(input_data[0], BaseStrType_):
input_data = [str(s) for s in input_data]
return '%s' % ' '.join(input_data)
def gds_validate_double_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(
node, 'Requires sequence of double or float values')
return values
def gds_format_boolean(self, input_data, input_name=''):
return ('%s' % input_data).lower()
def gds_parse_boolean(self, input_data, node=None, input_name=''):
if input_data in ('true', '1'):
bval = True
elif input_data in ('false', '0'):
bval = False
else:
raise_parse_error(node, 'Requires boolean value')
return bval
def gds_validate_boolean(self, input_data, node=None, input_name=''):
if input_data not in (True, 1, False, 0, ):
raise_parse_error(
node,
'Requires boolean value '
'(one of True, 1, False, 0)')
return input_data
def gds_format_boolean_list(self, input_data, input_name=''):
if len(input_data) > 0 and not isinstance(input_data[0], BaseStrType_):
input_data = [str(s) for s in input_data]
return '%s' % ' '.join(input_data)
def gds_validate_boolean_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
value = self.gds_parse_boolean(value, node, input_name)
if value not in (True, 1, False, 0, ):
raise_parse_error(
node,
'Requires sequence of boolean values '
'(one of True, 1, False, 0)')
return values
def gds_validate_datetime(self, input_data, node=None, input_name=''):
return input_data
def gds_format_datetime(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%04d-%02d-%02dT%02d:%02d:%02d' % (
input_data.year,
input_data.month,
input_data.day,
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%04d-%02d-%02dT%02d:%02d:%02d.%s' % (
input_data.year,
input_data.month,
input_data.day,
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
@classmethod
def gds_parse_datetime(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
time_parts = input_data.split('.')
if len(time_parts) > 1:
micro_seconds = int(float('0.' + time_parts[1]) * 1000000)
input_data = '%s.%s' % (
time_parts[0], "{}".format(micro_seconds).rjust(6, "0"), )
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt
def gds_validate_date(self, input_data, node=None, input_name=''):
return input_data
def gds_format_date(self, input_data, input_name=''):
_svalue = '%04d-%02d-%02d' % (
input_data.year,
input_data.month,
input_data.day,
)
try:
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(
hours, minutes)
except AttributeError:
pass
return _svalue
@classmethod
def gds_parse_date(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
dt = datetime_.datetime.strptime(input_data, '%Y-%m-%d')
dt = dt.replace(tzinfo=tz)
return dt.date()
def gds_validate_time(self, input_data, node=None, input_name=''):
return input_data
def gds_format_time(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%02d:%02d:%02d' % (
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%02d:%02d:%02d.%s' % (
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
def gds_validate_simple_patterns(self, patterns, target):
# pat is a list of lists of strings/patterns.
# The target value must match at least one of the patterns
# in order for the test to succeed.
found1 = True
for patterns1 in patterns:
found2 = False
for patterns2 in patterns1:
mo = re_.search(patterns2, target)
if mo is not None and len(mo.group(0)) == len(target):
found2 = True
break
if not found2:
found1 = False
break
return found1
@classmethod
def gds_parse_time(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
if len(input_data.split('.')) > 1:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt.time()
def gds_check_cardinality_(
self, value, input_name,
min_occurs=0, max_occurs=1, required=None):
if value is None:
length = 0
elif isinstance(value, list):
length = len(value)
else:
length = 1
if required is not None :
if required and length < 1:
self.gds_collector_.add_message(
"Required value {}{} is missing".format(
input_name, self.gds_get_node_lineno_()))
if length < min_occurs:
self.gds_collector_.add_message(
"Number of values for {}{} is below "
"the minimum allowed, "
"expected at least {}, found {}".format(
input_name, self.gds_get_node_lineno_(),
min_occurs, length))
elif length > max_occurs:
self.gds_collector_.add_message(
"Number of values for {}{} is above "
"the maximum allowed, "
"expected at most {}, found {}".format(
input_name, self.gds_get_node_lineno_(),
max_occurs, length))
def gds_validate_builtin_ST_(
self, validator, value, input_name,
min_occurs=None, max_occurs=None, required=None):
if value is not None:
try:
validator(value, input_name=input_name)
except GDSParseError as parse_error:
self.gds_collector_.add_message(str(parse_error))
def gds_validate_defined_ST_(
self, validator, value, input_name,
min_occurs=None, max_occurs=None, required=None):
if value is not None:
try:
validator(value)
except GDSParseError as parse_error:
self.gds_collector_.add_message(str(parse_error))
def gds_str_lower(self, instring):
return instring.lower()
def get_path_(self, node):
path_list = []
self.get_path_list_(node, path_list)
path_list.reverse()
path = '/'.join(path_list)
return path
Tag_strip_pattern_ = re_.compile(r'\{.*\}')
def get_path_list_(self, node, path_list):
if node is None:
return
tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
if tag:
path_list.append(tag)
self.get_path_list_(node.getparent(), path_list)
def get_class_obj_(self, node, default_class=None):
class_obj1 = default_class
if 'xsi' in node.nsmap:
classname = node.get('{%s}type' % node.nsmap['xsi'])
if classname is not None:
names = classname.split(':')
if len(names) == 2:
classname = names[1]
class_obj2 = globals().get(classname)
if class_obj2 is not None:
class_obj1 = class_obj2
return class_obj1
def gds_build_any(self, node, type_name=None):
# provide default value in case option --disable-xml is used.
content = ""
content = etree_.tostring(node, encoding="unicode")
return content
@classmethod
def gds_reverse_node_mapping(cls, mapping):
return dict(((v, k) for k, v in mapping.items()))
@staticmethod
def gds_encode(instring):
if sys.version_info.major == 2:
if ExternalEncoding:
encoding = ExternalEncoding
else:
encoding = 'utf-8'
return instring.encode(encoding)
else:
return instring
@staticmethod
def convert_unicode(instring):
if isinstance(instring, str):
result = quote_xml(instring)
elif sys.version_info.major == 2 and isinstance(instring, unicode):
result = quote_xml(instring).encode('utf8')
else:
result = GeneratedsSuper.gds_encode(str(instring))
return result
def __eq__(self, other):
def excl_select_objs_(obj):
return (obj[0] != 'parent_object_' and
obj[0] != 'gds_collector_')
if type(self) != type(other):
return False
return all(x == y for x, y in zip_longest(
filter(excl_select_objs_, self.__dict__.items()),
filter(excl_select_objs_, other.__dict__.items())))
def __ne__(self, other):
return not self.__eq__(other)
# Django ETL transform hooks.
def gds_djo_etl_transform(self):
pass
def gds_djo_etl_transform_db_obj(self, dbobj):
pass
# SQLAlchemy ETL transform hooks.
def gds_sqa_etl_transform(self):
return 0, None
def gds_sqa_etl_transform_db_obj(self, dbobj):
pass
def gds_get_node_lineno_(self):
if (hasattr(self, "gds_elementtree_node_") and
self.gds_elementtree_node_ is not None):
return ' near line {}'.format(
self.gds_elementtree_node_.sourceline)
else:
return ""
def getSubclassFromModule_(module, class_):
'''Get the subclass of a class from a specific module.'''
name = class_.__name__ + 'Sub'
if hasattr(module, name):
return getattr(module, name)
else:
return None
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
ExternalEncoding = ''
# Set this to false in order to deactivate during export, the use of
# name space prefixes captured from the input document.
UseCapturedNS_ = True
CapturedNsmap_ = {}
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
CDATA_pattern_ = re_.compile(r"<!\[CDATA\[.*?\]\]>", re_.DOTALL)
# Change this to redirect the generated superclass module to use a
# specific subclass module.
CurrentSubclassModule_ = None
#
# Support/utility functions.
#
def showIndent(outfile, level, pretty_print=True):
if pretty_print:
for idx in range(level):
outfile.write(' ')
def quote_xml(inStr):
"Escape markup chars, but do not modify CDATA sections."
if not inStr:
return ''
s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr)
s2 = ''
pos = 0
matchobjects = CDATA_pattern_.finditer(s1)
for mo in matchobjects:
s3 = s1[pos:mo.start()]
s2 += quote_xml_aux(s3)
s2 += s1[mo.start():mo.end()]
pos = mo.end()
s3 = s1[pos:]
s2 += quote_xml_aux(s3)
return s2
def quote_xml_aux(inStr):
    s1 = inStr.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    return s1
def quote_attrib(inStr):
    s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr)
    s1 = s1.replace('&', '&amp;')
    s1 = s1.replace('<', '&lt;')
    s1 = s1.replace('>', '&gt;')
    if '"' in s1:
        if "'" in s1:
            s1 = '"%s"' % s1.replace('"', "&quot;")
        else:
            s1 = "'%s'" % s1
    else:
        s1 = '"%s"' % s1
    return s1
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
if s1.find('\n') == -1:
return "'%s'" % s1
else:
return "'''%s'''" % s1
else:
if s1.find('"') != -1:
s1 = s1.replace('"', '\\"')
if s1.find('\n') == -1:
return '"%s"' % s1
else:
return '"""%s"""' % s1
def get_all_text_(node):
if node.text is not None:
text = node.text
else:
text = ''
for child in node:
if child.tail is not None:
text += child.tail
return text
def find_attr_value_(attr_name, node):
attrs = node.attrib
attr_parts = attr_name.split(':')
value = None
if len(attr_parts) == 1:
value = attrs.get(attr_name)
elif len(attr_parts) == 2:
prefix, name = attr_parts
if prefix == 'xml':
namespace = 'http://www.w3.org/XML/1998/namespace'
else:
namespace = node.nsmap.get(prefix)
if namespace is not None:
value = attrs.get('{%s}%s' % (namespace, name, ))
return value
def encode_str_2_3(instr):
return instr
class GDSParseError(Exception):
pass
def raise_parse_error(node, msg):
if node is not None:
msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, )
raise GDSParseError(msg)
class MixedContainer:
# Constants for category:
CategoryNone = 0
CategoryText = 1
CategorySimple = 2
CategoryComplex = 3
# Constants for content_type:
TypeNone = 0
TypeText = 1
TypeString = 2
TypeInteger = 3
TypeFloat = 4
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
TypeBase64 = 8
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
def getCategory(self):
return self.category
def getContenttype(self, content_type):
return self.content_type
def getValue(self):
return self.value
def getName(self):
return self.name
def export(self, outfile, level, name, namespace,
pretty_print=True):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
outfile.write(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(outfile, level, name)
else: # category == MixedContainer.CategoryComplex
self.value.export(
outfile, level, namespace, name_=name,
pretty_print=pretty_print)
def exportSimple(self, outfile, level, name):
if self.content_type == MixedContainer.TypeString:
outfile.write('<%s>%s</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeInteger or \
self.content_type == MixedContainer.TypeBoolean:
outfile.write('<%s>%d</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeFloat or \
self.content_type == MixedContainer.TypeDecimal:
outfile.write('<%s>%f</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
outfile.write('<%s>%g</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeBase64:
outfile.write('<%s>%s</%s>' % (
self.name,
base64.b64encode(self.value),
self.name))
def to_etree(self, element, mapping_=None, nsmap_=None):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
if len(element) > 0:
if element[-1].tail is None:
element[-1].tail = self.value
else:
element[-1].tail += self.value
else:
if element.text is None:
element.text = self.value
else:
element.text += self.value
elif self.category == MixedContainer.CategorySimple:
subelement = etree_.SubElement(
element, '%s' % self.name)
subelement.text = self.to_etree_simple()
else: # category == MixedContainer.CategoryComplex
self.value.to_etree(element)
def to_etree_simple(self, mapping_=None, nsmap_=None):
if self.content_type == MixedContainer.TypeString:
text = self.value
elif (self.content_type == MixedContainer.TypeInteger or
self.content_type == MixedContainer.TypeBoolean):
text = '%d' % self.value
elif (self.content_type == MixedContainer.TypeFloat or
self.content_type == MixedContainer.TypeDecimal):
text = '%f' % self.value
elif self.content_type == MixedContainer.TypeDouble:
text = '%g' % self.value
elif self.content_type == MixedContainer.TypeBase64:
text = '%s' % base64.b64encode(self.value)
return text
def exportLiteral(self, outfile, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type,
self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type,
self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s",\n' % (
self.category, self.content_type, self.name,))
self.value.exportLiteral(outfile, level + 1)
showIndent(outfile, level)
outfile.write(')\n')
class MemberSpec_(object):
def __init__(self, name='', data_type='', container=0,
optional=0, child_attrs=None, choice=None):
self.name = name
self.data_type = data_type
self.container = container
self.child_attrs = child_attrs
self.choice = choice
self.optional = optional
def set_name(self, name): self.name = name
def get_name(self): return self.name
def set_data_type(self, data_type): self.data_type = data_type
def get_data_type_chain(self): return self.data_type
def get_data_type(self):
if isinstance(self.data_type, list):
if len(self.data_type) > 0:
return self.data_type[-1]
else:
return 'xs:string'
else:
return self.data_type
def set_container(self, container): self.container = container
def get_container(self): return self.container
def set_child_attrs(self, child_attrs): self.child_attrs = child_attrs
def get_child_attrs(self): return self.child_attrs
def set_choice(self, choice): self.choice = choice
def get_choice(self): return self.choice
def set_optional(self, optional): self.optional = optional
def get_optional(self): return self.optional
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class boolean(str, Enum):
TRUE='true'
FALSE='false'
class pickup_availability(GeneratedsSuper):
__hash__ = GeneratedsSuper.__hash__
subclass = None
superclass = None
def __init__(self, postal_code=None, on_demand_cutoff=None, on_demand_tour=None, prority_world_cutoff=None, scheduled_pickups_available=None, gds_collector_=None, **kwargs_):
self.gds_collector_ = gds_collector_
self.gds_elementtree_node_ = None
self.original_tagname_ = None
self.parent_object_ = kwargs_.get('parent_object_')
self.ns_prefix_ = None
self.postal_code = postal_code
self.postal_code_nsprefix_ = None
self.on_demand_cutoff = on_demand_cutoff
self.on_demand_cutoff_nsprefix_ = None
self.on_demand_tour = on_demand_tour
self.validate_boolean(self.on_demand_tour)
self.on_demand_tour_nsprefix_ = None
self.prority_world_cutoff = prority_world_cutoff
self.prority_world_cutoff_nsprefix_ = None
self.scheduled_pickups_available = scheduled_pickups_available
self.validate_boolean(self.scheduled_pickups_available)
self.scheduled_pickups_available_nsprefix_ = None
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, pickup_availability)
if subclass is not None:
return subclass(*args_, **kwargs_)
if pickup_availability.subclass:
return pickup_availability.subclass(*args_, **kwargs_)
else:
return pickup_availability(*args_, **kwargs_)
factory = staticmethod(factory)
def get_ns_prefix_(self):
return self.ns_prefix_
def set_ns_prefix_(self, ns_prefix):
self.ns_prefix_ = ns_prefix
def get_postal_code(self):
return self.postal_code
def set_postal_code(self, postal_code):
self.postal_code = postal_code
def get_on_demand_cutoff(self):
return self.on_demand_cutoff
def set_on_demand_cutoff(self, on_demand_cutoff):
self.on_demand_cutoff = on_demand_cutoff
def get_on_demand_tour(self):
return self.on_demand_tour
def set_on_demand_tour(self, on_demand_tour):
self.on_demand_tour = on_demand_tour
def get_prority_world_cutoff(self):
return self.prority_world_cutoff
def set_prority_world_cutoff(self, prority_world_cutoff):
self.prority_world_cutoff = prority_world_cutoff
def get_scheduled_pickups_available(self):
return self.scheduled_pickups_available
def set_scheduled_pickups_available(self, scheduled_pickups_available):
self.scheduled_pickups_available = scheduled_pickups_available
def validate_boolean(self, value):
result = True
# Validate type boolean, a restriction on xsd:string.
if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
if not isinstance(value, str):
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
return False
value = value
enumerations = ['true', 'false']
if value not in enumerations:
lineno = self.gds_get_node_lineno_()
self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd enumeration restriction on boolean' % {"value" : encode_str_2_3(value), "lineno": lineno} )
result = False
return result
def _hasContent(self):
if (
self.postal_code is not None or
self.on_demand_cutoff is not None or
self.on_demand_tour is not None or
self.prority_world_cutoff is not None or
self.scheduled_pickups_available is not None
):
return True
else:
return False
def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='pickup-availability', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('pickup-availability')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None and name_ == 'pickup-availability':
name_ = self.original_tagname_
if UseCapturedNS_ and self.ns_prefix_:
namespaceprefix_ = self.ns_prefix_ + ':'
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespaceprefix_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self._exportAttributes(outfile, level, already_processed, namespaceprefix_, name_='pickup-availability')
if self._hasContent():
outfile.write('>%s' % (eol_, ))
self._exportChildren(outfile, level + 1, namespaceprefix_, namespacedef_, name_='pickup-availability', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespaceprefix_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def _exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='pickup-availability'):
pass
def _exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='pickup-availability', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.postal_code is not None:
namespaceprefix_ = self.postal_code_nsprefix_ + ':' if (UseCapturedNS_ and self.postal_code_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%spostal-code>%s</%spostal-code>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.postal_code), input_name='postal-code')), namespaceprefix_ , eol_))
if self.on_demand_cutoff is not None:
namespaceprefix_ = self.on_demand_cutoff_nsprefix_ + ':' if (UseCapturedNS_ and self.on_demand_cutoff_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%son-demand-cutoff>%s</%son-demand-cutoff>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.on_demand_cutoff), input_name='on-demand-cutoff')), namespaceprefix_ , eol_))
if self.on_demand_tour is not None:
namespaceprefix_ = self.on_demand_tour_nsprefix_ + ':' if (UseCapturedNS_ and self.on_demand_tour_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%son-demand-tour>%s</%son-demand-tour>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.on_demand_tour), input_name='on-demand-tour')), namespaceprefix_ , eol_))
if self.prority_world_cutoff is not None:
namespaceprefix_ = self.prority_world_cutoff_nsprefix_ + ':' if (UseCapturedNS_ and self.prority_world_cutoff_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sprority-world-cutoff>%s</%sprority-world-cutoff>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.prority_world_cutoff), input_name='prority-world-cutoff')), namespaceprefix_ , eol_))
if self.scheduled_pickups_available is not None:
namespaceprefix_ = self.scheduled_pickups_available_nsprefix_ + ':' if (UseCapturedNS_ and self.scheduled_pickups_available_nsprefix_) else ''
showIndent(outfile, level, pretty_print)
outfile.write('<%sscheduled-pickups-available>%s</%sscheduled-pickups-available>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.scheduled_pickups_available), input_name='scheduled-pickups-available')), namespaceprefix_ , eol_))
def build(self, node, gds_collector_=None):
self.gds_collector_ = gds_collector_
if SaveElementTreeNode:
self.gds_elementtree_node_ = node
already_processed = set()
self.ns_prefix_ = node.prefix
self._buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self._buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
return self
def _buildAttributes(self, node, attrs, already_processed):
pass
def _buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
if nodeName_ == 'postal-code':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'postal_code')
value_ = self.gds_validate_string(value_, node, 'postal_code')
self.postal_code = value_
self.postal_code_nsprefix_ = child_.prefix
elif nodeName_ == 'on-demand-cutoff':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'on_demand_cutoff')
value_ = self.gds_validate_string(value_, node, 'on_demand_cutoff')
self.on_demand_cutoff = value_
self.on_demand_cutoff_nsprefix_ = child_.prefix
elif nodeName_ == 'on-demand-tour':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'on_demand_tour')
value_ = self.gds_validate_string(value_, node, 'on_demand_tour')
self.on_demand_tour = value_
self.on_demand_tour_nsprefix_ = child_.prefix
# validate type boolean
self.validate_boolean(self.on_demand_tour)
elif nodeName_ == 'prority-world-cutoff':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'prority_world_cutoff')
value_ = self.gds_validate_string(value_, node, 'prority_world_cutoff')
self.prority_world_cutoff = value_
self.prority_world_cutoff_nsprefix_ = child_.prefix
elif nodeName_ == 'scheduled-pickups-available':
value_ = child_.text
value_ = self.gds_parse_string(value_, node, 'scheduled_pickups_available')
value_ = self.gds_validate_string(value_, node, 'scheduled_pickups_available')
self.scheduled_pickups_available = value_
self.scheduled_pickups_available_nsprefix_ = child_.prefix
# validate type boolean
self.validate_boolean(self.scheduled_pickups_available)
# end class pickup_availability
GDSClassesMapping = {
}
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
print(USAGE_TEXT)
sys.exit(1)
def get_root_tag(node):
tag = Tag_pattern_.match(node.tag).groups()[-1]
rootClass = GDSClassesMapping.get(tag)
if rootClass is None:
rootClass = globals().get(tag)
return tag, rootClass
def get_required_ns_prefix_defs(rootNode):
'''Get all name space prefix definitions required in this XML doc.
Return a dictionary of definitions and a char string of definitions.
'''
nsmap = {
prefix: uri
for node in rootNode.iter()
for (prefix, uri) in node.nsmap.items()
if prefix is not None
}
namespacedefs = ' '.join([
'xmlns:{}="{}"'.format(prefix, uri)
for prefix, uri in nsmap.items()
])
return nsmap, namespacedefs
def parse(inFileName, silence=False, print_warnings=True):
global CapturedNsmap_
gds_collector = GdsCollector_()
parser = None
doc = parsexml_(inFileName, parser)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'pickup_availability'
rootClass = pickup_availability
rootObj = rootClass.factory()
rootObj.build(rootNode, gds_collector_=gds_collector)
CapturedNsmap_, namespacedefs = get_required_ns_prefix_defs(rootNode)
if not SaveElementTreeNode:
doc = None
rootNode = None
if not silence:
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(
sys.stdout, 0, name_=rootTag,
namespacedef_=namespacedefs,
pretty_print=True)
if print_warnings and len(gds_collector.get_messages()) > 0:
separator = ('-' * 50) + '\n'
sys.stderr.write(separator)
sys.stderr.write('----- Warnings -- count: {} -----\n'.format(
len(gds_collector.get_messages()), ))
gds_collector.write_messages(sys.stderr)
sys.stderr.write(separator)
return rootObj
def parseEtree(inFileName, silence=False, print_warnings=True,
mapping=None, nsmap=None):
parser = None
doc = parsexml_(inFileName, parser)
gds_collector = GdsCollector_()
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'pickup_availability'
rootClass = pickup_availability
rootObj = rootClass.factory()
rootObj.build(rootNode, gds_collector_=gds_collector)
# Enable Python to collect the space used by the DOM.
if mapping is None:
mapping = {}
rootElement = rootObj.to_etree(
None, name_=rootTag, mapping_=mapping, nsmap_=nsmap)
reverse_mapping = rootObj.gds_reverse_node_mapping(mapping)
if not SaveElementTreeNode:
doc = None
rootNode = None
if not silence:
content = etree_.tostring(
rootElement, pretty_print=True,
xml_declaration=True, encoding="utf-8")
sys.stdout.write(str(content))
sys.stdout.write('\n')
if print_warnings and len(gds_collector.get_messages()) > 0:
separator = ('-' * 50) + '\n'
sys.stderr.write(separator)
sys.stderr.write('----- Warnings -- count: {} -----\n'.format(
len(gds_collector.get_messages()), ))
gds_collector.write_messages(sys.stderr)
sys.stderr.write(separator)
return rootObj, rootElement, mapping, reverse_mapping
def parseString(inString, silence=False, print_warnings=True):
'''Parse a string, create the object tree, and export it.
Arguments:
- inString -- A string. This XML fragment should not start
with an XML declaration containing an encoding.
- silence -- A boolean. If False, export the object.
Returns -- The root object in the tree.
'''
parser = None
rootNode= parsexmlstring_(inString, parser)
gds_collector = GdsCollector_()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'pickup_availability'
rootClass = pickup_availability
rootObj = rootClass.factory()
rootObj.build(rootNode, gds_collector_=gds_collector)
if not SaveElementTreeNode:
rootNode = None
if not silence:
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(
sys.stdout, 0, name_=rootTag,
namespacedef_='')
if print_warnings and len(gds_collector.get_messages()) > 0:
separator = ('-' * 50) + '\n'
sys.stderr.write(separator)
sys.stderr.write('----- Warnings -- count: {} -----\n'.format(
len(gds_collector.get_messages()), ))
gds_collector.write_messages(sys.stderr)
sys.stderr.write(separator)
return rootObj
def parseLiteral(inFileName, silence=False, print_warnings=True):
parser = None
doc = parsexml_(inFileName, parser)
gds_collector = GdsCollector_()
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'pickup_availability'
rootClass = pickup_availability
rootObj = rootClass.factory()
rootObj.build(rootNode, gds_collector_=gds_collector)
# Enable Python to collect the space used by the DOM.
if not SaveElementTreeNode:
doc = None
rootNode = None
if not silence:
sys.stdout.write('#from pickup import *\n\n')
sys.stdout.write('import pickup as model_\n\n')
sys.stdout.write('rootObj = model_.rootClass(\n')
rootObj.exportLiteral(sys.stdout, 0, name_=rootTag)
sys.stdout.write(')\n')
if print_warnings and len(gds_collector.get_messages()) > 0:
separator = ('-' * 50) + '\n'
sys.stderr.write(separator)
sys.stderr.write('----- Warnings -- count: {} -----\n'.format(
len(gds_collector.get_messages()), ))
gds_collector.write_messages(sys.stderr)
sys.stderr.write(separator)
return rootObj
def main():
args = sys.argv[1:]
if len(args) == 1:
parse(args[0])
else:
usage()
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
RenameMappings_ = {
}
#
# Mapping of namespaces to types defined in them
# and the file in which each is defined.
# simpleTypes are marked "ST" and complexTypes "CT".
NamespaceToDefMappings_ = {'http://www.canadapost.ca/ws/pickup/availability': [('boolean',
'./schemas/pickup.xsd',
'ST')]}
__all__ = [
"pickup_availability"
]
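# A minimal usage sketch for the generated classes above. The XML snippet and
# helper name below are illustrative only; a real Canada Post response carries
# the http://www.canadapost.ca/ws/pickup/availability namespace and this module
# is normally used with lxml installed (the preferred etree import above).
SAMPLE_PICKUP_AVAILABILITY_XML = (
    '<pickup-availability>'
    '<postal-code>K1A0B1</postal-code>'
    '<on-demand-tour>true</on-demand-tour>'
    '<scheduled-pickups-available>false</scheduled-pickups-available>'
    '</pickup-availability>'
)
def demo_parse_pickup_availability():
    # silence=True suppresses re-exporting the parsed tree to stdout.
    availability = parseString(SAMPLE_PICKUP_AVAILABILITY_XML, silence=True)
    return (availability.get_postal_code(),
            availability.get_on_demand_tour(),
            availability.get_scheduled_pickups_available())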
| 40.435897
| 271
| 0.586339
|
81484a5559221a2dd739b46964cb294104c3b2de
| 5,583
|
py
|
Python
|
ML/ml_new.py
|
rohitgs28/Visualizing-Predicting-stocks
|
23f263faae9c97451edc760b09d1c8e2df6c650a
|
[
"MIT"
] | null | null | null |
ML/ml_new.py
|
rohitgs28/Visualizing-Predicting-stocks
|
23f263faae9c97451edc760b09d1c8e2df6c650a
|
[
"MIT"
] | null | null | null |
ML/ml_new.py
|
rohitgs28/Visualizing-Predicting-stocks
|
23f263faae9c97451edc760b09d1c8e2df6c650a
|
[
"MIT"
] | null | null | null |
#!C:\Users\Mathew\Anaconda3\python.exe
import quandl
import numpy as np
from datetime import datetime
import datetime,time
from sklearn import linear_model
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.metrics import mean_squared_error
from math import sqrt
import cgi, cgitb
import json
import sys,os
data=[]
date1=[]
final_date=[]
close = []
opening_prices = []
High_prices = []
Low_prices = []
closing_prices_list=[]
openning_prices_list=[]
High_prices_list=[]
Low_prices_list=[]
final_date_list=[]
date_list3=[]
slised_price=[]
sliced_date=[]
price_test=[]
date_test=[]
price_training=[]
date_training=[]
date2=[]
final_date=[]
prdeicted_values=[]
closingPredictedStockPrice=""
cgitb.enable()
def to_integer(dt_time):
return time.mktime(dt_time.timetuple())
def google_stocks(myTicker):
quandl.ApiConfig.api_key = "M5HWLF9jXDTrXrcWnpEe"
data = quandl.get_table('WIKI/PRICES', qopts = { 'columns': ['ticker', 'date', 'close','open','high','low','Volume'] }, ticker = [myTicker], date = { 'gte': '2017-06-01', 'lte': '2017-12-31' })
for index in data['date']:
intDate = to_integer(index)
date1.append(datetime.datetime.fromtimestamp(intDate).strftime('%d-%b-%Y'))
for index in data['close']:
closing_prices_list.append(float(index))
for index in data['open']:
openning_prices_list.append(float(index))
for index in data['high']:
High_prices_list.append(float(index))
for index in data['low']:
Low_prices_list.append(float(index))
i=0
for row in date1:
final_date_list.append(i)
i=i+1
def training_model(final_date_list,closing_prices_list):
prdeicted_values=[]
price_test=[]
date_test=[]
date_training=[]
price_training=[]
date_training1=[]
price_training1=[]
percentage_count= 0.8 *len(closing_prices_list)
percentage_count =int(round(percentage_count))
i=0
while(i<=percentage_count):
date_training.append(final_date_list[i])
price_training.append(closing_prices_list[i])
i=i+1
i=percentage_count
while(i<=percentage_count+4):
date_test.append(final_date_list[i])
price_test.append(closing_prices_list[i])
i=i+1
date_training=map(int,date_training)
price_training=map(float,price_training)
date_training_list=list(date_training)
price_training_list=list(price_training)
#
#prediction logic begins here
#
date_training_list = np.reshape(date_training_list, (len(date_training_list),1))
price_training_list = np.reshape(price_training_list, (len(list(price_training_list)),1))
linear_mod = linear_model.LinearRegression()
linear_mod.fit(date_training_list,price_training_list)
    # sklearn's predict() expects a 2-D array, so wrap each future day index in [[...]].
    StockPriceFornextperiod = linear_mod.predict([[len(final_date_list)+1]])[0][0]
    prdeicted_values.append(linear_mod.predict([[len(price_training_list)+1]])[0][0])
    prdeicted_values.append(linear_mod.predict([[len(price_training_list)+2]])[0][0])
    prdeicted_values.append(linear_mod.predict([[len(price_training_list)+3]])[0][0])
    prdeicted_values.append(linear_mod.predict([[len(price_training_list)+4]])[0][0])
    prdeicted_values.append(linear_mod.predict([[len(price_training_list)+5]])[0][0])
return prdeicted_values,price_test,StockPriceFornextperiod
def evaluate(true_value,predict_value):
# print(true_value)
# print(predict_value)
rms=sqrt(mean_squared_error(true_value,predict_value))
diff=abs(true_value[0] - predict_value[0])
value=diff/true_value[0]
value=100-(value*100)
#print value
#accuracy =accuracy_score(true_label,predict_label)
return rms,value
def predict(dates,price):
price_test=[]
prdeicted_values1=[]
prdeicted_values2=[]
price_test1=[]
price_test2=[]
predicted_price1,price_test,stockprice =training_model(dates,price)
map(float,price_test)
map(float,predicted_price1)
for index in price_test:
price_test1.append(format(index, '.2f'))
for index in predicted_price1:
prdeicted_values1.append(format(index, '.2f'))
price_test2 = map(float,price_test1)
prdeicted_values2 = map(float,prdeicted_values1)
# print(price_test2)
# print(prdeicted_values2)
price_test2=list(price_test2)
prdeicted_values2=list(prdeicted_values2)
rms,accuracy = evaluate(price_test2,prdeicted_values2)
return stockprice,rms,accuracy
try:
inputValues = cgi.FieldStorage()
companyName = inputValues.getvalue('name')
google_stocks(companyName)
actual_price=[]
closingstockpredictprice,closingrms,closingaccuracy=predict(final_date_list,closing_prices_list)
openstockpredictprice,openrms,openaccuracy=predict(final_date_list,openning_prices_list)
lowstockpredictprice,lowrms,lowaccuracy=predict(final_date_list,Low_prices_list)
Highstockpredictprice,highrms,highaccuracy=predict(final_date_list,High_prices_list)
objClosing={"PredictedPrice":closingstockpredictprice,"RMS":closingrms,"Accuracy":closingaccuracy,"Data":closing_prices_list}
objOpening={"PredictedPrice":openstockpredictprice,"RMS":openrms,"Accuracy":openaccuracy,"Data":openning_prices_list}
objLow={"PredictedPrice":lowstockpredictprice,"RMS":lowrms,"Accuracy":lowaccuracy,"Data":Low_prices_list}
objHigh={"PredictedPrice":Highstockpredictprice,"RMS":highrms,"Accuracy":highaccuracy,"Data":High_prices_list}
objResponse={"Close":objClosing,"Open":objOpening,"Low":objLow,"High":objHigh}
# predicted_price1=training_model(final_date_list,closing_prices_list)
response={"dates":date1,"values":objResponse}
print("Content-type:application/json\r\n\r\n")
print(json.dumps({'status':'success', 'response':json.dumps(response)}))
except Exception as e:
print("Content-type:application/json\r\n\r\n")
    print(json.dumps({'status':'error', 'except':str(e)}))
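# A minimal, correctly shaped version of the forecasting step performed by
# training_model() above: fit ordinary least squares of closing price against
# the day index and extrapolate a few days ahead. The helper name
# forecast_next_days is illustrative only and is not called by the CGI handler.
def forecast_next_days(prices, horizon=5):
    days = np.reshape(range(len(prices)), (len(prices), 1))  # day index is the single feature
    values = np.reshape([float(p) for p in prices], (len(prices), 1))
    model = linear_model.LinearRegression().fit(days, values)
    future = np.reshape(range(len(prices), len(prices) + horizon), (horizon, 1))
    return model.predict(future).ravel()  # the next `horizon` predicted prices
# Example: forecast_next_days(closing_prices_list) extrapolates the next five
# closing prices from the Quandl series loaded above.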
| 33.431138
| 194
| 0.786674
|
dea6c276e0cd4abca2e9c0afa299e5f67a1de930
| 3,036
|
py
|
Python
|
rgw/v2/lib/exceptions.py
|
viduship/ceph-qe-scripts
|
886619fa6600c24cbf989d65868951b9c3decd72
|
[
"MIT"
] | null | null | null |
rgw/v2/lib/exceptions.py
|
viduship/ceph-qe-scripts
|
886619fa6600c24cbf989d65868951b9c3decd72
|
[
"MIT"
] | null | null | null |
rgw/v2/lib/exceptions.py
|
viduship/ceph-qe-scripts
|
886619fa6600c24cbf989d65868951b9c3decd72
|
[
"MIT"
] | null | null | null |
import os
import sys
sys.path.append(os.path.abspath(os.path.join(__file__, "../../../")))
import logging
log = logging.getLogger()
class RGWBaseException(Exception):
# RGW Base Exception Class
def __init__(
self,
message=None,
):
super().__init__(message)
self.message = message
class ConfigError(RGWBaseException):
# exception when config error occurs
def __init__(self, message=None):
super().__init__(message)
self.message = message
class RGWIOGenException(RGWBaseException):
    # raised when an error occurs during IO generation
def __init__(
self,
message=None,
):
super().__init__(message)
self.message = message
class TestExecError(RGWBaseException):
# test execution error
def __init__(
self,
message=None,
):
super().__init__(message)
self.message = message
class S3CMDConfigFileNotFound(RGWBaseException):
    # raised when the s3cmd config file does not exist
def __init__(
self,
message=None,
):
super().__init__(message)
self.message = message
class AccessDeniedObjectDeleted(RGWBaseException):
    # raised when an access-denied object got deleted
def __init__(
self,
message=None,
):
super().__init__(message)
self.message = message
class ObjectVersionCountMismatch(RGWBaseException):
# object count mismatch
def __init__(
self,
message=None,
):
super().__init__(message)
self.message = message
class S3CommandExecError(RGWBaseException):
# s3cmd Command execution error
def __init__(
self,
message=None,
):
super().__init__(message)
self.message = message
class NFSGaneshaBaseException(Exception):
# Base exception for NFS-Ganesha
def __init__(
self,
message=None,
):
super().__init__(message)
self.message = message
class NFSGaneshaMountError(NFSGaneshaBaseException):
# NFS Mount error
def __init__(
self,
message=None,
):
super().__init__(message)
self.message = message
class InvalidCephConfigOption(RGWBaseException):
# Invalid ceph config error
def __init__(
self,
message=None,
):
super().__init__(message)
self.message = message
class MFAVersionError(Exception):
# exception raised when enabling MFA and versioning fails
def __init__(
self,
message=None,
):
super().__init__(message)
self.message = message
class SyncFailedError(Exception):
# exception raised when there is sync error in multisite
def __init__(
self,
message=None,
):
super().__init__(message)
self.message = message
class DefaultDatalogBackingError(Exception):
# get default datalog backing error
def __init__(
self,
message=None,
):
super().__init__(message)
self.message = message
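# Minimal usage sketch (illustrative only, not part of the original test suite):
# helper utilities typically raise one of the specific subclasses above and the
# test runner catches the shared RGWBaseException base class.
if __name__ == "__main__":
    try:
        raise TestExecError("bucket creation failed")
    except RGWBaseException as err:
        log.error(err.message)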
| 20.937931
| 69
| 0.625823
|
12b509bf455292847cd0add816211b06674b9ba1
| 10,418
|
py
|
Python
|
sdk/python/pulumi_azure_native/network/v20180701/get_network_interface.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20180701/get_network_interface.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20180701/get_network_interface.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetNetworkInterfaceResult',
'AwaitableGetNetworkInterfaceResult',
'get_network_interface',
]
@pulumi.output_type
class GetNetworkInterfaceResult:
"""
A network interface in a resource group.
"""
def __init__(__self__, dns_settings=None, enable_accelerated_networking=None, enable_ip_forwarding=None, etag=None, id=None, ip_configurations=None, location=None, mac_address=None, name=None, network_security_group=None, primary=None, provisioning_state=None, resource_guid=None, tags=None, type=None, virtual_machine=None):
if dns_settings and not isinstance(dns_settings, dict):
raise TypeError("Expected argument 'dns_settings' to be a dict")
pulumi.set(__self__, "dns_settings", dns_settings)
if enable_accelerated_networking and not isinstance(enable_accelerated_networking, bool):
raise TypeError("Expected argument 'enable_accelerated_networking' to be a bool")
pulumi.set(__self__, "enable_accelerated_networking", enable_accelerated_networking)
if enable_ip_forwarding and not isinstance(enable_ip_forwarding, bool):
raise TypeError("Expected argument 'enable_ip_forwarding' to be a bool")
pulumi.set(__self__, "enable_ip_forwarding", enable_ip_forwarding)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if ip_configurations and not isinstance(ip_configurations, list):
raise TypeError("Expected argument 'ip_configurations' to be a list")
pulumi.set(__self__, "ip_configurations", ip_configurations)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if mac_address and not isinstance(mac_address, str):
raise TypeError("Expected argument 'mac_address' to be a str")
pulumi.set(__self__, "mac_address", mac_address)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if network_security_group and not isinstance(network_security_group, dict):
raise TypeError("Expected argument 'network_security_group' to be a dict")
pulumi.set(__self__, "network_security_group", network_security_group)
if primary and not isinstance(primary, bool):
raise TypeError("Expected argument 'primary' to be a bool")
pulumi.set(__self__, "primary", primary)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if resource_guid and not isinstance(resource_guid, str):
raise TypeError("Expected argument 'resource_guid' to be a str")
pulumi.set(__self__, "resource_guid", resource_guid)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if virtual_machine and not isinstance(virtual_machine, dict):
raise TypeError("Expected argument 'virtual_machine' to be a dict")
pulumi.set(__self__, "virtual_machine", virtual_machine)
@property
@pulumi.getter(name="dnsSettings")
def dns_settings(self) -> Optional['outputs.NetworkInterfaceDnsSettingsResponse']:
"""
The DNS settings in network interface.
"""
return pulumi.get(self, "dns_settings")
@property
@pulumi.getter(name="enableAcceleratedNetworking")
def enable_accelerated_networking(self) -> Optional[bool]:
"""
If the network interface is accelerated networking enabled.
"""
return pulumi.get(self, "enable_accelerated_networking")
@property
@pulumi.getter(name="enableIPForwarding")
def enable_ip_forwarding(self) -> Optional[bool]:
"""
Indicates whether IP forwarding is enabled on this network interface.
"""
return pulumi.get(self, "enable_ip_forwarding")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="ipConfigurations")
def ip_configurations(self) -> Optional[Sequence['outputs.NetworkInterfaceIPConfigurationResponse']]:
"""
A list of IPConfigurations of the network interface.
"""
return pulumi.get(self, "ip_configurations")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="macAddress")
def mac_address(self) -> Optional[str]:
"""
The MAC address of the network interface.
"""
return pulumi.get(self, "mac_address")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkSecurityGroup")
def network_security_group(self) -> Optional['outputs.NetworkSecurityGroupResponse']:
"""
The reference of the NetworkSecurityGroup resource.
"""
return pulumi.get(self, "network_security_group")
@property
@pulumi.getter
def primary(self) -> Optional[bool]:
"""
Gets whether this is a primary network interface on a virtual machine.
"""
return pulumi.get(self, "primary")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[str]:
"""
The provisioning state of the public IP resource. Possible values are: 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> Optional[str]:
"""
The resource GUID property of the network interface resource.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="virtualMachine")
def virtual_machine(self) -> Optional['outputs.SubResourceResponse']:
"""
The reference of a virtual machine.
"""
return pulumi.get(self, "virtual_machine")
class AwaitableGetNetworkInterfaceResult(GetNetworkInterfaceResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetNetworkInterfaceResult(
dns_settings=self.dns_settings,
enable_accelerated_networking=self.enable_accelerated_networking,
enable_ip_forwarding=self.enable_ip_forwarding,
etag=self.etag,
id=self.id,
ip_configurations=self.ip_configurations,
location=self.location,
mac_address=self.mac_address,
name=self.name,
network_security_group=self.network_security_group,
primary=self.primary,
provisioning_state=self.provisioning_state,
resource_guid=self.resource_guid,
tags=self.tags,
type=self.type,
virtual_machine=self.virtual_machine)
def get_network_interface(expand: Optional[str] = None,
network_interface_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNetworkInterfaceResult:
"""
A network interface in a resource group.
:param str expand: Expands referenced resources.
:param str network_interface_name: The name of the network interface.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['expand'] = expand
__args__['networkInterfaceName'] = network_interface_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20180701:getNetworkInterface', __args__, opts=opts, typ=GetNetworkInterfaceResult).value
return AwaitableGetNetworkInterfaceResult(
dns_settings=__ret__.dns_settings,
enable_accelerated_networking=__ret__.enable_accelerated_networking,
enable_ip_forwarding=__ret__.enable_ip_forwarding,
etag=__ret__.etag,
id=__ret__.id,
ip_configurations=__ret__.ip_configurations,
location=__ret__.location,
mac_address=__ret__.mac_address,
name=__ret__.name,
network_security_group=__ret__.network_security_group,
primary=__ret__.primary,
provisioning_state=__ret__.provisioning_state,
resource_guid=__ret__.resource_guid,
tags=__ret__.tags,
type=__ret__.type,
virtual_machine=__ret__.virtual_machine)
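# Illustrative usage (the resource names below are hypothetical and a configured
# Pulumi/Azure stack is assumed):
#
#   nic = get_network_interface(
#       network_interface_name="example-nic",
#       resource_group_name="example-rg")
#   pulumi.export("nicMacAddress", nic.mac_address)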
| 39.313208
| 329
| 0.665291
|
bea15ff181cc57f23397e51254e2827c8cb4140c
| 40,091
|
py
|
Python
|
Common/loss_utils.py
|
JiazeWang/SP-GAN
|
455003f78b1160ebe0a2056005b069808c0df35b
|
[
"MIT"
] | 73
|
2021-05-11T12:00:29.000Z
|
2022-03-31T09:40:12.000Z
|
Common/loss_utils.py
|
JiazeWang/SP-GAN
|
455003f78b1160ebe0a2056005b069808c0df35b
|
[
"MIT"
] | 6
|
2021-08-18T13:03:43.000Z
|
2022-03-30T04:48:29.000Z
|
Common/loss_utils.py
|
JiazeWang/SP-GAN
|
455003f78b1160ebe0a2056005b069808c0df35b
|
[
"MIT"
] | 13
|
2021-08-28T20:09:13.000Z
|
2022-03-20T12:42:51.000Z
|
import torch
import numpy as np
import warnings
from scipy.stats import entropy
from sklearn.neighbors import NearestNeighbors
import torch.nn as nn
from numpy.linalg import norm
import sys,os
import torch.nn.functional as F
#from Common.Const import GPU
from torch.autograd import Variable, grad
sys.path.append(os.path.join(os.getcwd(),"metrics"))
from pointops import pointops_util
# Import CUDA version of approximate EMD, from https://github.com/zekunhao1995/pcgan-pytorch/
# from StructuralLosses.match_cost import match_cost
# from StructuralLosses.nn_distance import nn_distance
from torch.autograd import Variable
from Common.modules import pairwise_dist
from torch.distributions import Beta
from CD_EMD.emd_ import emd_module
from CD_EMD.cd.chamferdist import ChamferDistance as CD
import functools
from numpy import ones,zeros
def dist_o2l(p1, p2):
# distance from origin to the line defined by (p1, p2)
p12 = p2 - p1
u12 = p12 / np.linalg.norm(p12)
l_pp = np.dot(-p1, u12)
pp = l_pp*u12 + p1
return np.linalg.norm(pp)
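# Worked example: the line through (1, 0, 0) and (1, 1, 0) is x = 1, z = 0, so
# dist_o2l(np.array([1.0, 0.0, 0.0]), np.array([1.0, 1.0, 0.0])) returns 1.0.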
def para_count(models):
count = 0
for model in models:
count += sum(param.numel() for param in model.parameters())
return count
class AverageValueMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0.0
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0.0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
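# Usage sketch: accumulate per-batch values and read the running mean, e.g.
#   meter = AverageValueMeter()
#   meter.update(loss.item(), n=batch_size)   # 'loss' and 'batch_size' are placeholders
#   print(meter.avg)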
# # Import CUDA version of CD, borrowed from https://github.com/ThibaultGROUEIX/AtlasNet
# try:
# from . chamfer_distance_ext.dist_chamfer import chamferDist
# CD = chamferDist()
# def distChamferCUDA(x,y):
# return CD(x,y,gpu)
# except:
class CrossEntropyLoss(nn.Module):
def __init__(self, smoothing=True):
super(CrossEntropyLoss, self).__init__()
self.smoothing = smoothing
def forward(self, preds, gts):
gts = gts.contiguous().view(-1)
if self.smoothing:
eps = 0.2
n_class = preds.size(1)
one_hot = torch.zeros_like(preds).scatter(1, gts.view(-1, 1), 1)
one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / (n_class - 1)
log_prb = F.log_softmax(preds, dim=1)
loss = -(one_hot * log_prb).sum(dim=1).mean()
else:
loss = F.cross_entropy(preds, gts, reduction='mean')
return loss
class ChamferLoss(nn.Module):
def __init__(self):
super(ChamferLoss, self).__init__()
self.use_cuda = torch.cuda.is_available()
def forward(self,preds,gts):
P = self.batch_pairwise_dist(gts, preds)
mins, _ = torch.min(P, 1)
loss_1 = torch.sum(mins)
mins, _ = torch.min(P, 2)
loss_2 = torch.sum(mins)
return loss_1 + loss_2
def batch_pairwise_dist(self,x,y):
bs, num_points_x, points_dim = x.size()
_, num_points_y, _ = y.size()
#xx = torch.bmm(x, x.transpose(2,1))
xx = torch.sum(x ** 2, dim=2, keepdim=True)
yy = torch.sum(y ** 2, dim=2, keepdim=True)
xy = -2 * torch.bmm(x, y.permute(0, 2, 1))
dist = xy + xx + yy.permute(0, 2, 1) # [B, N, N]
return dist
# def batch_pairwise_dist(self,x,y):
#
# bs, num_points_x, points_dim = x.size()
# _, num_points_y, _ = y.size()
#
# xx = torch.sum(x ** 2, dim=2, keepdim=True)
# yy = torch.sum(y ** 2, dim=2, keepdim=True)
# yy = yy.permute(0, 2, 1)
#
# xi = -2 * torch.bmm(x, y.permute(0, 2, 1))
# dist = xi + xx + yy # [B, N, N]
# return dist
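# Usage sketch (shapes are illustrative): ChamferLoss expects two [B, N, 3]
# point clouds and returns the summed bidirectional minimum squared distances.
#   chamfer = ChamferLoss()
#   loss = chamfer(pred_points, gt_points)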
def dist_simple(x,y,loss="l2"):
if loss == "l2":
dist = torch.sum((x - y) ** 2, dim=-1).sum(dim=1).float()
else:
dist = torch.sum(torch.abs(x - y), dim=-1).sum(dim=1).float()
return dist.mean()
def distChamferCUDA(x, y):
    # returns per-point squared distances in both directions via the CUDA chamferdist module
    cd = CD()
    cd0, cd1, _, _ = cd(x, y)
    return cd0, cd1
def emd_approx(sample, ref):
B, N, N_ref = sample.size(0), sample.size(1), ref.size(1)
# import ipdb
# ipdb.set_trace()
assert N == N_ref, "Not sure what would EMD do in this case"
emd = match_cost(sample, ref) # (B,)
emd_norm = emd / float(N) # (B,)
return emd_norm
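# Note: emd_approx above (and EMD_loss / CD_loss below) rely on match_cost and
# nn_distance from the StructuralLosses extension, whose imports are commented
# out at the top of this file; calling them raises NameError unless those
# imports are restored.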
def CD_loss(x, y):
dists_forward, dists_backward = nn_distance(x, y)
dists_forward = torch.mean(dists_forward,dim=1)
dists_backward = torch.mean(dists_backward,dim=1)
cd_dist = torch.mean(dists_forward+dists_backward)
return cd_dist
def EMD_loss(sample, ref):
B, N, N_ref = sample.size(0), sample.size(1), ref.size(1)
# import ipdb
# ipdb.set_trace()
assert N == N_ref, "Not sure what would EMD do in this case"
emd = match_cost(sample, ref) # (B,)
emd_norm = emd / float(N) # (B,)
return emd_norm
def compute_mean_covariance(points):
bs, ch, nump = points.size()
# ----------------------------------------------------------------
mu = points.mean(dim=-1, keepdim=True) # Bx3xN -> Bx3x1
# ----------------------------------------------------------------
tmp = points - mu.repeat(1, 1, nump) # Bx3xN - Bx3xN -> Bx3xN
tmp_transpose = tmp.transpose(1, 2) # Bx3xN -> BxNx3
covariance = torch.bmm(tmp, tmp_transpose)
covariance = covariance / nump
return mu, covariance # Bx3x1 Bx3x3
def get_local_pair(pt1, pt2):
pt1_batch, pt1_N, pt1_M = pt1.size()
pt2_batch, pt2_N, pt2_M = pt2.size()
# pt1: Bx3xM pt2: Bx3XN (N > M)
# print('pt1: {} pt2: {}'.format(pt1.size(), pt2.size()))
new_xyz = pt1.transpose(1, 2).contiguous() # Bx3xM -> BxMx3
pt1_trans = pt1.transpose(1, 2).contiguous() # Bx3xM -> BxMx3
pt2_trans = pt2.transpose(1, 2).contiguous() # Bx3xN -> BxNx3
K=20
group = pointops_util.Gen_QueryAndGroupXYZ(radius=None, nsample=K, use_xyz=False)
g_xyz1 = group(pt1_trans, new_xyz) # Bx3xMxK
# print('g_xyz1: {}'.format(g_xyz1.size()))
g_xyz2 = group(pt2_trans, new_xyz) # Bx3xMxK
# print('g_xyz2: {}'.format(g_xyz2.size()))
g_xyz1 = g_xyz1.transpose(1, 2).contiguous().view(-1, 3, K) # Bx3xMxK -> BxMx3xK -> (BM)x3xK
# print('g_xyz1: {}'.format(g_xyz1.size()))
g_xyz2 = g_xyz2.transpose(1, 2).contiguous().view(-1, 3, K) # Bx3xMxK -> BxMx3xK -> (BM)x3xK
# print('g_xyz2: {}'.format(g_xyz2.size()))
# print('====================== FPS ========================')
# print(pt1.shape,g_xyz1.shape)
# print(pt2.shape,g_xyz2.shape)
mu1, var1 = compute_mean_covariance(g_xyz1)
mu2, var2 = compute_mean_covariance(g_xyz2)
# print('mu1: {} var1: {}'.format(mu1.size(), var1.size()))
# print('mu2: {} var2: {}'.format(mu2.size(), var2.size()))
# --------------------------------------------------
# like_mu12 = self.shape_loss_fn(mu1, mu2)
# like_var12 = self.shape_loss_fn(var1, var2)
# ----------------------------------------------------
# =========$$$ CD loss $$$===============
# print("p1,p2:",pt1.shape,pt2.shape)
# print("mu2:",mu1.shape,mu2.shape,pt1_batch,pt1_N,pt1_M)
mu1 = mu1.view(pt1_batch, -1, 3)
mu2 = mu2.view(pt2_batch, -1, 3)
var1 = var1.view(pt1_batch, -1, 9)
var2 = var2.view(pt2_batch, -1, 9)
chamfer_loss = ChamferLoss()
like_mu12 = chamfer_loss(mu1, mu2) / float(pt1_M)
like_var12 = chamfer_loss(var1, var2) / float(pt1_M)
# print('mu: {} var: {}'.format(like_mu12.item(), like_var12.item()))
return like_mu12, like_var12
# Borrow from https://github.com/ThibaultGROUEIX/AtlasNet
def distChamfer(a, b):
x, y = a, b
bs, num_points, points_dim = x.size()
xx = torch.bmm(x, x.transpose(2, 1))
yy = torch.bmm(y, y.transpose(2, 1))
zz = torch.bmm(x, y.transpose(2, 1))
diag_ind = torch.arange(0, num_points).to(a).long()
rx = xx[:, diag_ind, diag_ind].unsqueeze(1).expand_as(xx)
ry = yy[:, diag_ind, diag_ind].unsqueeze(1).expand_as(yy)
P = (rx.transpose(2, 1) + ry - 2 * zz)
return P.min(1)[0], P.min(2)[0]
def EMD_CD(sample_pcs, ref_pcs, batch_size, accelerated_cd=False, reduced=True):
N_sample = sample_pcs.shape[0]
N_ref = ref_pcs.shape[0]
assert N_sample == N_ref, "REF:%d SMP:%d" % (N_ref, N_sample)
cd_lst = []
emd_lst = []
iterator = range(0, N_sample, batch_size)
for b_start in iterator:
b_end = min(N_sample, b_start + batch_size)
sample_batch = sample_pcs[b_start:b_end]
ref_batch = ref_pcs[b_start:b_end]
if accelerated_cd:
dl, dr = distChamferCUDA(sample_batch, ref_batch)
else:
dl, dr = distChamfer(sample_batch, ref_batch)
cd_lst.append(dl.mean(dim=1) + dr.mean(dim=1))
emd_batch = emd_approx(sample_batch, ref_batch)
emd_lst.append(emd_batch)
if reduced:
cd = torch.cat(cd_lst).mean()
emd = torch.cat(emd_lst).mean()
else:
cd = torch.cat(cd_lst)
emd = torch.cat(emd_lst)
results = {
'MMD-CD': cd,
'MMD-EMD': emd,
}
return results
def _pairwise_EMD_CD_(sample_pcs, ref_pcs, batch_size, accelerated_cd=True):
N_sample = sample_pcs.shape[0]
N_ref = ref_pcs.shape[0]
all_cd = []
all_emd = []
iterator = range(N_sample)
for sample_b_start in iterator:
sample_batch = sample_pcs[sample_b_start]
cd_lst = []
emd_lst = []
for ref_b_start in range(0, N_ref, batch_size):
ref_b_end = min(N_ref, ref_b_start + batch_size)
ref_batch = ref_pcs[ref_b_start:ref_b_end]
batch_size_ref = ref_batch.size(0)
sample_batch_exp = sample_batch.view(1, -1, 3).expand(batch_size_ref, -1, -1)
sample_batch_exp = sample_batch_exp.contiguous()
if accelerated_cd:
dl, dr = distChamferCUDA(sample_batch_exp, ref_batch)
else:
dl, dr = distChamfer(sample_batch_exp, ref_batch)
cd_lst.append((dl.mean(dim=1) + dr.mean(dim=1)).view(1, -1))
emd_batch = emd_approx(sample_batch_exp, ref_batch)
emd_lst.append(emd_batch.view(1, -1))
cd_lst = torch.cat(cd_lst, dim=1)
emd_lst = torch.cat(emd_lst, dim=1)
all_cd.append(cd_lst)
all_emd.append(emd_lst)
all_cd = torch.cat(all_cd, dim=0) # N_sample, N_ref
all_emd = torch.cat(all_emd, dim=0) # N_sample, N_ref
return all_cd, all_emd
# Adapted from https://github.com/xuqiantong/GAN-Metrics/blob/master/framework/metric.py
def knn(Mxx, Mxy, Myy, k, sqrt=False):
n0 = Mxx.size(0)
n1 = Myy.size(0)
label = torch.cat((torch.ones(n0), torch.zeros(n1))).to(Mxx)
M = torch.cat((torch.cat((Mxx, Mxy), 1), torch.cat((Mxy.transpose(0, 1), Myy), 1)), 0)
if sqrt:
M = M.abs().sqrt()
INFINITY = float('inf')
val, idx = (M + torch.diag(INFINITY * torch.ones(n0 + n1).to(Mxx))).topk(k, 0, False)
count = torch.zeros(n0 + n1).to(Mxx)
for i in range(0, k):
count = count + label.index_select(0, idx[i])
pred = torch.ge(count, (float(k) / 2) * torch.ones(n0 + n1).to(Mxx)).float()
s = {
'tp': (pred * label).sum(),
'fp': (pred * (1 - label)).sum(),
'fn': ((1 - pred) * label).sum(),
'tn': ((1 - pred) * (1 - label)).sum(),
}
s.update({
'precision': s['tp'] / (s['tp'] + s['fp'] + 1e-10),
'recall': s['tp'] / (s['tp'] + s['fn'] + 1e-10),
'acc_t': s['tp'] / (s['tp'] + s['fn'] + 1e-10),
'acc_f': s['tn'] / (s['tn'] + s['fp'] + 1e-10),
'acc': torch.eq(label, pred).float().mean(),
})
return s
def lgan_mmd_cov(all_dist):
N_sample, N_ref = all_dist.size(0), all_dist.size(1)
min_val_fromsmp, min_idx = torch.min(all_dist, dim=1)
min_val, _ = torch.min(all_dist, dim=0)
mmd = min_val.mean()
mmd_smp = min_val_fromsmp.mean()
cov = float(min_idx.unique().view(-1).size(0)) / float(N_ref)
cov = torch.tensor(cov).to(all_dist)
return {
'lgan_mmd': mmd,
'lgan_cov': cov,
'lgan_mmd_smp': mmd_smp,
}
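# Interpretation (a brief note): 'lgan_mmd' is the minimum matching distance
# averaged over the reference set and 'lgan_cov' is the fraction of reference
# clouds that are the nearest neighbour of at least one sample, i.e. coverage,
# as used in the latent-GAN point-cloud evaluation protocol.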
def compute_all_metrics(sample_pcs, ref_pcs, batch_size, accelerated_cd=False):
results = {}
M_rs_cd, M_rs_emd = _pairwise_EMD_CD_(ref_pcs, sample_pcs, batch_size, accelerated_cd=accelerated_cd)
res_cd = lgan_mmd_cov(M_rs_cd.t())
results.update({
"%s-CD" % k: v for k, v in res_cd.items()
})
res_emd = lgan_mmd_cov(M_rs_emd.t())
results.update({
"%s-EMD" % k: v for k, v in res_emd.items()
})
M_rr_cd, M_rr_emd = _pairwise_EMD_CD_(ref_pcs, ref_pcs, batch_size, accelerated_cd=accelerated_cd)
M_ss_cd, M_ss_emd = _pairwise_EMD_CD_(sample_pcs, sample_pcs, batch_size, accelerated_cd=accelerated_cd)
# 1-NN results
one_nn_cd_res = knn(M_rr_cd, M_rs_cd, M_ss_cd, 1, sqrt=False)
results.update({
"1-NN-CD-%s" % k: v for k, v in one_nn_cd_res.items() if 'acc' in k
})
one_nn_emd_res = knn(M_rr_emd, M_rs_emd, M_ss_emd, 1, sqrt=False)
results.update({
"1-NN-EMD-%s" % k: v for k, v in one_nn_emd_res.items() if 'acc' in k
})
return results
def compute_all_metrics2(sample_pcs, ref_pcs, normalize=False):
from Common.point_operation import normalize_point_cloud
gen_clouds_buf = sample_pcs
ref_clouds_buf = ref_pcs
if normalize:
gen_clouds_buf = gen_clouds_buf.cpu().numpy()
# gen_clouds_inds = set(np.arange(gen_clouds_buf.shape[0]))
# nan_gen_clouds_inds = set(np.isnan(gen_clouds_buf).sum(axis=(1, 2)).nonzero()[0])
# gen_clouds_inds = list(gen_clouds_inds - nan_gen_clouds_inds)
# dup_gen_clouds_inds = np.random.choice(gen_clouds_inds, size=len(nan_gen_clouds_inds))
# gen_clouds_buf[list(nan_gen_clouds_inds)] = gen_clouds_buf[dup_gen_clouds_inds]
gen_clouds_buf = normalize_point_cloud(gen_clouds_buf)
gen_clouds_buf = torch.from_numpy(gen_clouds_buf).cuda()
gg_cds = pairwise_CD(gen_clouds_buf, gen_clouds_buf)
tt_cds = pairwise_CD(ref_clouds_buf, ref_clouds_buf)
gt_cds = pairwise_CD(gen_clouds_buf, ref_clouds_buf)
metrics = {}
jsd = JSD(gen_clouds_buf.cpu().numpy(), ref_clouds_buf.cpu().numpy(),
clouds1_flag='gen', clouds2_flag='ref', warning=False)
cd_covs = COV(gt_cds)
cd_mmds = MMD(gt_cds)
cd_1nns = KNN(gg_cds, gt_cds, tt_cds, 1)
metrics = {
"JSD": jsd,
"COV-CD": cd_covs,
"MMD-CD": cd_mmds,
"1NN-CD": cd_1nns,
}
return metrics
def f_score(predicted_clouds, true_clouds, threshold=0.001):
ld, rd = distChamferCUDA(predicted_clouds, true_clouds)
precision = 100. * (rd < threshold).float().mean(1)
recall = 100. * (ld < threshold).float().mean(1)
return 2. * precision * recall / (precision + recall + 1e-7)
def get_voxel_occ_dist(all_clouds, clouds_flag='gen', res=28, bound=0.5, bs=128, warning=True):
if np.any(np.fabs(all_clouds) > bound) and warning:
print('{} clouds out of cube bounds: [-{}; {}]'.format(clouds_flag, bound, bound))
n_nans = np.isnan(all_clouds).sum()
if n_nans > 0:
print('{} NaN values in point cloud tensors.'.format(n_nans))
p2v_dist = np.zeros((res, res, res), dtype=np.uint64)
step = 1. / res
v_bs = -0.5 + np.arange(res + 1) * step
nbs = all_clouds.shape[0] // bs + 1
for i in range(nbs):
clouds = all_clouds[bs * i:bs * (i + 1)]
preiis = clouds[:, :, 0].reshape(1, -1)
        preiis = np.logical_and(v_bs[:res].reshape(-1, 1) <= preiis, preiis < v_bs[1:].reshape(-1, 1))
iis = preiis.argmax(0)
iis_values = preiis.sum(0) > 0
prejjs = clouds[:, :, 1].reshape(1, -1)
        prejjs = np.logical_and(v_bs[:res].reshape(-1, 1) <= prejjs, prejjs < v_bs[1:].reshape(-1, 1))
jjs = prejjs.argmax(0)
jjs_values = prejjs.sum(0) > 0
prekks = clouds[:, :, 2].reshape(1, -1)
        prekks = np.logical_and(v_bs[:res].reshape(-1, 1) <= prekks, prekks < v_bs[1:].reshape(-1, 1))
kks = prekks.argmax(0)
kks_values = prekks.sum(0) > 0
values = np.uint64(np.logical_and(np.logical_and(iis_values, jjs_values), kks_values))
np.add.at(p2v_dist, (iis, jjs, kks), values)
return np.float64(p2v_dist) / p2v_dist.sum()
def JSD(clouds1, clouds2, clouds1_flag='gen', clouds2_flag='ref', warning=True):
dist1 = get_voxel_occ_dist(clouds1, clouds_flag=clouds1_flag, warning=warning)
dist2 = get_voxel_occ_dist(clouds2, clouds_flag=clouds2_flag, warning=warning)
return entropy((dist1 + dist2).flatten() / 2.0, base=2) - \
0.5 * (entropy(dist1.flatten(), base=2) + entropy(dist2.flatten(), base=2))
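# Usage sketch: both arguments are numpy arrays of shape [S, N, 3] whose points
# are expected to lie roughly inside the [-0.5, 0.5]^3 cube, e.g.
#   jsd = JSD(gen_clouds, ref_clouds, clouds1_flag='gen', clouds2_flag='ref')
# where 'gen_clouds' and 'ref_clouds' are placeholders; lower values indicate
# more similar voxel-occupancy distributions.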
def pairwise_CD(clouds1, clouds2, bs=2048):
N1 = clouds1.shape[0]
N2 = clouds2.shape[0]
cds = torch.from_numpy(np.zeros((N1, N2), dtype=np.float32)).cuda()
for i in range(N1):
clouds1_i = clouds1[i]
if bs < N1:
for j_l in range(0, N2, bs):
j_u = min(N2, j_l + bs)
clouds2_js = clouds2[j_l:j_u]
clouds1_is = clouds1_i.unsqueeze(0).expand(j_u - j_l, -1, -1)
clouds1_is = clouds1_is.contiguous()
dl, dr = distChamferCUDA(clouds1_is, clouds2_js)
cds[i, j_l:j_u] = dl.mean(dim=1) + dr.mean(dim=1)
else:
            clouds1_is = clouds1_i.unsqueeze(0).expand(N2, -1, -1)
clouds1_is = clouds1_is.contiguous()
dl, dr = distChamferCUDA(clouds1_is, clouds2)
cds[i] = dl.mean(dim=1) + dr.mean(dim=1)
return cds
def COV(dists, axis=1):
return float(dists.min(axis)[1].unique().shape[0]) / float(dists.shape[axis])
def MMD(dists, axis=1):
return float(dists.min((axis + 1) % 2)[0].mean().float())
def KNN(Mxx, Mxy, Myy, k, sqrt=False):
n0 = Mxx.size(0)
n1 = Myy.size(0)
label = torch.cat((-torch.ones(n0), torch.ones(n1))).to(Mxx)
M = torch.cat((torch.cat((Mxx, Mxy), 1), torch.cat((Mxy.transpose(0, 1), Myy), 1)), 0)
if sqrt:
M = M.abs().sqrt()
INFINITY = float('inf')
val, idx = (M + torch.diag(INFINITY * torch.ones(n0 + n1).to(Mxx))).topk(k, 0, False)
count = torch.zeros(n0 + n1).to(Mxx)
for i in range(0, k):
count = count + label.index_select(0, idx[i])
pred = torch.ge(count, 0).float()
pred[torch.eq(pred, 0)] = -1.
return float(torch.eq(label, pred).float().mean())
#######################################################
# JSD : from https://github.com/optas/latent_3d_points
#######################################################
def unit_cube_grid_point_cloud(resolution, clip_sphere=False):
"""Returns the center coordinates of each cell of a 3D grid with resolution^3 cells,
that is placed in the unit-cube.
If clip_sphere it True it drops the "corner" cells that lie outside the unit-sphere.
"""
grid = np.ndarray((resolution, resolution, resolution, 3), np.float32)
spacing = 1.0 / float(resolution - 1)
for i in range(resolution):
for j in range(resolution):
for k in range(resolution):
grid[i, j, k, 0] = i * spacing - 0.5
grid[i, j, k, 1] = j * spacing - 0.5
grid[i, j, k, 2] = k * spacing - 0.5
if clip_sphere:
grid = grid.reshape(-1, 3)
grid = grid[norm(grid, axis=1) <= 0.5]
return grid, spacing
def jsd_between_point_cloud_sets(sample_pcs, ref_pcs, resolution=28):
"""Computes the JSD between two sets of point-clouds, as introduced in the paper
```Learning Representations And Generative Models For 3D Point Clouds```.
Args:
sample_pcs: (np.ndarray S1xR2x3) S1 point-clouds, each of R1 points.
ref_pcs: (np.ndarray S2xR2x3) S2 point-clouds, each of R2 points.
resolution: (int) grid-resolution. Affects granularity of measurements.
"""
in_unit_sphere = True
sample_grid_var = entropy_of_occupancy_grid(sample_pcs, resolution, in_unit_sphere)[1]
ref_grid_var = entropy_of_occupancy_grid(ref_pcs, resolution, in_unit_sphere)[1]
return jensen_shannon_divergence(sample_grid_var, ref_grid_var)
def entropy_of_occupancy_grid(pclouds, grid_resolution, in_sphere=False, verbose=False):
"""Given a collection of point-clouds, estimate the entropy of the random variables
corresponding to occupancy-grid activation patterns.
Inputs:
pclouds: (numpy array) #point-clouds x points per point-cloud x 3
grid_resolution (int) size of occupancy grid that will be used.
"""
epsilon = 10e-4
bound = 0.5 + epsilon
if abs(np.max(pclouds)) > bound or abs(np.min(pclouds)) > bound:
if verbose:
warnings.warn('Point-clouds are not in unit cube.')
if in_sphere and np.max(np.sqrt(np.sum(pclouds ** 2, axis=2))) > bound:
if verbose:
warnings.warn('Point-clouds are not in unit sphere.')
grid_coordinates, _ = unit_cube_grid_point_cloud(grid_resolution, in_sphere)
grid_coordinates = grid_coordinates.reshape(-1, 3)
grid_counters = np.zeros(len(grid_coordinates))
grid_bernoulli_rvars = np.zeros(len(grid_coordinates))
nn = NearestNeighbors(n_neighbors=1).fit(grid_coordinates)
for pc in pclouds:
_, indices = nn.kneighbors(pc)
indices = np.squeeze(indices)
for i in indices:
grid_counters[i] += 1
indices = np.unique(indices)
for i in indices:
grid_bernoulli_rvars[i] += 1
acc_entropy = 0.0
n = float(len(pclouds))
for g in grid_bernoulli_rvars:
if g > 0:
p = float(g) / n
acc_entropy += entropy([p, 1.0 - p])
return acc_entropy / len(grid_counters), grid_counters
def jensen_shannon_divergence(P, Q):
if np.any(P < 0) or np.any(Q < 0):
raise ValueError('Negative values.')
if len(P) != len(Q):
raise ValueError('Non equal size.')
P_ = P / np.sum(P) # Ensure probabilities.
Q_ = Q / np.sum(Q)
e1 = entropy(P_, base=2)
e2 = entropy(Q_, base=2)
e_sum = entropy((P_ + Q_) / 2.0, base=2)
res = e_sum - ((e1 + e2) / 2.0)
res2 = _jsdiv(P_, Q_)
if not np.allclose(res, res2, atol=10e-5, rtol=0):
warnings.warn('Numerical values of two JSD methods don\'t agree.')
return res
def _jsdiv(P, Q):
"""another way of computing JSD"""
def _kldiv(A, B):
a = A.copy()
b = B.copy()
idx = np.logical_and(a > 0, b > 0)
a = a[idx]
b = b[idx]
return np.sum([v for v in a * np.log2(a / b)])
P_ = P / np.sum(P)
Q_ = Q / np.sum(Q)
M = 0.5 * (P_ + Q_)
return 0.5 * (_kldiv(P_, M) + _kldiv(Q_, M))
ZERO = 0.1
ONE = 0.9
def smooth_labels(B,ran=[0.9,1.0]):
#return y - 0.3 + (np.random.random(y.shape) * 0.5)
return (ran[1]-ran[0])*np.random.random(B) + ran[0]
#y = ones((n_samples, 1))
    # example of smoothing class=1 labels into a range such as [0.7, 1.2]
def smooth_positive_labels(B,ran=[0.9,1.0]):
#return y - 0.3 + (np.random.random(y.shape) * 0.5)
return (ran[1]-ran[0])*np.random.random((B,)) + ran[0]
# example of smoothing class=0 to [0.0, 0.3]
#y = zeros((n_samples, 1))
def smooth_negative_labels(B,ran=[0.0,0.1]):
#return y + np.random.random(y.shape) * 0.3
return (ran[1]-ran[0])*np.random.random((B,)) + ran[0]
# randomly flip some labels
#y = ones((n_samples, 1))
#or y = zeros((n_samples, 1))
def noisy_labels(y, p_flip=0.05):
# determine the number of labels to flip
n_select = int(p_flip * y.shape[0])
# choose labels to flip
flip_ix = np.random.choice([i for i in range(y.shape[0])], size=n_select)
# invert the labels in place
y[flip_ix] = 1 - y[flip_ix]
return y
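# Usage sketch for the label tricks above ('batch_size' is a placeholder):
#   real_labels = smooth_positive_labels(batch_size)      # roughly U[0.9, 1.0]
#   fake_labels = smooth_negative_labels(batch_size)      # roughly U[0.0, 0.1]
#   real_labels = noisy_labels(real_labels, p_flip=0.05)  # flip about 5% of labels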
def gen_loss(d_real, d_fake, gan="wgan", weight=1., d_real_p=None, d_fake_p=None,noise_label=False):
if gan.lower() == "wgan":
wg_loss_orig = - d_fake.mean()
wg_loss = wg_loss_orig * weight
return wg_loss, {
"wgan_gen_loss": wg_loss.clone().detach().item(),
"wgan_gen_loss_orig": wg_loss_orig.clone().detach().item(),
}
elif gan.lower() == "hinge":
g_loss = -d_fake.mean()
d_correct = (d_real >= 0.).float().sum() + (d_fake < 0.).float().sum()
d_acc = d_correct / float(d_real.size(0) + d_fake.size(0))
loss = weight * g_loss
return loss, {
'loss': loss.clone().detach(),
"dis_acc": d_acc.clone().detach(),
"dis_correct": d_correct.clone().detach(),
'g_loss': g_loss.clone().detach()
}
elif gan.lower() == "ls":
#mse = nn.MSELoss()
B = d_fake.size(0)
#real_label_np = np.ones((B,))
fake_label_np = np.ones((B,))
if noise_label:
# occasionally flip the labels when training the generator to fool the D
fake_label_np = noisy_labels(fake_label_np, 0.05)
#real_label = torch.from_numpy(real_label_np.astype(np.float32)).cuda()
fake_label = torch.from_numpy(fake_label_np.astype(np.float32)).cuda()
# real_label = Variable(torch.FloatTensor(d_fake.size(0)).fill_(1).cuda())
# fake_label = Variable(torch.FloatTensor(d_fake.size(0)).fill_(0).cuda())
g_loss = F.mse_loss(d_fake, fake_label)
if d_fake_p is not None:
fake_label_p = Variable(torch.FloatTensor(d_fake_p.size(0), d_fake_p.size(1)).fill_(1).cuda())
g_loss_p = F.mse_loss(d_fake_p,fake_label_p)
g_loss = g_loss + 0.2*g_loss_p
loss = weight * g_loss
return loss, {
'loss': loss.clone().detach(),
'g_loss': g_loss.clone().detach()
}
elif gan.lower() == "gan":
fake_target = torch.tensor([1.0]).cuda()
fake_loss = functools.partial(BCEfakeloss, target=fake_target)
g_loss = fake_loss(d_fake)
if d_fake_p is not None:
g_loss_p = fake_loss(d_fake_p.view(-1))
g_loss = g_loss + g_loss_p
loss = weight * g_loss
return loss, {
'loss': loss.clone().detach(),
'g_loss': g_loss.clone().detach()
}
elif gan.lower() == "real":
# https://github.com/weishenho/SAGAN-with-relativistic/blob/master/main.py
        y = Variable(torch.Tensor(d_real.size(0)).fill_(1.0), requires_grad=False).to(d_real)
d_loss = torch.mean((d_real - torch.mean(d_fake) + y) ** 2)
g_loss = torch.mean((d_fake - torch.mean(d_real) - y) ** 2)
# d_loss = torch.mean((d_real - torch.mean(d_fake) - y) ** 2)
# g_loss = torch.mean((d_fake - torch.mean(d_real) + y) ** 2)
        loss = (g_loss + d_loss) / 2.0
        return loss, {
            'loss': loss.clone().detach(),
            'g_loss': g_loss.clone().detach(),
            'd_loss': d_loss.clone().detach()
        }
else:
raise NotImplementedError("Not implement: %s" % gan)
def mix_loss(d_mix, gan="wgan", weight=1.,d_mix_p=None,target_map_p=None):
if gan.lower() == "ls":
fake_label = Variable(torch.FloatTensor(d_mix.size(0)).fill_(0).cuda())
mix_loss = F.mse_loss(d_mix, fake_label)
if d_mix_p is not None:
mix_loss_p = F.mse_loss(d_mix_p, target_map_p)
mix_loss = (mix_loss + mix_loss_p)/2.0
loss = mix_loss
return loss, {
'loss': loss.clone().detach(),
}
elif gan.lower() =="gan":
fake_target = torch.tensor([0.0]).cuda()
mix_loss = F.binary_cross_entropy_with_logits(d_mix, fake_target.expand_as(d_mix),
reduction="none")
if d_mix_p is not None:
consistency_loss = F.mse_loss(d_mix_p, target_map_p)
mix_list = []
for i in range(d_mix_p.size(0)):
# MIXUP LOSS 2D
mix2d_i = F.binary_cross_entropy_with_logits(d_mix_p[i].view(-1), target_map_p[i].view(-1))
mix_list.append(mix2d_i)
D_loss_mixed_2d = torch.stack(mix_list)
mix_loss = D_loss_mixed_2d + mix_loss
mix_loss = mix_loss.mean()
mix_loss = mix_loss + consistency_loss
# -> D_loss_mixed_2d.mean() is taken later
else:
mix_loss = mix_loss.mean()
loss = mix_loss
return loss, {
'loss': loss.clone().detach(),
}
else:
raise NotImplementedError("Not implement: %s" % gan)
def dis_loss(d_real, d_fake, gan="wgan", weight=1.,d_real_p=None, d_fake_p=None, noise_label=False):
# B = d_fake.size(0)
# a = 1.0
# b = 0.9
if gan.lower() == "wgan":
loss_fake = d_fake.mean()
loss_real = d_real.mean()
wg_loss_orig = loss_fake - loss_real
wg_loss = wg_loss_orig * weight
return wg_loss, {
"wgan_dis_loss": wg_loss.clone().detach().item(),
"wgan_dis_loss_orig": wg_loss_orig.clone().detach().item(),
"wgan_dis_loss_real": loss_real.clone().detach().item(),
"wgan_dis_loss_fake": loss_fake.clone().detach().item()
}
elif gan.lower() == "hinge":
d_loss_real = torch.nn.ReLU()(1.0 - d_real).mean()
d_loss_fake = torch.nn.ReLU()(1.0 + d_fake).mean()
# d_loss_real = -torch.min(d_real - 1, d_real * 0).mean()
# d_loss_fake = -torch.min(-d_fake - 1, d_fake * 0).mean()
real_correct = (d_real >= 0.).float().sum() + (d_fake < 0.).float().sum()
real_acc = real_correct / float(d_real.size(0) + d_fake.size(0))
d_loss = d_loss_real + d_loss_fake
loss = d_loss * weight
return loss, {
"loss": loss.clone().detach(),
"d_loss": d_loss.clone().detach(),
"dis_acc": real_acc.clone().detach(),
"dis_correct": real_correct.clone().detach(),
"loss_real": d_loss_real.clone().detach(),
"loss_fake": d_loss_fake.clone().detach(),
}
elif gan.lower() == "ls":
mse = nn.MSELoss()
B = d_fake.size(0)
real_label_np = np.ones((B,))
fake_label_np = np.zeros((B,))
if noise_label:
real_label_np = smooth_labels(B,ran=[0.9,1.0])
#fake_label_np = smooth_labels(B,ran=[0.0,0.1])
# occasionally flip the labels when training the D to
# prevent D from becoming too strong
real_label_np = noisy_labels(real_label_np, 0.05)
#fake_label_np = noisy_labels(fake_label_np, 0.05)
real_label = torch.from_numpy(real_label_np.astype(np.float32)).cuda()
fake_label = torch.from_numpy(fake_label_np.astype(np.float32)).cuda()
# real_label = Variable((1.0 - 0.9) * torch.rand(d_fake.size(0)) + 0.9).cuda()
# fake_label = Variable((0.1 - 0.0) * torch.rand(d_fake.size(0)) + 0.0).cuda()
t = 0.5
real_correct = (d_real >= t).float().sum()
real_acc = real_correct / float(d_real.size(0))
fake_correct = (d_fake < t).float().sum()
fake_acc = fake_correct / float(d_fake.size(0))
# + d_fake.size(0))
# real_label = Variable(torch.FloatTensor(d_fake.size(0)).fill_(1).cuda())
# fake_label = Variable(torch.FloatTensor(d_fake.size(0)).fill_(0).cuda())
g_loss = F.mse_loss(d_fake, fake_label)
d_loss = F.mse_loss(d_real, real_label)
if d_real_p is not None and d_fake_p is not None:
real_label_p = Variable((1.0 - 0.9) * torch.rand(d_fake_p.size(0), d_fake_p.size(1)) + 0.9).cuda()
fake_label_p = Variable((0.1 - 0.0) * torch.rand(d_fake_p.size(0), d_fake_p.size(1)) + 0.0).cuda()
# real_label_p = Variable(torch.FloatTensor(d_real_p.size(0), d_real_p.size(1)).fill_(1).cuda())
# fake_label_p = Variable(torch.FloatTensor(d_real_p.size(0), d_real_p.size(1)).fill_(0).cuda())
g_loss_p = F.mse_loss(d_fake_p, fake_label_p)
d_loss_p = F.mse_loss(d_real_p, real_label_p)
g_loss = (g_loss + 0.1*g_loss_p)
d_loss = (d_loss + 0.1*d_loss_p)
loss = (g_loss+d_loss)/2.0
return loss, {
'loss': loss.clone().detach(),
'g_loss': g_loss.clone().detach(),
            'd_loss': d_loss.clone().detach(),
"fake_acc": fake_acc.clone().detach(),
"real_acc": real_acc.clone().detach()
}
elif gan.lower() =="gan":
d_real_target = torch.tensor([1.0]).cuda()
d_fake_target = torch.tensor([0.0]).cuda()
discriminator_loss = functools.partial(BCEloss, d_real_target=d_real_target, d_fake_target=d_fake_target)
g_loss, d_loss = discriminator_loss(d_fake, d_real)
if d_real_p is not None and d_fake_p is not None:
g_loss_p,d_loss_p = discriminator_loss(d_fake_p.view(-1),d_real_p.view(-1))
g_loss = (g_loss + g_loss_p)/2.0
d_loss = (d_loss + d_loss_p)/2.0
loss = (g_loss+d_loss)/2.0
return loss, {
'loss': loss.clone().detach(),
'g_loss': g_loss.clone().detach(),
            'd_loss': d_loss.clone().detach()
}
elif gan.lower() == "real":
        y = Variable(torch.Tensor(d_real.size(0)).fill_(1.0), requires_grad=False).to(d_real)
d_loss = torch.mean((d_real - torch.mean(d_fake) - y) ** 2)
g_loss = torch.mean((d_fake - torch.mean(d_real) + y) ** 2)
        loss = (g_loss+d_loss)/2.0
        return loss, {
            'loss': loss.clone().detach(),
            'g_loss': g_loss.clone().detach(),
            'd_loss': d_loss.clone().detach()
        }
else:
raise NotImplementedError("Not implement: %s" % gan)
def BCEloss(D_fake, D_real, d_real_target, d_fake_target):
real = F.binary_cross_entropy_with_logits(D_real,d_real_target.expand_as(D_real))
fake = F.binary_cross_entropy_with_logits(D_fake,d_fake_target.expand_as(D_fake))
return real, fake
def BCEfakeloss(D_fake,target):
return F.binary_cross_entropy_with_logits(D_fake, target.expand_as(D_fake))
def dis_acc(d_real, d_fake, loss_type="wgan", **kwargs):
if loss_type.lower() == "wgan":
# No threshold, don't know which one is correct which is not
return {}
elif loss_type.lower() == "hinge":
return {}
else:
raise NotImplementedError("Not implement: %s" % loss_type)
def gradient_penalty(x_real, x_fake, d_real, d_fake,
lambdaGP=10., gp_type='zero_center', eps=1e-8):
if gp_type == "zero_center":
bs = d_real.size(0)
grad = torch.autograd.grad(
outputs=d_real, inputs=x_real,
grad_outputs=torch.ones_like(d_real).to(d_real),
create_graph=True, retain_graph=True)[0]
# [grad] should be either (B, D) or (B, #points, D)
grad = grad.reshape(bs, -1)
grad_norm = gp_orig = torch.sqrt(torch.sum(grad ** 2, dim=1)).mean()
gp = gp_orig ** 2. * lambdaGP
# real_image.requires_grad = True
# grad_real = torch.autograd.grad(outputs=real_predict.sum(), inputs=real_image, create_graph=True)[0]
# grad_penalty_real = (grad_real.view(grad_real.size(0), -1).norm(2, dim=1) ** 2).mean()
# grad_penalty_real = 10 / 2 * grad_penalty_real
# grad_penalty_real.backward()
return gp, {
'gp': gp.clone().detach().cpu(),
'gp_orig': gp_orig.clone().detach().cpu(),
'grad_norm': grad_norm.clone().detach().cpu()
}
else:
        raise NotImplementedError("Invalid gp type: %s" % gp_type)
#dist, ass = EMD(sample, ref, 0.005, 300)
class CutMix:
def __init__(self):
self.EMD = emd_module.emdModule()
def __call__(self, real_data, fake_data,bs=16):
real_data = real_data.transpose(1,2)
fake_data = fake_data.transpose(1,2)
B = real_data.size(0)
N = real_data.size(1)
lam = np.random.beta(1, 1, size=B)
sample_nums = (lam * 2048).astype(np.int32)
seeds = [16, 32, 64, 128, 256, 512]
#sample_nums = np.random.choice(seeds, size=B).astype(np.int32)
sample_id = np.random.choice(np.arange(N), size=B)
#print(sample_id)
sample_id = torch.from_numpy(sample_id).int().to(fake_data)
alpha = torch.rand(B, 1, 1, requires_grad=True).to(fake_data)
sample_id = torch.randint(2048,size=(bs,)).to(fake_data).long()
#rf_dist = pairwise_dist(real_data,fake_data)
rr_dist = pairwise_dist(real_data,real_data)
map = torch.ones((B,N)).cuda()
map_s = torch.ones((B)).cuda()
for i in range(B):
idx = rr_dist[i,sample_id[i]].topk(k=int(sample_nums[i]), dim=-1)[1]
map[i,idx] = 0
map_s[i] = 1.0 - 1.0*sample_nums[i]/N
if torch.rand(1) > 0.5:
map = 1.0 - map
map_s = 1.0 -map_s
dist, ass = self.EMD(real_data, fake_data, 0.005, 300)
temp = fake_data
ass = ass.long()
for i in range(B):
temp[i] = temp[i][ass[i]]
temp_map = map.view(B, N, 1).repeat(1, 1, 3)
temp = temp_map * real_data + (1.0 - temp_map) * temp
return temp.transpose(1,2), map_s, map
class GradientPenalty:
"""Computes the gradient penalty as defined in "Improved Training of Wasserstein GANs"
(https://arxiv.org/abs/1704.00028)
Args:
batchSize (int): batch-size used in the training. Must be updated w.r.t the current batchsize
lambdaGP (float): coefficient of the gradient penalty as defined in the article
gamma (float): regularization term of the gradient penalty, augment to minimize "ghosts"
"""
def __init__(self, lambdaGP, gamma=1, vertex_num=2500, device=torch.device('cpu')):
self.lambdaGP = lambdaGP
self.gamma = gamma
self.vertex_num = vertex_num
self.EMD = emd_module.emdModule()
self.device = device
def __call__(self, netD, real_data, fake_data,mapping=False):
B = real_data.size(0)
fake_data = fake_data[:B]
alpha = torch.rand(B, 1, 1, requires_grad=True).to(fake_data).expand_as(fake_data)
# randomly mix real and fake data
#interpolates = real_data + alpha * (fake_data - real_data)
interpolates = Variable(alpha * real_data + (1 - alpha) * fake_data, requires_grad=True)
if mapping:
fake_data = fake_data.transpose(1,2)
real_data = real_data.transpose(1,2)
dist, ass = self.EMD(fake_data, real_data, 0.005, 300)
interpolates = real_data
ass = ass.long()
for i in range(B):
interpolates[i] = interpolates[i][ass[i]]
interpolates = alpha*fake_data + (1.0-alpha)*interpolates
interpolates = interpolates.transpose(1,2)
# compute output of D for interpolated input
disc_interpolates = netD(interpolates)
# compute gradients w.r.t the interpolated outputs
gradients = grad(outputs=disc_interpolates, inputs=interpolates,
grad_outputs=torch.ones(disc_interpolates.size()).to(fake_data),
create_graph=True, retain_graph=True, only_inputs=True)[0].contiguous().view(B, -1)
gradient_penalty = (((gradients.norm(2, dim=1) - self.gamma) / self.gamma) ** 2).mean() * self.lambdaGP
return gradient_penalty
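# Usage sketch ('netD', 'real_batch' and 'fake_batch' are placeholders): compute
# the penalty on interpolated point clouds and add it to the discriminator loss.
#   gp = GradientPenalty(lambdaGP=10.0, gamma=1, device=torch.device('cuda'))
#   penalty = gp(netD, real_batch, fake_batch)
#   d_total = d_loss + penalty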
if __name__ == "__main__":
B, N = 2, 10
x = torch.rand(B, N, 3)
y = torch.rand(B, N, 3)
    # use the module-level distChamfer helper, which returns per-point distances in both directions
min_l, min_r = distChamfer(x.cuda(), y.cuda())
print(min_l.shape)
print(min_r.shape)
l_dist = min_l.mean().cpu().detach().item()
r_dist = min_r.mean().cpu().detach().item()
print(l_dist, r_dist)
| 34.983421
| 113
| 0.59779
|
75b6b824f07d0ea4e76f14f5145f7e2b382b9feb
| 137,399
|
py
|
Python
|
modules/xia2/Modules/Xia2html/smartie.py
|
jorgediazjr/dials-dev20191018
|
77d66c719b5746f37af51ad593e2941ed6fbba17
|
[
"BSD-3-Clause"
] | null | null | null |
modules/xia2/Modules/Xia2html/smartie.py
|
jorgediazjr/dials-dev20191018
|
77d66c719b5746f37af51ad593e2941ed6fbba17
|
[
"BSD-3-Clause"
] | null | null | null |
modules/xia2/Modules/Xia2html/smartie.py
|
jorgediazjr/dials-dev20191018
|
77d66c719b5746f37af51ad593e2941ed6fbba17
|
[
"BSD-3-Clause"
] | 1
|
2020-02-04T15:39:06.000Z
|
2020-02-04T15:39:06.000Z
|
from __future__ import absolute_import, division, print_function
import copy
import linecache
import os
import re
#######################################################################
# Import modules that this module depends on
#######################################################################
import sys
import time
# smartie.py: CCP4 logfile parsing classes and functions
# Copyright (C) 2006-2007 Peter Briggs, Wanjuan Yang, CCLRC
#
# This code is distributed under the terms and conditions of the
# CCP4 licence agreement as `Part 1' (Annex 2) software.
# A copy of the CCP4 licence can be obtained by writing to the
# CCP4 Secretary, Daresbury Laboratory, Warrington WA4 4AD, UK.
#
########################################################################
#
# smartie.py
#
#########################################################################
"""smartie: CCP4 logfile parsing functions
The smartie module provides a set of classes and methods for parsing
logfiles from CCP4i and CCP4 programs. The central class is the 'logfile',
which provides a basic DOM-like description of a logfile and its
contents. Other classes provide descriptions of smaller chunks of logfile
features (programs, tables, keytext data and CCP4i informational messages).
The name 'smartie' reflects the module's origins as the intended driver
for a 'smart logfile browser'.
Some additional documentation material is also available in the file
smartie_overview.html."""
__cvs_id__ = "$Id$"
__version__ = "0.0.15"
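# Typical entry point (a sketch; parselog() is the documented driver for the
# classes below and "refmac.log" is a placeholder filename):
#   log = parselog("refmac.log")
#   print(log.nprograms(), log.ntables())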
#######################################################################
# Class definitions
#######################################################################
# buffer
#
# A class to store sets of lines which can then
# be passed to regular expression matching functions
#
class buffer:
"""Buffer object for parsing log files.
The buffer object holds lines of text which are added
a line at a time via the 'append' method."""
def __init__(self, maxsize=0):
"""Initialise a new buffer object.
If 'maxsize' is greater than zero then it sets the
maximum number of lines that the buffer will hold.
If this number of lines is exceeded by an append
operation, then the 'oldest' line is dropped from
the buffer.
If 'maxsize' is zero or negative then no upper limit
is placed on the number of lines that the buffer will
store."""
self.__contents = []
self.__maxsize = maxsize
def __len__(self):
"""Builtin: return the number of lines stored in the buffer."""
return len(self.__contents)
def append(self, line):
"""Append a line of text to the buffer.
The line will have any trailing 'end of line'
characters removed automatically upon storage."""
        if self.__maxsize > 0 and len(self.__contents) >= self.__maxsize:
self.__contents = self.__contents[1:]
# Remove trailing newline/end of line characters
self.__contents.append(line.rstrip("\r\n"))
def len(self):
"""Return the number of lines currently stored.
Deprecated: use len(table) instead."""
return self.__len__()
def line(self, n):
"""Return the n'th line of text from the buffer.
The line will be returned without end-of-line characters."""
return self.__contents[n]
def tail(self, n=10):
"""Return the 'tail' of the buffer.
This returns the last n lines of text stored in the
buffer, concatenated into a single string with end lines
terminated by a newline character.
If a number of lines is not specified then it defaults
to the last 10 lines of text."""
nend = self.len()
nstart = nend - n
if nstart < 0:
nstart = 0
return self.contents(nstart, nend)
def getlines(self, n, m):
"""Return lines 'n' through to 'm' as a list.
Return a set of lines starting from line index 'n' up to
but not including line index 'm', as a list."""
return self.__contents[n:m]
def contents(self, n, m):
"""Return lines 'n' through to 'm' as a string.
Return the specified lines from the buffer concatenated
into a single string with line ends terminated by a newline
character."""
str = ""
subset = self.getlines(n, m)
for line in subset:
str = str + line + "\n"
return str.rstrip("\n")
def all(self):
"""Return entire buffer contents as a string.
All lines in the buffer will be concatenated into a single
string with line ends terminated by a newline character."""
str = ""
for line in self.__contents:
str = str + line + "\n"
return str.rstrip("\n")
def clear(self):
"""Clear the buffer of all content.
Delete all lines currently stored in the buffer."""
self.__contents[0 : self.len()] = []
return
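# Usage sketch for the buffer class:
#   buf = buffer(maxsize=100)
#   buf.append("First line of log text")
#   print(buf.tail(5))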
#
# tablebuffer
#
# Buffer class specialised for handling tables
# Subclasses buffer
class tablebuffer(buffer):
"""Buffer object specialised for dealing with CCP4 tables.
This class extends the 'buffer' class with additional
data and methods specialised for CCP4 tables."""
def __init__(self, maxsize=0):
"""Initialise a new tablebuffer object."""
self.__hasTABLE = False
self.__hasGRAPHS = False
self.__ndoubledollar = 0
buffer.__init__(self, maxsize)
def append(self, line):
"""Append a line of text to the tablebuffer.
This overrides the append method of the parent class
and performs additional checks on the line being
added, to also identify specific features (such as
'$$' symbols) that are part of a CCP4 table."""
# Check if line has "$TABLE"
hasTABLE = re.compile(r"\$TABLE *:").search(line)
if hasTABLE:
# Check if the buffer already contains a
# partial or complete table
if self.__hasTABLE:
# Dump the existing table
self.clear()
self.__hasTABLE = True
# Check if line has "$(GRAPHS|SCATTER)"
if self.__hasTABLE:
buffer.append(self, line)
hasGRAPHS = re.compile(r"\$(GRAPHS|SCATTER)").search(line)
if hasGRAPHS:
self.__hasGRAPHS = True
# Check if line has "$$"
if self.__hasGRAPHS:
ndoubledollar = line.count("$$")
if ndoubledollar > 0:
self.__ndoubledollar = self.__ndoubledollar + ndoubledollar
# Check if we have a complete table yet
def complete(self):
"""Check if the buffer appears to contain a complete table.
Returns 'True' if the buffer contains the following
features, encountered in this order:
1. '$TABLE' token
2. '$GRAPH' or '$SCATTER' token
3. Four '$$' tokens
In this case it is likely that the buffer contains a
complete CCP4 table.
If any of these elements are missing then the method
returns 'False'."""
if self.__hasTABLE and self.__hasGRAPHS and self.__ndoubledollar == 4:
return True
else:
return False
def clear(self):
"""Clear the tablebuffer of all data.
This overrides the 'clear' method of the parent class
and also resets the flag data that is specific to the
tablebuffer class."""
self.__hasTABLE = False
self.__hasGRAPHS = False
self.__ndoubledollar = 0
buffer.clear(self)
# logfile
#
# Abstract description of a CCP4 logfile
#
class logfile:
"""Object describing a program logfile.
A logfile object is populated and returned by the
parselog() function. This takes a file name as a single
compulsory argument; the optional 'progress' argument
specifies a number of lines at which to report progress
when parsing the file.
A logfile object holds lists of 'programs', 'tables',
'keytext messages' and 'CCP4i information messages',
plus a master list of 'fragments' (which can be any of
the above). There are methods to allow access to each of
these lists.
There is also a list of CCP4 'summaries' that have been
been found in the logfile. These are kept distinct from
the logfile fragments above."""
def __init__(self, filename):
"""Initialise the logfile object."""
# Source file
if os.path.isabs(filename):
self.__filename = os.path.normpath(filename)
else:
# Construct an absolute path
self.__filename = os.path.abspath(os.path.join(os.getcwd(), filename))
# CCP4i header and tail
self.__isccp4i = False
self.__ccp4i_header = []
self.__ccp4i_tail = []
# List of fragments, programs, tables, keytexts
# and ccp4i_info
self.__fragments = []
self.__programs = []
self.__tables = []
self.__keytexts = []
self.__ccp4i_info = []
self.__summaries = []
def __nonzero__(self):
"""Implement the nonzero built-in method.
The logfile will test as True if at least one
fragment is defined - otherwise it will test as
False."""
if self.__fragments:
return True
return False
def append_ccp4i_header(self, line):
"""Append a line of text to the CCP4i header."""
# FIXME should be internally accessible only?
self.__ccp4i_header.append(line)
self.__isccp4i = True
def ccp4i_header(self):
"""Return the CCP4i header content."""
return self.__ccp4i_header
def append_ccp4i_tail(self, line):
"""Append a line of text to the CCP4i tail."""
# FIXME should be internally accessible only?
self.__ccp4i_tail.append(line)
self.__isccp4i = True
def ccp4i_tail(self):
"""Return the CCP4i tail content."""
return self.__ccp4i_tail
def isccp4i(self):
"""Return True if the logfile appears to be from CCP4i."""
return self.__isccp4i
def filename(self):
"""Return the filename of the source logfile."""
return self.__filename
def newfragment(self):
"""Generate a new fragement and add to the logfile.
Returns a new fragment object and calls addfragment to
add it to the list of fragments for this logfile."""
newfragment = fragment()
self.addfragment(newfragment)
return newfragment
def addfragment(self, fragment):
"""Add an existing fragment-like object to the logfile."""
self.__fragments.append(fragment)
def nfragments(self):
"""Return the number of fragments."""
return len(self.__fragments)
def fragment(self, i):
"""Return the i'th fragment in the logfile.
A fragment can be a program, table, CCP4i message or
keytext object.
Note that i counts up from zero."""
return self.__fragments[i]
def addprogram(self):
"""Add a new program object to the logfile."""
# FIXME should be internally accessible only?
newprogram = program()
self.__programs.append(newprogram)
self.addfragment(newprogram)
return newprogram
def nprograms(self):
"""Return the number of program objects."""
return len(self.__programs)
def program(self, i):
"""Return the i'th program object in the logfile.
Note that i counts up starting from zero."""
return self.__programs[i]
def addtable(self, thistable=False, tabletext=""):
"""Add a table object to the list of tables.
If an existing table object is specified with the
thistable argument then this is appended to the
list. Otherwise a new table object is created. In
that case, if tabletext is supplied then this is
used to populate the table object; otherwise the
new table object is empty."""
# FIXME should be internally accessible only?
if thistable:
# Table object supplied
self.__tables.append(thistable)
return thistable
else:
# Make a new table
if tabletext:
new_table = table(tabletext)
else:
new_table = table()
self.__tables.append(new_table)
return new_table
def ntables(self):
"""Return the number of tables in the logfile."""
return len(self.__tables)
def table(self, i):
"""Return the i'th table object in the logfile.
This method is deprecated, use 'logfile.tables()[i]'
instead.
Note that i counts up starting from zero."""
return self.__tables[i]
def findtable(self, title_pattern, index=0):
"""Fetch a table in the logfile by matching the title.
This method is deprecated; use the 'tables' method
instead.
        This method looks up a particular table in the logfile's
        list of table objects, by finding
the first table in the list which matches the supplied
regular expression 'title_pattern'.
If there is more than one matching table then the 'index'
argument specifies which of the list of matching tables
should be returned. If index is out of range (or there are
no matching tables) then return 'None'.
It calls the 'find_table_by_title' function."""
return find_table_by_title(self.__tables, title_pattern, index)
def tables(self, select_title=""):
"""Return a list of tables in the logfile.
        If no 'select_title' is specified then this returns
the list of all the table objects stored in the logfile
object.
If 'select_title' is given then this is compiled as
a regular expression pattern, and the method returns a
list containing only those table objects for which the
title matches the pattern.
In either case if no table objects are found then an
empty list is returned.
This method calls the 'find_tables_by_title' function."""
if select_title == "":
# Return everything
return copy.copy(self.__tables)
return find_tables_by_title(self.__tables, select_title)
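    # Usage sketch (illustrative): assuming 'log' is a populated logfile
    # object built by this module's parsing routine, the tables whose titles
    # match a pattern can be listed like this:
    #
    #   for tbl in log.tables(select_title="Analysis v resolution"):
    #       print(tbl.title(), tbl.ncolumns(), "columns")
    #
    # The title pattern used here is only an example.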
def addkeytext(self, thiskeytext=False, name="", junk_text="", message=""):
"""Add a keytext object to the list of keytexts.
If an existing keytext object is supplied with the
thiskeytext argument this is appended to the list.
Otherwise a new keytext object is created and
populated with the contents of the name, junk_text
and message arguments."""
# FIXME should be internally accessible only?
if thiskeytext:
# Table object supplied
self.__keytexts.append(thiskeytext)
return thiskeytext
else:
# Make a new keytext
new_keytext = keytext(name, junk_text, message)
self.__keytexts.append(new_keytext)
return new_keytext
def nkeytexts(self):
"""Return the number of keytexts in the logfile."""
return len(self.__keytexts)
def keytext(self, i):
"""Return the i'th keytext object in the logfile.
Note that i counts up starting from zero."""
return self.__keytexts[i]
def addccp4i_info(self):
"""Add another ccp4i_info object to the logfile.
        Creates a new ccp4i_info object and adds it to the
list of fragments, and to the list of CCP4i information
messages found in the logfile."""
# FIXME should be internally accessible only?
# Make a new ccp4i_info object
newccp4i_info = ccp4i_info()
self.__ccp4i_info.append(newccp4i_info)
self.addfragment(newccp4i_info)
return newccp4i_info
def nccp4i_info(self):
"""Return the number of ccp4i_info messages."""
return len(self.__ccp4i_info)
def ccp4i_info(self, i):
"""Return the i'th ccp4i_info object in the logfile.
Note that i counts up starting from zero."""
return self.__ccp4i_info[i]
def addsummary(self, start_line=-1):
"""Add another summary object to the logfile.
A new summary object is created and returned. The
new object is also added to the list of summaries
for the logfile."""
new_summary = summary(self.__filename, start_line)
self.__summaries.append(new_summary)
return new_summary
def nsummaries(self):
"""Return the number of summaries found in the log."""
return len(self.__summaries)
def summary(self, i):
"""Return the i'th summary object in the logfile.
Note that i counts up starting from zero."""
return self.__summaries[i]
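    # Usage sketch (illustrative): retrieving the text of each complete
    # summary block found in a parsed logfile ('log' is assumed to be a
    # populated logfile object):
    #
    #   for i in range(log.nsummaries()):
    #       s = log.summary(i)
    #       if s.iscomplete():
    #           print(s.retrieve())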
def set_fragment_start(self, line_no):
"""Set the start line of the most recent fragment.
The most recent fragment is the last fragment object
in the list of fragments for this logfile. 'line_no'
is the current line number in the source file.
If the fragment has an 'nlines' attribute then this
is taken to be the offset from the current line back
to the start of the fragment. If nlines is not
present then the fragment is taken to start after the
end of the previous fragment. If there is no previous
fragment then it is assumed to start from the first
line of the file"""
fragment = self.__fragments[-1]
fragment.set_attribute("source_file", self.__filename)
if fragment.has_attribute("nlines"):
# Calculate the start of the fragment from the
# current position
offset = fragment.nlines
fragment.set_startline(line_no - offset)
else:
if self.nfragments() > 1:
# Assume that the fragment starts from here
fragment.set_startline(line_no)
else:
# This is the first fragment
fragment.set_startline(1)
# Now deal with the previous fragment,
# which may not have an end line set
if self.nfragments() > 1:
last_fragment = self.__fragments[-2]
if last_fragment.get_endline() < 0:
last_fragment.set_endline(fragment.get_startline() - 1)
def set_fragment_end(self, line_no):
"""Set the end line of the most recent fragment.
The most recent fragment is the last fragment object
in the list of fragments for this logfile. 'line_no'
is the current line number in the source file.
The supplied line number is always taken as the last
line number of the fragment. This method will also
check the start line number and will attempt to set
it to a reasonable value if it is not set: either the
first line after the end of the previous fragment,
or the start of the file (if there is no previous
fragment)."""
if not self.__fragments:
# We're in a situation where there was no
# first fragment
# Let's make one now
self.newfragment()
fragment = self.__fragments[-1]
if fragment.get_endline() > -1:
# Don't reset the value if it's already set
return
fragment.set_attribute("source_file", self.__filename)
fragment.set_endline(line_no)
# Check if the start is also set
if fragment.get_startline() < 1:
if self.nfragments() > 1:
# Assume that the fragment started from the
# end of the previous fragment
last_fragment = self.__fragments[-2]
fragment.set_startline(last_fragment.get_endline() + 1)
else:
# This is the first fragment
fragment.set_startline(1)
def fragment_to_program(self, i):
"""Convert the i'th fragment to a program.
This method allows a fragment in the logfile to be
recast as a program, and performs all the necessary
book keeping operations such as updating the lists
of fragment and program objects.
On successful conversion the converted program
object is returned. If the fragment is already a
program then no action is taken."""
if self.fragment(i).isprogram():
return self.fragment(i)
prog = copyfragment(self.fragment(i), program())
# Add the converted program fragment to the
# list of programs
# To do this we need to work out where it belongs
if i == 0:
# Fragment was the first in the list
# Add to the start of the program list
self.__programs.insert(0, prog)
else:
# Look for a fragment after this one
# in the list which is also a program
nextprog = None
for j in range(i, self.nfragments()):
if self.fragment(j).isprogram():
nextprog = self.fragment(j)
break
if not nextprog:
# No programs found - append
self.__programs.append(prog)
else:
# Locate this in the list of programs
j = self.__programs.index(nextprog)
self.__programs.insert(j, prog)
# Remove the converted fragment
self.__fragments.remove(self.fragment(i))
return prog
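# Usage sketch (illustrative): walking the fragments of a parsed logfile and
# distinguishing program logs from other fragment types ('log' is assumed to
# be a populated logfile object; the 'name' attribute is only present if a
# program banner was recognised):
#
#   for i in range(log.nfragments()):
#       frag = log.fragment(i)
#       if frag.isprogram() and frag.has_attribute("name"):
#           print("Program:", frag.name)
#       elif frag.isccp4i_info():
#           print("CCP4i message:", frag.message)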
# fragment
#
# Abstract description of a generic logfile fragment
#
class fragment:
"""Object describing a generic fragment of a logfile.
The fragment object is intended to represent any
'top-level' fragment of a logfile, for example a
program logfile or some output from a script that
    appears in between program logs.
The fragment class is used as the base class for
the program and ccp4i_info classes."""
def __init__(self):
"""Initialise a new fragment object."""
# Initialise a dictionary to store arbitrary
# attributes taken from the program logfile
self.__dict = {}
# List of tables
self.__tables = []
# List of keytexts
self.__keytexts = []
# For fragment retrieval
self.set_source_file("")
self.set_startline(-1)
self.set_endline(-1)
# Flags
self.__nonzero = False
def __nonzero__(self):
"""Implement the __nonzero__ built-in method.
The fragment is considered 'nonzero' once an
attribute, table or keytext has been assigned
to it."""
return self.__nonzero
def __len__(self):
"""Implement the __len__ built-in method.
The length of a fragment is the number of lines
of text that it contains."""
nlines = self.get_endline() - self.get_startline() + 1
if nlines < 0:
nlines = 0
return nlines
def isfragment(self):
"""Return True if this represents a basic fragment."""
return True
def isprogram(self):
"""Return True if this represents a program logfile."""
return False
def isccp4i_info(self):
"""Return True if this is a CCP4i information fragment."""
return False
def __setitem__(self, key, value):
"""Implements the functionality for program[key] = value
Wrapper for the set_attribute method."""
self.set_attribute(key, value)
def __getitem__(self, key):
"""Implements the functionality for value = fragment[key]
Wrapper for the get_attribute method."""
return self.get_attribute(key)
def __getattr__(self, key):
"""Implements the functionality for value = fragment.key
Wrapper for the get_attribute method."""
return self.get_attribute(key)
def get_attribute(self, key):
"""Return the value of a fragment attribute.
The key is a string specifying a particular fragment
attribute. If the attribute has been read from the file
then its value is returned, otherwise a KeyError
exception is raised."""
try:
return self.__dict[key]
except KeyError:
raise AttributeError("Unknown attribute '" + str(key) + "'")
def has_attribute(self, key):
"""Check whether a fragment attribute has been set.
The key is a string specifying a particular fragment
attribute. If the attribute has been set then this
method returns True, otherwise it returns False."""
return key in self.__dict
def attributes(self):
"""Return a list of all the fragment attributes.
The list contains all the attributes that have been
set for the fragment."""
return self.__dict.keys()
def set_attribute(self, key, value):
"""Set the value of a fragment attribute.
The key is a string specifying a particular fragment
attribute which will be assigned the given value.
If the attribute doesn't exist then it will be created,
if it does then the current value will be overwritten
by the new one."""
self.__dict[key] = value
self.__nonzero = True
def set_attributes_from_dictionary(self, dict):
"""Set the values of multiple fragment attributes.
For each key in dictionary 'dict', the value of a
fragment attribute with the same name as the key will
be assigned the same value as that of the key."""
for key in dict:
self.__dict[key] = dict[key]
self.__nonzero = True
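    # Usage sketch (illustrative): attributes can be stored and read back
    # using either the dictionary-style or the attribute-style syntax:
    #
    #   frag = fragment()
    #   frag.set_attribute("name", "refmac")
    #   print(frag["name"], frag.name)   # both forms return "refmac"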
def addtable(self, tabletext=""):
"""Add a new table object to the fragment.
Create a new table object and add it to the list of
tables associated with the fragment.
If 'tabletext' is nonblank then the table object will
be automatically populated from the text, if possible.
This method returns the new table object."""
if tabletext:
newtable = table(tabletext)
else:
newtable = table()
self.__tables.append(newtable)
self.__nonzero = True
return newtable
def ntables(self):
"""Return the number of tables found in the fragment."""
return len(self.__tables)
def table(self, i):
"""Return the i'th table object.
This method is deprecated, use 'fragment.tables()[i]'
instead.
fragment.table(i) returns the i'th table object associated
with the fragment object. The methods of the table class
can then be used to drill down into the contents of the
table.
Use the ntables method to get the total number of table
objects associated with the fragment."""
return self.__tables[i]
def findtable(self, title_pattern, index=0):
"""Fetch a table in the fragment by matching the title.
This method is deprecated; use the 'tables' method
instead.
        This method looks up a particular table in the fragment's
        list of table objects, by finding
the first table in the list which matches the supplied
regular expression 'title_pattern'.
If there is more than one matching table then the 'index'
argument specifies which of the list of matching tables
should be returned. If index is out of range (or there are
no matching tables) then return 'None'.
It calls the 'find_table_by_title' function."""
return find_table_by_title(self.__tables, title_pattern, index)
def tables(self, select_title=""):
"""Return a list of tables in the fragment.
        If no 'select_title' is specified then this returns
the list of all the table objects stored in the fragment
object.
If 'select_title' is given then this is compiled as
a regular expression pattern, and the method returns a
list containing only those table objects for which the
title matches the pattern.
In either case if no table objects are found then an
empty list is returned.
This method calls the 'find_tables_by_title' function."""
if select_title == "":
# Return everything
return copy.copy(self.__tables)
return find_tables_by_title(self.__tables, select_title)
def addkeytext(self, name="", junk_text="", message=""):
"""Add a new keytext object to the fragment.
Create a new keytext object and add it to the list of
keytexts associated with the fragment.
The values of the parameters 'name', 'junk_text' and
'message' will be used to initialise the new keytext
object (one or more of these can be blank).
This method returns the new keytext object."""
# FIXME should be internally accessible only?
newkeytext = keytext(name, junk_text, message)
self.__keytexts.append(newkeytext)
self.__nonzero = True
return newkeytext
def nkeytexts(self):
"""Return the number of keytexts found in the logfile fragment.
'Keytexts' are warnings and messages issued by the
ccperror/CCPERR functions within programs; see the
loggraph format documentation for more information, e.g.
http://www.ccp4.ac.uk/dist/html/loggraphformat.html"""
return len(self.__keytexts)
def keytext(self, i):
"""Return the i'th keytext object.
For example: program.keytext(i) returns the i'th keytext
object associated with the program object. The methods
of the keytext class can then be used to drill down into
the contents of the message.
Use the nkeytexts method to get the total number of
keytext objects associated with the program/fragment."""
return self.__keytexts[i]
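    # Usage sketch (illustrative): listing the warning/message keytexts
    # attached to a fragment ('frag' is assumed to be a populated fragment
    # or program object):
    #
    #   for i in range(frag.nkeytexts()):
    #       kt = frag.keytext(i)
    #       print(kt.name(), ":", kt.message())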
def set_startline(self, line_no):
"""Set the start line of the fragment in the source document."""
self.set_attribute("startline", line_no)
def get_startline(self):
"""Get the start line of the fragment in the source document."""
return self.get_attribute("startline")
def set_endline(self, line_no):
"""Set the end line of the fragment in the source document."""
self.set_attribute("endline", line_no)
def get_endline(self):
"""Get the end line of the fragment in the source document."""
return self.get_attribute("endline")
def set_source_file(self, source_file):
"""Set the source document for the fragment.
The source document is specified as the name of the file that
the fragment is part of."""
self.set_attribute("source_file", source_file)
def get_source_file(self):
"""Get the source document for the fragment."""
return self.get_attribute("source_file")
def retrieve(self):
"""Retrieve the text associated with the fragment.
This uses the 'retrieve' method within the module."""
# Retrieve the information
filen = self.get_source_file()
start = self.get_startline()
end = self.get_endline()
return retrieve(filen, start, end)
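# Usage sketch (illustrative): recovering the original block of logfile text
# covered by a fragment, provided its source file and line range were set
# during parsing:
#
#   text = frag.retrieve()
#   print(text.splitlines()[0])   # first line of the fragment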
#
# program
#
# Abstract description of the logfile for a single program
#
class program(fragment):
"""Object describing the log for a single program.
program objects are instantiated and populated by parselog
as part of the parsing process. The program object is
intended to describe a fragment of logfile that corresponds
to the run of a particular program, although in practice
other types of logfile features (for example, 'interstitial'
    fragments, i.e. bits of output in between program logs) are
also assigned to program objects in the current version of
smartie.
A program object holds various attributes describing the
logfile fragment in question, as well as a list of tables
and keytext messages. A program object may also hold CCP4i
information messages, however normally it will not hold this
at the same time as actual program data.
The attributes associated with the program object can be
accessed using either of the syntaxes 'program['attribute']'
or 'program.attribute'.
For programs using the standard CCP4 banners, the following
attributes may be defined:
name: the name of the program from the CCP4 banner, or
equivalent.
version: the program version; for CCP4 programs, this is the
version found in the program banner. For programs that don't
explicitly give their own version number this will be the same
as the CCP4 library version.
date: the date string found in the CCP4 banner; it is
typically the last date that the source code file was
committed to CVS. It is not the date that the program was
run on - for that, see the 'rundate' and 'runtime' attributes.
ccp4version: the CCP4 library version as it appears in the
program banner. Typically this includes only the major and
minor version numbers, but not the patch level.
user: the user id that appears in the CCP4 banner at runtime.
runtime: the time of day that the program run started at as
reported in the program banner.
rundate: the date that the program run started at as
reported in the program banner.
termination_name: the program name as reported in the
CCP4 termination message at the tail of the program log.
termination_message: the message text displayed in the
CCP4 termination message.
usertime: the value of the 'user time' given at
termination.
systemtime: the value of the 'system time' given at
termination.
elapsedtime: the value of the 'elapsed time' given at
termination.
Note that not all these attributes may be defined, for
example if the program fragment is an incomplete CCP4 log
file or if the program is not a CCP4 program. Use the
'attributes' method to get a list of the defined
attributes.
In addition the program object also stores a list of the
keyword input lines; this list can be retrieved directly
using the 'keywords' method."""
def __init__(self):
"""Initialise a new program object."""
# Initialise the base class
fragment.__init__(self)
# Initialise program-specific flags and
# attributes
self.__isccp4 = False
self.__termination = False
# List of keyword lines
self.__keywords = []
# Dictionary of logical name/filename pairs
self.__logicalnames = {}
def isprogram(self):
"""Return True if this represents a program logfile.
Overrides the 'isprogram' method in the base class."""
return True
def isfragment(self):
"""Return True if this represents a raw logfile fragment.
Overrides the 'isfragment' method in the base class."""
return False
def set_isccp4(self, isccp4):
"""Set whether the logfile fragment is from a CCP4 program or not.
This method sets the value of the isccp4 flag to True
if the logfile fragment is determined to be from a CCP4
program, and False if not. Use the 'isccp4' method to
return the value of this flag."""
# Possibly this should be internally accessible only?
self.__isccp4 = isccp4
def isccp4(self):
"""Check if the logfile fragment is from a CCP4 program.
This returns True if the fragment of logfile appeared to
be from a CCP4 program, and False otherwise."""
return self.__isccp4
def set_termination(self, termination):
"""Set whether the logfile has a termination message.
This sets the value of the 'termination' flag to be
True if a termination message was found, and False if
not. Use the 'termination' method to return the value
of this flag."""
# FIXME should be internally accessible only?
self.__termination = termination
def termination(self):
"""Check if the logfile fragment ends with a valid termination.
This returns True if the fragment appeared to finish with a
recognised termination message, False otherwise.
Program fragments that do not end with a termination
message may have terminated prematurely due to an error."""
return self.__termination
def addkeyword(self, line):
"""Append a keyword input line to the program logfile.
This appends a keyword input line (with any leading text
removed) to the list of keyword lines stored in the
program object."""
self.__keywords.append(line)
def keywords(self):
"""Return the list of keyword lines.
This method returns a list of the keyword input lines
that have been stored for the program object. The lines
are otherwise unprocessed. The lines are stored in the
order that they were originally stored, and so should
reflect the order that they appear in the logfile."""
return self.__keywords
def addlogicalname(self, logical_name, filename):
"""Add a logical name/filename reference.
This adds a logical name and the associated filename to
the dictionary of files that were reported as being opened
in the logfile.
If the same logical name is added multiple times then only
the last associated filename is kept."""
self.__logicalnames[logical_name] = filename
def logicalnames(self):
"""Return a list of logical names associated with the program.
The name of the file associated with a logical name can
be retrieved using the 'logicalnamefile' method."""
return self.__logicalnames.keys()
def logicalnamefile(self, logical_name):
"""Return the filename associated with a logical name.
Given a logical name, return the associated filename.
If the logical name isn't found then a KeyError
exception is raised."""
try:
return self.__logicalnames[logical_name]
except KeyError:
raise KeyError("Logical name '" + str(logical_name) + "' not found")
#
# table
#
# Abstract description of a CCP4 formatted logfile table
#
class table:
"""Object describing a CCP4 logfile table
The table class represents the various components of a table
as output in CCP4 program logfiles. These tables are formatted
in a standard way that enables the data that they contain to
be displayed by the (x|j)loggraph programs.
For a description of the loggraph format see the loggraph
format documentation, e.g.
http://www.ccp4.ac.uk/dist/html/loggraphformat.html
A table consists of a number of columns of data, and a
number of graphs which are defined as being a subset of these
columns. Within smartie the table_column class represents an
individual column, and the table_graph class represents an
individual graph.
A table object can be populated when it is created, by
supplying it with text containing a CCP4-formatted table
(typically, a fragment of logfile text). Alternatively an
'empty' table can be instantiated and then populated using
the methods of the objects.
The contents of the table can be output in the correct
CCP4 format using the 'show' and 'jloggraph' methods."""
# Initialise the table object
def __init__(self, tabletext=""):
"""Create a new table object.
If tabletext contains the text of an existing
CCP4-formatted table then the table object will
attempt to parse the table and populate itself using
the supplied data.
If 'tabletext' cannot be interpreted as a table
then the table object will be 'empty' and will contain
no data. In this case, if 'tabletext' consists of a
single line with no trailing newline then the table
object title will be set to 'tabletext'
automatically."""
# Table attributes
self.__title = ""
self.__type = "GRAPHS" # Default to GRAPHS
self.__graphs = ""
self.__columns = ""
self.__text = ""
self.__data = ""
# Derived data
self.__graph_list = []
self.__column_list = []
        # Indicate whether the object has been populated
self.__table_parse_error = False
self.__nonzero = False
# The "raw" table data from the log file
self.__rawtable = ""
# Attempt to populate the table
if tabletext:
self.__rawtable = tabletext
if not self.__buildtable(tabletext):
# Failed to extract table
# If it could be a title then use this
# instead
if str(tabletext).count("\n") == 0:
self.settitle(tabletext)
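    # Usage sketch (illustrative): a table can be built directly from a block
    # of CCP4 "$TABLE"-formatted logfile text, or created empty and populated
    # through the methods below ('tabletext' is assumed to hold such a block):
    #
    #   tbl = table(tabletext)
    #   if not tbl.parse_error():
    #       print(tbl.title(), tbl.ncolumns(), "columns", tbl.nrows(), "rows")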
def __nonzero__(self):
"""Builtin: provides the True/False test.
A table object is nonzero if data has been loaded
into it either at instantiation or subsequently
using any of its methods."""
return self.__nonzero
def __str__(self):
"""Builtin: return the table title"""
if self.__nonzero:
return self.__title
return "<Unpopulated table>"
def __buildtable(self, tabletext):
"""Internal: populates the table object from an existing
formatted table.
'tabletext' should be a block of text containing a CCP4
formatted table. This text can also contain extra leading
or trailing text which is not part of the table, and this
will be ignored.
__buildtable extracts the various components of data from
the supplied table text and populates the table object
appropriately."""
# Set up the table object by parsing the
# the supplied text
tabledata = patternmatch().isccp4table(tabletext)
if not tabledata:
# No match
# The regular expression failed to process the table
self.__table_parse_error = True
return False
# Populate the table object
self.settitle(tabledata["title"])
self.settype(tabledata["type"])
self.setgraphs(tabledata["graphs"])
self.setcolumns(tabledata["columns"])
self.settext(tabledata["text"])
self.setdata(tabledata["data"])
self.__nonzero = True
return True
def __populate_columns(self):
"""Internal: populates the table_column objects.
This method processes the raw data in the body of the
loggraph text and extracts white-space delimited data
items, which are then assigned to individual columns.
Where possible data are stored using an appropriate
type, either integer, float or string."""
# Parse the raw data and populate the table_column
# objects for this table
i = 0
for item in self.__data.split():
self.table_column(i).append(item)
i += 1
if i == self.ncolumns():
i = 0
        # If the data items fill the columns exactly then
        # i should be zero at the end
if i != 0:
# This error could be due to two data items
# no longer being separated by whitespace
print("Unable to parse table - too many data items (or not enough)?")
print('Table title: "' + str(self.title()) + '"')
print("Number of columns : " + str(self.ncolumns()))
print("Number of data items: " + str(len(self.__data.split())))
self.__table_parse_error = True
def parse_error(self):
"""Check if the supplied table was parsed correctly.
If there was a problem parsing the raw table text (for
example if the table was incomplete or misformatted) then
parse_error() will return True, otherwise it will be
False."""
# Check the table_parse_error flag
return self.__table_parse_error
def setrawtable(self, rawtable):
"""Store the 'raw' table text from the original logfile.
The raw table data is the original text (for example, the
fragment of log file text) supplied to the object to
populate itself from."""
# Store the "raw" table data
self.__rawtable = rawtable
def rawtable(self):
"""Return the 'raw' table text taken from the logfile.
This returns any original 'raw' text of the table that
was used to populate the table object.
If the table object wasn't populated from a text fragment
then this will return an empty string. The 'loggraph' and
'jloggraph' methods are recommended over the 'rawtable'
method as a way to return the table data formatted with
loggraph tags."""
return self.__rawtable
def settitle(self, title):
"""Store the table title.
The table title is an arbitrary string of text that is
intended to describe briefly the nature of the data
presented in the table."""
self.__title = title
self.__nonzero = True
def title(self):
"""Return the table title stored in the object."""
return self.__title
def settype(self, graphtype):
"""Store the table graph type.
This is currently one of two possible loggraph keywords,
either GRAPHS or SCATTER. The keyword is an indication
to plotting software of how the data should be displayed:
GRAPHS: line graphs, with data points joined by lines
SCATTER: scatter plots, with data plotted as points.
Raises a ValueError if the graphtype is not recognised."""
if str(graphtype).find("GRAPH") > -1:
self.__type = "GRAPHS"
elif str(graphtype).find("SCATTER") > -1:
self.__type = "SCATTER"
else:
# Unknown type of graph - raise an exception
raise ValueError(
"Unknown graph type: "
+ graphtype
+ "\n"
+ "Must be one of 'GRAPHS' or 'SCATTER'"
)
self.__nonzero = True
def type(self):
"""Return the table graph type.
See the 'settype' method for the possible values and their
associated meanings for the table graph type."""
return self.__type
def nrows(self):
"""Return the number of complete rows in the table.
Returns the length of the shortest column of data stored
in the table."""
if self.ncolumns() == 0:
return 0
nrows = self.table_column(0).nrows()
for i in range(1, self.ncolumns()):
nrows = min(self.table_column(i).nrows(), nrows)
return nrows
def setgraphs(self, graphs):
"""Store the graph definitions in the table in 'raw' format.
        Within a CCP4-formatted table, one or more graphs can
be defined using simple strings. Generally the descriptions
take the form:
:graph1 name:graphtype:column_list:
:graph2 name:graphtype:column_list: ...
(The graph definitions can be separated using whitespace,
not necessarily newlines).
The 'setgraphs' method takes an arbitrary number of graph
definition strings of the above form and extracts from each
the data, namely: the graph name (i.e. title), the type
(normally either GRAPH or SCATTER) and a list of column
numbers in the table."""
self.__graphs = graphs
# Create table_graph objects
rgraph = re.compile(r":([^:]+):([^:]+):([^:]+):")
for graph in rgraph.findall(graphs):
new_graph = self.addgraph(graph[0])
new_graph.setscaling(graph[1])
new_graph.setcolumns(graph[2])
self.__nonzero = True
def graphs(self):
"""Return the graph titles and descriptions.
This method returns the 'raw' string containing the
graph definitions for the table, which were originally
supplied via the 'setgraphs' method. Of itself this
data is probably not very useful."""
return self.__graphs
def setcolumns(self, columns):
"""Create new columns in the table from the 'raw' data.
Within a CCP4-formatted table, titles of columns are
supplied as a string of white-space delimited 'tokens'
(the tokens are the titles). For example:
Resln_Range 1/resol^2 Nref Nz1 Nz2 Nz3 ...
This string is supplied to the setcolumns method as the
'columns' argument. setcolumns then extracts the
individual column titles and for each makes a new
(empty) table_column object.
If table values have previously been stored in the
table object (via the 'setdata' method) then setcolumns
will also attempt to populate the columns from this
data."""
# Store the column titles ("raw" format)
# This is a list of white-space separated strings
self.__columns = columns
# Create table_column objects
for col in columns.split():
self.addcolumn(col)
# Attempt to populate the column objects
if self.__data:
self.__populate_columns()
self.__nonzero = True
def columns(self):
"""Return the original column titles text string.
This method returns the 'raw' string that was supplied
via the 'setcolumns' method, i.e. a single string with
whitespace delimited column titles. Of itself this
data is probably not very useful."""
return self.__columns
def settext(self, text):
"""Store the arbitrary text from the table.
Within a CCP4-formatted table there is space for a
        block of 'arbitrary text' (see the table format documentation
for more details on this). This text is not used when
plotting graphs but is included when the data in the
table is written back out in CCP4 $TABLE format."""
self.__text = text
self.__nonzero = True
def text(self):
"""Return the arbitrary text from the table.
This returns any arbitrary text associated with the
table header that was previously stored using the
'settext' method."""
return self.__text
def setdata(self, data):
"""Store the raw tabulated data for the table.
The body of a CCP4-formatted table contains tabulated
data items corresponding to columns associated with the
column titles. The table body consists of a sequence
of white-space separated data items (typically numbers
but could be other data).
The table body data is supplied to this method as a
single text string via the 'data' argument, and is
stored as is in the table object.
If table columns have also previously been defined
then this method will further attempt to populate the
columns with the data from the table body."""
# Store the data from the table ("raw" format)
# This is a list of whitespace separated data items
self.__data = data
# Attempt to populate the column objects
if self.ncolumns() > 0:
self.__populate_columns()
self.__nonzero = True
def data(self):
"""Return the 'raw' data from the table body.
This method returns the 'raw' table body text as
supplied originally via the 'setdata' method. Of
itself this data is probably not very useful."""
return self.__data
def ncolumns(self):
"""Return the number of columns in the table.
This method returns the number of table_column
objects associated with the table."""
return len(self.__column_list)
def addcolumn(self, title=""):
"""Add a new column to the table.
This method adds a new 'table_column' object to
the table, to represent a column of data.
Optionally the name of the column can be supplied
via the 'title' argument. The table_column is
otherwise unpopulated.
The new table_column is returned by this method."""
new_column = table_column()
self.__column_list.append(new_column)
if title:
new_column.settitle(title)
return new_column
def list_columns(self):
"""Return a list of the column names defined in the graph."""
columns = []
for icol in range(0, self.ncolumns()):
columns.append(self.table_column(icol).title())
return columns
def add_data(self, rowdata):
"""Add a row of values to the table.
'rowdata' is a dictionary which specifies column
names as keys defining the values to be appended. For
example, if the table has columns called 'X', 'Y' and
'Z', then rowdata might be defined as:
rowdata = { 'X': 0.0, 'Y': 0.776, 'Z': 878 }
'Null' values (i.e. '*' character) will be added to
columns not named to keep the table data well-formed.
For example:
rowdata = { 'X': 0.0, 'Z': 878 }
        will assign the expected data to the 'X' and 'Z' columns,
while assigning the '*' character to the 'Y' column.
"""
if not rowdata.keys():
# No columns were specified
return
for colnam in rowdata.keys():
            # Check that the column is actually defined
# in the table
try:
self.list_columns().index(colnam)
except ValueError:
# The column name wasn't found
raise ValueError(
"Column " + str(colnam) + " is not defined in the table"
)
for icol in range(0, self.ncolumns()):
# Look up whether the column has an
# explicit value assigned
colnam = self.table_column(icol).title()
if colnam in rowdata:
self.table_column(icol).append(rowdata[colnam])
else:
# Assign a null value
self.table_column(icol).append("*")
def definegraph(self, title, columns, scaling=""):
"""Add a new graph definition to the table.
This provides an interface to adding new graph
definitions to an existing table.
title: title for the graph.
columns: a list of column names. The first column
        will be the X-axis, the others will be
the Y-values.
scaling: (optional) the scaling definition.
Possible scaling strings are:
'A': fully automatic (axes limits are automatically
        determined when the graph is rendered); this is
        the default
'N': 'nought', axes limits start at zero
'XMIN|XMAXxYMIN|YMAX': limits xmin,xmax and ymin,ymax.
        Raises a ValueError if an insufficient number of columns
are specified, or if a specified column name doesn't
appear in the table.
"""
# Check that there are at least two columns
if len(columns) < 2:
raise ValueError("Graph definition needs at least two columns")
# Build the graph description i.e. list of comma-separated
# column numbers (this is the loggraph format)
graph_desc = ""
for colnam in columns:
found = False
for icol in range(0, self.ncolumns()):
if self.table_column(icol).title() == colnam:
graph_desc = graph_desc + "," + str(icol + 1)
found = True
break
if not found:
# The specified column wasn't located
# Raise an exception
raise ValueError("Column " + str(colnam) + " not found in table")
        # The list is built - strip any leading commas
graph_desc = graph_desc.strip(",")
# Add a 'blank' table_graph
new_graph = self.addgraph(title)
# Scaling type
new_graph.setscaling("A")
if scaling != "":
new_graph.setscaling(scaling)
new_graph.setcolumns(graph_desc)
return new_graph
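    # Usage sketch (illustrative): defining a graph for a table whose columns
    # include 'Cycle', 'Rfactor' and 'Rfree' (automatic scaling is used
    # unless a scaling string is supplied):
    #
    #   tbl.definegraph("R factors per cycle", ("Cycle", "Rfactor", "Rfree"))
    #   print(tbl.show(loggraph=True))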
def table_column(self, i):
"""Return the i'th column associated with the table.
This returns the i'th table_column object in the
table. Note that i counts from zero.
Generally applications that want to examine the data
stored in a table are better off using the 'col'
method of the table class rather than the
'table_column' method. The 'col' method allows data
to be retrieved by column name, and returns the
column of data as a Python list."""
return self.__column_list[i]
def col(self, name):
"""Return the data in the column identified by 'name'.
This method returns the data in the table column
identified by 'name' (for example, 'Rfree'), as a
list of values. (This is a copy of the list of values
in the table_column object representing the column.)
If the named column isn't found then a LookupError
exception is raised."""
# Identify the column corresponding to the
# supplied name and return a copy of the data
for i in range(0, self.ncolumns()):
if self.table_column(i).title() == name:
return copy.copy(self.table_column(i).data())
# Column not found
raise LookupError("Column called '" + str(name) + "' not found")
def ngraphs(self):
"""Return the number of graphs defined in the table.
This method returns the number of 'table_graph'
objects associated with the table."""
return len(self.__graph_list)
def addgraph(self, title=""):
"""Add a new graph object to the table.
This method adds a new 'table_graph' object to the
table. Optionally the name of the new graph can be
supplied using the 'title' argument. All other graph
attributes are unset by default."""
new_graph = table_graph()
self.__graph_list.append(new_graph)
new_graph.set_parent_table(self)
if title:
new_graph.settitle(title)
return new_graph
def table_graph(self, i):
"""Return the i'th graph object.
This method returns the i'th table_graph object
associated with the table. (Note that i starts from
zero.)"""
return self.__graph_list[i]
def jloggraph(self, codebase="", width=400, height=300):
"""Return a jloggraph-formatted table.
This method returns the text for CCP4-formatted table
from this object which includes the HTML tags required
for the jloggraph Java applet.
The codebase argument should specify the full path for
the JLogGraph.class and JLogCanvas.class files required
to run the applet (typically this is $CCP4/bin/)."""
# Wraps the show method
jloggraph = (
'<applet width="'
+ str(width)
+ '" height="'
+ str(height)
+ '" code="JLogGraph.class"\n'
+ 'codebase="'
+ str(codebase)
+ '"><param name="table" value="\n'
)
jloggraph = jloggraph + self.show(loggraph=True)
jloggraph = jloggraph + '"><b>For inline graphs use a Java browser</b></applet>'
return jloggraph
def loggraph(self, pad_columns=True):
"""Return a loggraph-formatted table.
The loggraph method generates the text of the table based
on the data stored in the object, with the correct
        tags defining the columns and graphs, so that it should
be viewable in (x)loggraph.
For information on the 'pad_columns' option, see the 'show'
method (the setting here is passed directly to 'show').
To generate jloggraph-formatted tables use the
jloggraph method."""
return self.show(loggraph=True, html=True, pad_columns=pad_columns)
def show(self, loggraph=False, html=False, pad_columns=True):
"""Return the text of a CCP4-formatted table.
The show method generates the text of the table based
on the data stored in the object. If the 'loggraph'
argument is specified as 'True' then the table includes
        the correct tags defining the columns and graphs, so
        that it should be viewable in (x)loggraph. If the 'html'
argument is specified then special HTML characters in
the titles are escaped.
If 'pad_columns' is True then columns will be padded
with spaces in order to make them line up nicely. If
padding is not required then set it to False."""
tabletext = ""
# Preamble for loggraph
if loggraph:
table_title = self.title()
if html:
table_title = escape_xml_characters(table_title)
tabletext = (
tabletext + "$TABLE: " + table_title + ":\n$" + self.type() + "\n"
)
# Graph descriptions
for i in range(0, self.ngraphs()):
graph = self.table_graph(i)
graph_title = graph.title()
if html:
graph_title = escape_xml_characters(graph_title)
tabletext = tabletext + " :" + graph_title + ":" + graph.scaling() + ":"
for col in graph.columns():
tabletext = tabletext + str(col + 1) + ","
tabletext = tabletext.rstrip(",")
tabletext = tabletext + ":\n"
tabletext = tabletext + "$$\n"
# Columns and rows
ncolumns = self.ncolumns()
if ncolumns > 0:
nrows = len(self.table_column(0))
else:
nrows = 0
# Determine field widths for printing
field_width = []
if pad_columns:
for i in range(0, ncolumns):
max_width = len(self.table_column(i).title())
for item in self.table_column(i).data():
if len(str(item)) > max_width:
max_width = len(str(item))
if max_width >= len(self.table_column(i).title()):
# Put in an extra space again
max_width = max_width + 1
field_width.append(max_width)
else:
for i in range(0, ncolumns):
field_width.append(0)
# Column titles
for i in range(0, ncolumns):
title = self.table_column(i).title()
while len(title) < field_width[i]:
title = " " + title
tabletext = tabletext + " " + title
# Arbitrary text in loggraph format
if loggraph:
tabletext = tabletext + " $$"
if self.text():
tabletext = tabletext + self.text()
tabletext = tabletext + " $$\n"
else:
tabletext = tabletext + "\n\n"
# The columns of data
for i in range(0, nrows):
for j in range(0, ncolumns):
item = self.table_column(j)[i]
while len(str(item)) < field_width[j]:
item = " " + str(item)
tabletext = tabletext + " " + str(item)
tabletext = tabletext + "\n"
# End of table
if loggraph:
tabletext = tabletext + "$$"
return tabletext
def html(self, border=2):
"""Return the text of a table with HTML formatting.
This method returns the body of the table (column
titles and column data) marked up as a HTML table.
The width of the table can be controlled by setting
the 'border' argument.
Any HTML special characters (<, > and &) in the
column titles or data items are automatically
converted to the correct form for HTML."""
tabletext = '<table border="' + str(border) + '">\n'
# Columns and rows
ncolumns = self.ncolumns()
if ncolumns > 0:
nrows = len(self.table_column(0))
else:
nrows = 0
# Column titles
tabletext = tabletext + "<tr>\n"
for i in range(0, ncolumns):
title = self.table_column(i).title()
tabletext = (
tabletext + " <th>" + str(escape_xml_characters(title)) + "</th>\n"
)
tabletext = tabletext + "</tr>\n"
# The columns of data
for i in range(0, nrows):
tabletext = tabletext + "<tr>\n"
for j in range(0, ncolumns):
item = self.table_column(j)[i]
tabletext = (
tabletext + " <td>" + str(escape_xml_characters(item)) + "</td>\n"
)
tabletext = tabletext + "</tr>\n"
# End of table
tabletext = tabletext + "</table>"
return tabletext
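# Usage sketch (illustrative): the same table object can be rendered in
# several forms - plain text, loggraph/jloggraph markup for CCP4 viewers,
# or a simple HTML table ('tbl' is assumed to be a populated table object):
#
#   print(tbl.show())              # padded plain-text table
#   print(tbl.loggraph())          # with $TABLE/$GRAPHS/$$ markup
#   print(tbl.html(border=1))      # HTML <table> markup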
#
# table_graph
#
# Abstract description of a graph in a CCP4 logfile table
#
class table_graph:
"""Object describing a graph in a CCP4 logfile table.
Tables in logfiles can contain any number of 'graphs',
which are represented within smartie by table_graph
objects.
A graph is defined by a title, a scaling type, and a
collection of table_columns storing columns of data."""
# Initialise the table_graph object
def __init__(self, title="", scaling="", column_list=None):
"""Create a new table_graph object.
The 'title' argument is a string containing the title
for the graph.
'scaling' is a string describing how the graph should
be displayed within the (x|j)loggraph program.
'column_list' is a list of integers corresponding to
the columns in the table that holds the graph. The first
column in the list will form the 'x' axis of the graph
        when displayed; the others will be displayed on the
'y' axis."""
if column_list is None:
column_list = []
self.__title = title
self.__column_list = column_list
self.__scaling = scaling
if self.__title:
self.__nonzero = True
else:
self.__nonzero = False
# Store a reference to the parent table
self.__parent_table = None
def __nonzero__(self):
return self.__nonzero
def settitle(self, title):
"""Store the title of the graph."""
self.__title = title
def title(self):
"""Return the title of the graph."""
return self.__title
def set_parent_table(self, table):
"""Store a reference to the parent table object."""
self.__parent_table = table
def graphcols(self):
"""Return a list of the column names in the graph."""
columns = []
table = self.__parent_table
for col in self.__column_list:
columns.append(table.table_column(col).title())
return columns
def setscaling(self, scaling):
"""Store the scaling description.
This is a string which should take one of three possible
forms, and which is an instruction to the display
program on how to scale the graph data for display.
'A' is 'fully automatic' scaling (the display program
determines the scaling itself for both axes).
'N' (for 'nought') is automatic y coordinate scaling, where
        the lowest limit on the y axis is 0.
'XMIN|XMAXxYMIN|YMAX' (where XMIN, XMAX and YMIN, YMAX are
numbers) specifies the exact limits of both axes."""
self.__scaling = scaling
def scaling(self):
"""Return the scaling description."""
return self.__scaling
def setcolumns(self, columns):
"""Set the table_columns associated with the graph.
The columns are specified as a string of the form
e.g. '1,2,4,5'. Note that the column numbers are adjusted
downwards by 1 to map onto Python numbering (which starts
at zero)."""
self.__column_list = []
for i in columns.split(","):
if str(i).strip().isdigit():
self.__column_list.append(int(i) - 1)
def columns(self):
"""Return the list of columns associated with the graph.
This is a list of integers corresponding to the columns
in the table."""
return self.__column_list
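# Usage sketch (illustrative): once a graph belongs to a table, its column
# numbers can be translated back into column titles ('tbl' is assumed to be
# a table with at least one graph defined):
#
#   graph = tbl.table_graph(0)
#   print(graph.title(), graph.scaling(), graph.graphcols())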
#
# table_column
#
# Abstract description of a column in a CCP4 logfile table
#
class table_column:
"""Object describing a column in a CCP4i logfile table"""
def __init__(self, title=""):
"""Initialise the table_column object."""
self.__title = title
self.__data = []
if self.__title:
self.__nonzero = True
else:
self.__nonzero = False
def __nonzero__(self):
"""Returns True if the column contains data, False otherwise."""
return self.__nonzero
def __len__(self):
"""Implements len(table_column)."""
return len(self.__data)
def __getitem__(self, key):
"""Implement table_column[i] to return the i'th data value."""
return self.__data[key]
def settitle(self, title):
"""Set the title of the column."""
self.__title = title
def title(self):
"""Return the title of the column."""
return self.__title
def append(self, item):
"""Append a data value to the end of the column.
The value will be stored as integer, float or string as
appropriate."""
try:
# Is it a float?
value = float(item)
# But, could it be an integer?
# Try a horrible test
            if float(int(value)) == value:
# It's actually an integer
value = int(item)
except ValueError:
# Not a numerical value - store as a string
value = item
# Add the data item as the correct type
self.__data.append(value)
def data(self):
"""Return the list of data values in the column."""
return self.__data
def nrows(self):
"""Return the number of rows in the column."""
return len(self.__data)
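# Usage sketch (illustrative): table_column.append stores each value as an
# integer, float or string depending on what the item looks like:
#
#   col = table_column("Nref")
#   for item in ("100", "3.5", "*"):
#       col.append(item)
#   print(col.data())   # e.g. [100, 3.5, '*']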
#
# keytext
#
# Abstract description of a CCP4 formatted keytext message
#
class keytext:
"""Object describing a keytext message in a CCP4 logfile"""
# Initialise the keytext object
def __init__(self, name="", junk_text="", message=""):
self.setname(name)
self.setjunk_text(junk_text)
self.setmessage(message)
def setname(self, name):
# Set the name attribute
self.__name = str(name).strip()
def name(self):
# Return the name attribute
return self.__name
def setjunk_text(self, junk_text):
# Set the junk_text attribute
self.__junk_text = str(junk_text).strip()
def junk_text(self):
# Return the junk_text attribute
return self.__junk_text
def setmessage(self, message):
# Set the message attribute
self.__message = str(message).strip()
def message(self):
        # Return the message attribute
return self.__message
#
# ccp4i_info
#
# Abstract description of a CCP4i information message
#
class ccp4i_info(fragment):
"""Object describing a CCP4i information message in a CCP4 logfile.
The ccp4i_info class has the following attributes:
'message': the text of the CCP4i information message."""
# Initialise the ccp4i_info object
def __init__(self):
# Initialise the base class
fragment.__init__(self)
# Initialise program-specific flags and
# attributes
self.set_attribute("message", "")
def isccp4i_info(self):
return True
def isfragment(self):
return False
#
# summary
#
# Abstract description of a CCP4 "summary" block
#
class summary:
"""Object describing a summary block in a CCP4 logfile.
The summary object holds information about the location
of a block of text in a logfile. Normally this text would
be a summary block from a CCP4 logfile, which is
identified as starting with the text '<!--SUMMARY_BEGIN-->'
and terminating with the text '<!--SUMMARY_END-->'.
In practice, the summary object has three attributes: the
name of a source file, and the start and end line numbers
of the block of text within that file. The actual text is
not stored. It can be fetched using the 'retrieve' method,
in which case it is read directly from the file and
returned."""
# Initialise the keytext object
def __init__(self, source_file, start_line=-1):
self.__source_file = source_file
if start_line > 0:
self.__start_line = start_line
else:
self.__start_line = -1
self.__end_line = -1
def set_start(self, start_line):
"""Set the start line for the summary block."""
self.__start_line = start_line
def set_end(self, end_line):
"""Set the end line for the summary block."""
self.__end_line = end_line
def start(self):
"""Return the start line for the summary block."""
return self.__start_line
def end(self):
"""Return the end line for the summary block."""
return self.__end_line
def iscomplete(self):
"""Check whether the summary block is complete.
Returns True if the start and end line numbers
are valid and consistent, and False otherwise."""
if self.__start_line < 0:
return False
if self.__end_line < self.__start_line:
return False
return True
def retrieve(self):
"""Return the text within the summary block."""
if not self.iscomplete():
return ""
return retrieve(self.__source_file, self.__start_line, self.__end_line)
class patternmatch:
"""Object holding regular expressions for logfile features.
The patternmatch object provides a set of methods that can
match various features that might be found in CCP4 logfiles,
and logfiles from other programs. These are:
isccp4banner: check for CCP4 program banner
isccp4termination: check for CCP4 program termination message
isshelxbanner: check for SHELX program banner
isshelxtermination: check for SHELX program termination
isccp4keytext: check for CCP4 keytext messages
isccp4table: check for CCP4 table
isccp4iheader: check for CCP4i logfile header line
isccp4itail: check for CCP4i logfile tail line
isccp4iinformation: check for CCP4i information block
It also provides methods to match single lines:
isdataline: check if line contains CCP4 keyword input
isfileopen: check if line contains CCP4 file opening information
issummary_begin: check if line contains the start of a summary
issummary_end: check if the line contains the end of a summary
In each case, the method returns False if there is no match,
and a dictionary of data items if there is a match. The data
items are dependent on the type of pattern that is matched -
see the information for the relevant method for descriptions."""
def __init__(self):
# Initialise
# Create a dictionary to hold the regular expressions
self.__patterns = dict()
def compile(self, name, pattern):
"""Returns a compiled regular expression from the pattern.
This method returns a compiled regular expression associated
with 'name', based on the supplied 'pattern'. If the name
already has a compiled expression then that is returned,
otherwise the compile method compiles and stores it before
returning it."""
try:
return self.get_pattern(name)
except KeyError:
return self.store_pattern(name, re.compile(pattern))
def has_pattern(self, name):
"""Returns True if there is a pattern associated 'name'."""
return name in self.__patterns
def store_pattern(self, name, cpattern):
"""Store a compiled regular expression associated with 'name'.
'cpattern' is a compiled regular expression which
will be associated with 'name'. The expression can be
        retrieved using the 'get_pattern' method."""
# Store the compiled regular expression in "pattern"
# with the key "name"
if not self.has_pattern(name):
self.__patterns[name] = cpattern
return cpattern
# Raise an exception if a pattern has already been
# stored with the same name
raise KeyError
def get_pattern(self, name):
"""Fetch a compiled regular expression associated with 'name'."""
return self.__patterns[name]
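    # Usage sketch (illustrative): 'compile' caches compiled expressions by
    # name, so repeated calls with the same name reuse the stored pattern
    # (the name and pattern below are examples only):
    #
    #   pm = patternmatch()
    #   regex = pm.compile("blank_line", r"^\s*$")
    #   assert pm.compile("blank_line", r"^\s*$") is regex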
def isccp4banner(self, text):
"""Regular expression match to CCP4 program banner.
Given a block of text, attempts to match it against
regular expressions for a CCP4 program banner.
Returns False if the match fails, otherwise returns
a dictionary object populated with attributes
derived from the supplied text.
See the isccp4banner_standard and isccp4banner_phaser
functions for descriptions of the attributes that are
extracted."""
# Try standard CCP4 banner
result = self.isccp4banner_standard(text)
if not result:
# Try Phaser-style CCP4 banner
result = self.isccp4banner_phaser(text)
if not result:
# Try old-style CCP4 banner
result = self.isccp4banner_old(text)
return result
# Match CCP4 program termination
def isccp4termination(self, text):
"""Regular expression match to CCP4 program termination.
Given a block of text, attempts to match it against
regular expressions for a CCP4 program termination.
Returns False if the match fails, otherwise returns
a dictionary object populated with attributes
derived from the supplied text.
See the isccp4termination_standard and
isccp4termination_phaser functions for descriptions of
the attributes that are extracted."""
# Try standard CCP4 termination
result = self.isccp4termination_standard(text)
if not result:
# Try Phaser-style CCP4 termination
result = self.isccp4termination_phaser(text)
return result
# Match standard CCP4 program banner
def isccp4banner_standard(self, text):
"""Test if text matches a standard CCP4 program banner.
If the match fails then return False; if it succeeds then
return a dictionary with the following keys:
name: the name of the program from the CCP4 banner.
version: the program version; for CCP4 programs, this is the
version found in the program banner. For programs that don't
explicitly give their own version number this will be the same
as the CCP4 library version.
date: the date string found in the CCP4 banner; it is
typically the last date that the source code file was
committed to CVS. (It is not the date that the program was
run on - for that, use 'rundate' and 'runtime').
ccp4version: the CCP4 library version as it appears in the
program banner. Typically this includes only the major and
minor version numbers, but not the patch level.
user: the user id that appears in the CCP4 banner at runtime.
runtime: the time of day that the program run started at as
reported in the program banner.
rundate: the date that the program run started at as
reported in the program banner."""
#
# Current banner looks like:
# ###############################################################
# ###############################################################
# ###############################################################
# ### CCP4 5.99: Refmac_5.2.0019 version 5.2.0019 : 04/08/05##
# ###############################################################
# User: pjx Run date: 25/10/2005 Run time: 15:19:23
#
# There is also an intermediate version between 4.0 and later:
# 1###############################################################
# ###############################################################
# ###############################################################
# ### CCP4 4.1: OASIS version 4.1 : 12/02/01##
# ###############################################################
# User: pjx Run date: 14/ 5/01 Run time:15:24:36
#
if text.find("### CCP") < 0:
return dict()
banner = self.compile(
"isccp4banner_standard",
r"(?: |1)#{63,63}\n #{63,63}\n #{63,63}\n ### CCP4 ([0-9.]+[a-z]*): ([A-Za-z0-9_().]+) *version ([0-9.]+[a-z]*) *: ([0-9 /]+)##\n #{63,63}\n User: ([^ ]+) *Run date: ([0-9 /]+) Run time: ?([0-9:]+) ?",
).search(text)
# banner = rbanner.search(text)
result = dict()
if banner:
result["banner_text"] = banner.group(0)
result["ccp4version"] = banner.group(1)
result["name"] = banner.group(2)
result["version"] = banner.group(3)
result["date"] = banner.group(4)
result["user"] = banner.group(5)
result["rundate"] = banner.group(6)
result["runtime"] = banner.group(7)
result["nlines"] = banner.group(0).count("\n")
return result
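    # Usage sketch (illustrative): matching a block of logfile text against
    # the standard banner pattern ('text' is assumed to hold the head of a
    # CCP4 program log):
    #
    #   info = patternmatch().isccp4banner_standard(text)
    #   if info:
    #       print(info["name"], info["version"], info["rundate"])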
# Match standard CCP4 program termination
def isccp4termination_standard(self, text):
"""Test if text matches a standard CCP4 program termination.
If the match fails then return False; if it succeeds then
return a dictionary with the following keys:
termination_name: the program name as reported in the
CCP4 termination message at the tail of the program log.
termination_message: the message text displayed in the
CCP4 termination message, e.g. 'Normal termination'.
usertime: the value of the 'user time' given at
termination.
systemtime: the value of the 'system time' given at
termination.
elapsedtime: the value of the 'elapsed time' given at
termination."""
#
# Termination looks like:
# Refmac_5.2.0019: End of Refmac_5.2.0019
# Times: User: 6.0s System: 0.4s Elapsed: 0:07
#
# (Note that older program logs may have additional or different
# whitespace arrangements)
#
if text.find("Times: User: ") < 0:
return dict()
term = self.compile(
"isccp4termination_standard",
r" *([A-Za-z0-9_().]+): *([^\n]+)\n *Times: User: +([0-9.]+)s System: +([0-9.]+)s Elapsed: +([0-9:]+) *",
).search(text)
result = dict()
if term:
result["termination_text"] = term.group(0)
result["termination_name"] = term.group(1)
result["termination_message"] = term.group(2)
result["usertime"] = term.group(3)
result["systemtime"] = term.group(4)
result["elapsedtime"] = term.group(5)
result["nlines"] = term.group(0).count("\n")
return result
# Match "phaser-style" CCP4 banner
def isccp4banner_phaser(self, text):
"""Test if text matches a 'phaser-style' CCP4 program banner.
'Phaser-style' banners look similar to CCP4 banners but
contain some different information. They are also used by
the 'pointless' program.
If the match fails then return an empty dictionary; if it succeeds then
return a dictionary with the following keys:
name: the name of the program from the banner.
version: the reported program version.
user: the user id that appears in the banner at runtime.
rundate: the date that the program run started at as
reported in the program banner.
runtime: the time of day that the program run started at as
reported in the program banner.
os: corresponds to the 'os type' as reported in the banner,
for example 'linux'.
date: corresponds to the 'release date' of the program as
reported in the banner.
ccp4version: currently set to '?'."""
# This style of banner looks like:
# 1234567890123456789012345678901234567890123456789012345678901234567890123456789012345
# #####################################################################################
# #####################################################################################
# #####################################################################################
# ### CCP4 PROGRAM SUITE: Phaser 1.3.2 ###
# #####################################################################################
# User: pjx
# Run time: Wed May 17 09:27:42 2006
# Version: 1.3.2
# OS type: linux
# Release Date: Sun Feb 5 17:29:18 2006
#
# Note:
# 1. The "OS type" line may not always be present
# 2. Pointless also writes a similar banner, but with "CCP4 SUITE"
# substituted for "CCP4 PROGRAM SUITE"
#
# The regular expression accommodates both these differences.
if text.find("### CCP") < 0:
return dict()
banner = self.compile(
"isccp4banner_phaser",
r"#+\n#+\n#+\n### CCP4 (PROGRAM )?SUITE: ([A-Za-z0-9_.]+) *([0-9.]+) *###\n#+\nUser: *([^ ]+)\nRun time: *([A-Za-z0-9: /]+)\nVersion: *([0-9.]+)(?:\nOS type: *)?([^\n]*)\nRelease Date: *([A-Za-z0-9: /]+)",
).search(text)
result = dict()
if banner:
##print "Identified Phaser-style banner"
result["banner_text"] = banner.group(0)
result["name"] = banner.group(2)
result["version"] = banner.group(3)
result["user"] = banner.group(4)
result["rundate"] = banner.group(5)
result["runtime"] = banner.group(5)
result["os"] = banner.group(7)
result["date"] = banner.group(8)
result["ccp4version"] = "?"
result["nlines"] = banner.group(0).count("\n")
return result
# Match "phaser-style" CCP4 program termination
def isccp4termination_phaser(self, text):
"""Test if text matches a 'phaser-style' CCP4 program termination.
If the match fails then return an empty dictionary; if it succeeds then
return a dictionary with the following keys:
termination_name: the program name as reported in the
CCP4 termination message at the tail of the program log.
termination_message: the message text displayed in the
CCP4 termination message, e.g. 'SUCCESS'.
systemtime: the value of the 'CPU time' given at
termination.
Note that this is a subset of the attributes collected
for standard CCP4 termination messages."""
# This style of banner looks like:
# 12345678012345678012
# --------------------
# EXIT STATUS: SUCCESS
# --------------------
#
# CPU Time: 0 days 0 hrs 1 mins 34.43 secs (94.43 secs)
# Finished: Wed May 17 09:29:25 2006
if text.find("EXIT STATUS:") < 0:
return dict()
term = self.compile(
"isccp4termination_phaser",
r"\-*\nEXIT STATUS: *([^\n]+)\n\-*\n\nCPU Time: *([A-Za-z0-9 \.\(\)]*)\nFinished: *([A-Za-z0-9 \.\(\)]*)",
).search(text)
result = dict()
if term:
result["termination_text"] = term.group(0)
result["termination_message"] = term.group(1)
result["systemtime"] = term.group(2)
result["nlines"] = term.group(0).count("\n")
return result
# Match old-style standard CCP4 program banner
def isccp4banner_old(self, text):
"""Test if text matches an old-style CCP4 program banner.
'Old-style' banners come from versions of CCP4 predating
version 4.1 of the suite.
If the match fails then return an empty dictionary; if it succeeds then
return a dictionary with the following keys:
name: the name of the program from the CCP4 banner.
version: the program version; for CCP4 programs, this is the
version found in the program banner. For programs that don't
explicitly give their own version number this will be the same
as the CCP4 library version.
date: the date string found in the CCP4 banner; it is
typically the last date that the source code file was
committed to CVS. (It is not the date that the program was
run on - for that, use 'rundate' and 'runtime').
ccp4version: the CCP4 library version as it appears in the
program banner. Typically this includes only the major and
minor version numbers, but not the patch level.
user: the user id that appears in the CCP4 banner at runtime.
runtime: the time of day that the program run started at as
reported in the program banner.
rundate: the date that the program run started at as
reported in the program banner."""
#
# Banner looks like:
# 123456789012345678901234567890123456789012345678901234567890
# 1##########################################################
# ##########################################################
# ##########################################################
# ### CCP PROGRAM SUITE: dm VERSION 4.0: 26/11/98##
# ##########################################################
# User: pjx Run date: 3/16/00 Run time:14:12:40
if text.find("### CCP") < 0:
return dict()
banner = self.compile(
"isccp4banner_old",
r"1#{58,58}\n #{58,58}\n #{58,58}\n ### CCP PROGRAM SUITE: ([A-Za-z0-9_().]+) *VERSION ([0-9.]+) *: ([0-9 /]+)##\n #{58,58}\n User: ([^ ]+) *Run date: ([0-9 /]+) Run time:([0-9:]+) ?",
).search(text)
result = dict()
if banner:
result["banner_text"] = banner.group(0)
result["name"] = banner.group(1)
result["ccp4version"] = banner.group(2)
result["version"] = result["ccp4version"]
result["date"] = banner.group(3)
result["user"] = banner.group(4)
result["rundate"] = banner.group(5)
result["runtime"] = banner.group(6)
result["nlines"] = banner.group(0).count("\n")
return result
# Match CCP4 keytext i.e. $TEXT ...
def isccp4keytext(self, text):
"""Test if text matches CCP4 keytext message ($TEXT).
If the match fails then return an empty dictionary; if it succeeds then
return a dictionary with the following keys:
name: the message 'name' or identifier
junk_text: 'junk' text provided by the program (normally ignored)
message: the message text
nlines: the number of lines of text covered by the entire keytext
message block."""
#
# See e.g. http://www.ccp4.ac.uk/dist/html/loggraphformat.html
# for format of TEXT information, but essentially it's:
#
# $TEXT :text name: $$ junk (ignored) text $$any text characters$$
#
keytext = self.compile(
"isccp4keytext", r"\$TEXT[ \n]*:([^:]*):[ \n]*\$\$([^\$]*)\$\$([^\$]*)\$\$"
).search(text)
result = dict()
if keytext:
result["name"] = keytext.group(1)
result["junk_text"] = keytext.group(2)
result["message"] = keytext.group(3)
result["nlines"] = keytext.group(0).count("\n")
return result
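# Illustrative example (the input string is an assumption, not from a real log):
#
#   $TEXT :Warning: $$ comment $$Low resolution data$$
#
# should be matched by isccp4keytext, giving name 'Warning',
# junk_text ' comment ' and message 'Low resolution data'.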
# Match CCP4 TABLE
def isccp4table(self, text):
"""Test if text matches CCP4 logfile table ($TABLE).
If the match fails then return an empty dictionary; if it succeeds then
return a dictionary with the following keys:
rawtable: the exact text of the table as it appeared in the
logfile
title: the title of the table.
type: the table type.
graphs: the text of the $GRAPHS portion of the table text.
columns: the text of the column headers in the table.
text: the 'junk' text after the column headers and before the
actual table data.
data: the text of the table data (i.e. columns and rows of
numbers or other data).
nlines: the number of lines of text covered by the entire table
block.
These data items are themselves relatively unprocessed, so it
is recommended that the text that matches the table should be
fed into a 'table' object, which provides a much easier-to-use
interface to the various bits of data in the table."""
#
# See e.g. http://www.ccp4.ac.uk/dist/html/loggraphformat.html
# for format of TABLES
#
# Note that this regular expression accommodates slight deviations
# by making the "closing" ":" of the $TABLE line optional.
# This is done for consistency with loggraph's behaviour.
#
# Set up regular expression for entire table
# This is the "strict" form of the table
table = self.compile(
"isccp4table",
r" *\$TABLE ?:([^:]*):?[ \n]+\$(GRAPHS|SCATTER)[^:]*(:[^\$]*)\$\$([^\$]*)\$\$([^\$]*)\$\$([^\$]*)\$\$",
).search(text)
result = dict()
if table:
result["rawtable"] = table.group(0)
result["title"] = table.group(1).strip()
result["type"] = table.group(2).strip()
result["graphs"] = table.group(3)
result["columns"] = table.group(4)
result["text"] = table.group(5)
result["data"] = table.group(6)
result["nlines"] = table.group(0).count("\n")
return result
# If there wasn't a match then try a simpler match
# This relaxes some of the rules in the format definition
table = self.compile(
"isccp4simplertable",
r" *\$TABLE ?:([^\n]*)\n+\$(GRAPHS|SCATTER)[^:]*(:[^\$]*)\$\$([^\$]*)\$\$([^\$]*)\$\$([^\$]*)\$\$",
).search(text)
if table:
result["rawtable"] = table.group(0)
result["title"] = table.group(1).strip()
result["type"] = table.group(2).strip()
result["graphs"] = table.group(3)
result["columns"] = table.group(4)
result["text"] = table.group(5)
result["data"] = table.group(6)
result["nlines"] = table.group(0).count("\n")
return result
return result
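# Illustrative example (the input text is an assumption, not from a real log):
# a minimal $TABLE block that the "strict" pattern above should match is
#
#   $TABLE: R factor vs cycle:
#   $GRAPHS :R vs cycle:N:1,2: $$
#    Cycle Rfact $$ $$
#       1  0.25
#       2  0.22
#   $$
#
# giving title 'R factor vs cycle', type 'GRAPHS', column headers
# 'Cycle Rfact' and two rows of data.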
# Match CCP4i header information
def isccp4iheader(self, text):
"""Test if text matches a CCP4i header line."""
#
# CCP4i header lines look like:
# #CCP4I VERSION CCP4Interface 1.4.1
# #CCP4I SCRIPT LOG refmac5
# #CCP4I DATE 25 Oct 2005 15:19:22
#
if self.isccp4itail(text):
# Reject tail elements
return ""
header = self.compile("isccp4iheader", r"#CCP4I (.*)").search(text)
result = ""
if header:
result = header.group(0)
return result
# Match CCP4i tail information
def isccp4itail(self, text):
"""Test if text matches a CCP4i tail line."""
#
# CCP4i tail lines look like:
# #CCP4I TERMINATION STATUS 1
# #CCP4I TERMINATION TIME 25 Oct 2005 15:19:30
# #CCP4I TERMINATION OUTPUT_FILES /home/pjx/PROJECTS/myProject/...
# #CCP4I MESSAGE Task completed successfully
#
tail = self.compile("isccp4itail", r"#CCP4I (TERMINATION|MESSAGE) (.*)").search(
text
)
result = ""
if tail:
result = tail.group(0)
return result
# Match CCP4i information text
def isccp4i_information(self, text):
"""Test if text matches a CCP4i information block."""
#
# CCP4i information lines look like:
# 123456789012345678901234567890123456789012345678901234567890123456789012345
# ***************************************************************************
# * Information from CCP4Interface script
# ***************************************************************************
# Running SHELXC to prepare data for heavy atom search
# ***************************************************************************
#
info = self.compile(
"isccp4iinformation",
r"\*{75,75}\n\* Information from CCP4Interface script\n\*{75,75}\n(.*)\n\*{75,75}",
).search(text)
result = dict()
if info:
result["message"] = info.group(1)
result["nlines"] = info.group(0).count("\n")
return result
# Match SHELX banners
# 123456789012345678901234567890123456789012345678901234567890123456789012
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# + SHELXC - Create input files for SHELXD and SHELXE - Version 2006/3 +
# + Copyright (C) George M. Sheldrick 2003-6 +
# + SHELX_56_shelxc Started at 14:30:07 on 21 Apr 2006 +
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# + SHELXD-2006/3 - MACROMOLECULAR DIRECT METHODS - FORTRAN-95 VERSION +
# + Copyright (C) George M. Sheldrick 2000-2006 +
# + SHELX_56_shelxd_fa started at 14:30:11 on 21 Apr 2006 +
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# 12345678901234567890123456789012345678901234567890123456789012345678
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# + SHELXE - PHASING AND DENSITY MODIFICATION - Version 2006/3 +
# + Copyright (C) George M. Sheldrick 2001-6 +
# + Started at 14:30:36 on 21 Apr 2006 +
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
def isshelxbanner(self, text):
"""Test if text matches a SHELX program banner.
This function tries to match the banners from SHELXC,
SHELXD and SHELXE.
If the match fails then return an empty dictionary; if it succeeds then
return a dictionary with the following keys:
name: the program name.
version: the program version.
runtime: the time of day that the program run started at as
reported in the program banner.
rundate: the date that the program run started at as
reported in the program banner."""
# Set up regular expression for partial SHELX banner
if text.find("SHELX") < 0:
return dict()
banner = self.compile(
"isshelxbanner",
r"\+{68,72}\n \+ (SHELXC|SHELXD|SHELXE)([^\+]*)\+\n \+ Copyright \(C\) *George M. Sheldrick[^\n]*\n \+ ([^\+]*)\+\n \+{68,72}",
).search(text)
result = dict()
if banner:
result["banner_text"] = banner.group(0)
result["name"] = banner.group(1)
result["nlines"] = banner.group(0).count("\n")
# Try and untangle the versions
name = banner.group(1)
version = False
if name == "SHELXC" or name == "SHELXE":
version = re.compile(r"Version ([^ \+]*)").search(banner.group(2))
elif name == "SHELXD":
version = re.compile(r"\-([^ \-]*)").search(banner.group(2))
if version:
result["version"] = version.group(1)
else:
result["version"] = "?"
# Try and untangle the start times
date = re.compile(r"(S|s)tarted at ([0-9:]+) on ([0-9A-Za-z ]+)").search(
banner.group(3)
)
if date:
result["runtime"] = str(date.group(2))
result["rundate"] = str(date.group(3))
else:
result["runtime"] = "?"
result["rundate"] = "?"
return result
# Match SHELX program termination
def isshelxtermination(self, text):
"""Test if text matches a SHELX program termination.
This function tries to match the messages from SHELXC,
SHELXD and SHELXE.
If the match fails then return an empty dictionary; if it succeeds then
return a dictionary with the following keys:
termination_name: the program name as reported in the
SHELX termination message at the tail of the program log.
termination_message: the message text displayed in the
termination message. The content of this text varies
between SHELXC and SHELXD/E so no further processing is
currently attempted."""
#
# Termination looks like:
# 123456789012345678901234567890123456789012345678901234567890123456789012
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# + SHELXC for SHELX_56_shelxc finished at 14:30:11 on 21 Apr 2006 +
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# + SHELXD finished at 14:30:36 Total time: 23.43 secs +
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# + SHELXE finished at 14:34:01 Total time: 198.15 secs +
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#
# Set up regular expression for partial SHELX termination
if text.find("SHELX") < 0:
return dict()
term = self.compile(
"isshelxtermination",
r"\+{68,72}\n \+ (SHELXC|SHELXD|SHELXE)([^\+]*)\+\n \+{68,72}",
).search(text)
result = dict()
if term:
result["termination_text"] = term.group(0)
result["termination_name"] = term.group(1)
result["termination_message"] = term.group(2)
result["nlines"] = term.group(0).count("\n")
return result
# Match program keyword input line ("Data line")
def isdataline(self, line):
"""Test if line matches a CCP4 keyword input line.
This function tries to match the keyword input lines.
If the match fails then return an empty dictionary; if it succeeds then
returns a dictionary with the following keys:
data_line: the keyword data"""
#
# Keyworded lines look like:
#
# Data line--- make check NONE
#
# Set up regular expression for keyword input lines
data = self.compile("isdataline", r"^ Data line--- ([^\n]*)\n").search(line)
result = dict()
if data:
result["data_line_text"] = data.group(0)
result["data_line"] = data.group(1)
result["nlines"] = data.group(0).count("\n")
return result
# Match CCP4 file opening line (logical name/filename)
def isfileopen(self, line):
"""Test if line matches a CCP4 file opening report.
This function tries to match the reports of a file opening
event from the CCP4 libraries, which report the logical name
and associated filename.
If the match fails then return an empty dictionary; if it succeeds then
returns a dictionary with the following keys:
logical_name: the logical name
filename: the associated filename"""
#
# File opening report lines look like:
#
# Logical Name: /home/pjx/PROJECTS/myProject/aucn.mtz Filename: /home/pjx/PROJECTS/myProject/aucn.mtz
#
# Set up regular expression for file opening report lines
fileopen = self.compile(
"isfileopen", r"^ Logical Name: ([^\n]*) Filename: ([^\n]*)\n"
).search(line)
result = dict()
if fileopen:
result["fileopen_text"] = fileopen.group(0)
result["logical_name"] = fileopen.group(1).strip()
result["filename"] = fileopen.group(2).strip()
return result
# Match CCP4 SUMMARY_BEGIN line (summary start)
def issummary_begin(self, line):
"""Test if line matches a CCP4 SUMMARY_BEGIN line.
This function tries to match lines that indicate the start
of a CCP4 summary block i.e. lines containing the text
'<!--SUMMARY_BEGIN-->'.
If the match fails then return False; if it succeeds then
return True."""
#
# Summary start lines look like:
# <B><FONT COLOR="#FF0000"><!--SUMMARY_BEGIN-->
#
# Set up regular expression for SUMMARY_BEGIN lines
summary = self.compile("issummary_begin", r"<\!--SUMMARY_BEGIN-->").search(line)
if summary:
return True
return False
# Match CCP4 SUMMARY_END line (summary end)
def issummary_end(self, line):
"""Test if line matches a CCP4 SUMMARY_END line.
This function tries to match lines that indicate the end
of a CCP4 summary block i.e. lines containing the text
'<!--SUMMARY_END-->'.
If the match fails then return False; if it succeeds then
return True."""
#
# Summary end lines look like:
# <!--SUMMARY_END--></FONT></B>
#
# Set up regular expression for SUMMARY_END lines
summary = self.compile("issummary_end", r"<\!--SUMMARY_END-->").search(line)
if summary:
return True
return False
#######################################################################
# External Functions
#######################################################################
# parselog
#
# Given the name of a logfile, populates and returns a
# logfile object
def parselog(filen, progress=0):
"""Process a file and return a populated logfile object.
parselog takes a file name as input; optionally if the
progress argument is set to a positive integer then the
function also reports its progress when it reaches a
multiple of that number of lines.
parselog works by reading the source file one line at a
time from beginning to end. Each line is added to two
buffers: a 'small' fixed-size buffer holding the most recently
read lines (50 lines here, set by 'bufsize'), and a larger
'tablebuffer' used to accumulate candidate table text.
After a line has been added, the small buffer is checked
against a series of regular expressions designed to match
the various features (banners, terminations and so on).
If a match is found then parselog updates the logfile object
that it is constructing and then clears the buffer.
The tablebuffer is also checked at each line, to see if it
contains a whole CCP4 logfile table. (The tablebuffer is a
specialised form of buffer which is intended to optimise
dealing with tables).
The buffer sizes for the small and large buffers affect the
speed of operation of parselog - if they are large then the
parsing is slower because larger chunks of text are being
tested multiple times.
However if the buffers are too small to accommodate some
of the logfile features then parselog is unable to detect
those features. As some logfiles can contain extremely large
tables, the tablebuffer must also be large. However other
features are generally quite small.
The other factor that can affect the speed of parsing is the
make-up of the logfile. Counterintuitively, long files that
contain few recognisable features can take longer because the
buffers are only infrequently flushed."""
# Process a file and return a populated logfile object
#
# Maximum size of text buffer to use
bufsize = 50
# Initial size of chunks to process
chunksize = 50
# Regular expression object
regex = patternmatch()
# Buffer objects
buff = buffer(bufsize)
tablebuff = tablebuffer()
linecount = 0
# New (empty) logfile object
log = logfile(filen)
prog = False
summary = None
# Open the file for reading
f = open(filen, "r")
# Read line-by-line
for line in f:
linecount += 1
# Progress indicator (if requested)
# Report reaching "progress" number of lines
if progress:
if not linecount % progress:
print("Processed " + str(linecount) + " lines")
# Append line to buffers
buff.append(line)
tablebuff.append(line)
# Get a chunk of text to process
bufftext = buff.tail(chunksize)
# Test the line for matches
#
# Data line i.e. CCP4 program keywords
result = regex.isdataline(line)
if result:
if not prog or not prog.isprogram():
# Found a data line outside the context
# of a program
# Assume that we are now inside a program
prog = log.addprogram()
# Set the start line to be immediately
# after the previous fragment
try:
previous_fragment = log.fragment(log.nfragments() - 2)
start = previous_fragment.get_endline() + 1
except IndexError:
# Failed to get end line of previous
# fragment
start = 0
log.set_fragment_start(start)
# Remove any html tags and store
data_line = strip_logfile_html(result["data_line"])
prog.addkeyword(data_line)
# File opening report line i.e. logical name/filename pairs
result = regex.isfileopen(line)
if result:
if not prog or not prog.isprogram():
# Found a file opening report outside the context
# of a program
# Assume that we are now inside a program
prog = log.addprogram()
# Set the start line to be immediately
# after the previous fragment
try:
previous_fragment = log.fragment(log.nfragments() - 2)
start = previous_fragment.get_endline() + 1
except IndexError:
# Failed to get end line of previous
# fragment
start = 0
log.set_fragment_start(start)
# Store the logical name/filename pair
prog.addlogicalname(result["logical_name"], result["filename"])
# Start of a summary block i.e. <!--SUMMARY_BEGIN-->
result = regex.issummary_begin(line)
if result:
summary = log.addsummary(linecount)
# End of a summary block i.e. <!--SUMMARY_END-->
result = regex.issummary_end(line)
if result:
if not summary:
# Make a new summary with no start
summary = log.addsummary()
# Close out the current summary
summary.set_end(linecount)
# Test the buffer for matches
#
# CCP4 program banner
result = regex.isccp4banner(bufftext)
if result:
##print "Found CCP4 program banner"
##print "Result = "+str(result)
prog = log.addprogram()
prog.set_isccp4(True)
prog.set_attributes_from_dictionary(result)
log.set_fragment_start(linecount)
buff.clear()
tablebuff.clear()
continue
# SHELX program banner
result = regex.isshelxbanner(bufftext)
if result:
##print "Found SHELX program banner"
##print "Result = "+str(result)
prog = log.addprogram()
prog.set_attributes_from_dictionary(result)
log.set_fragment_start(linecount)
buff.clear()
tablebuff.clear()
continue
# CCP4 program termination
result = regex.isccp4termination(bufftext)
if result:
##print "Found CCP4 program termination"
##print "Result = "+str(result)
if not prog:
# Outside the context of any fragment, and
# found the end of a program before its start
log.set_fragment_end(offsetline(linecount, result))
prog = log.addprogram()
elif not prog.isprogram():
# Within the context of a fragment which
# is not a program and found the end of a
# program before its start
log.set_fragment_end(offsetline(linecount, result))
prog = log.addprogram()
prog.set_attributes_from_dictionary(result)
log.set_fragment_end(linecount)
prog.set_termination(True)
# Clear the current pointer
prog = False
buff.clear()
tablebuff.clear()
continue
# SHELX program termination
result = regex.isshelxtermination(bufftext)
if result:
##print "Found SHELX program termination"
##print "Result = "+str(result)
if not prog:
# Found the end of a program before its start
prog = log.addprogram()
prog.set_attributes_from_dictionary(result)
log.set_fragment_end(linecount)
prog.set_termination(True)
# Clear the current pointer
prog = False
buff.clear()
tablebuff.clear()
continue
# CCP4 table
if tablebuff.complete():
if not prog:
# Found a table outside the context of a program
##print "Adding table as a fragment"
prog = log.newfragment()
log.set_fragment_start(linecount)
table_error = False
table = prog.addtable(tablebuff.all())
if not table:
print("*** Failed to extract table data ***")
table_error = True
elif table.parse_error():
print("*** Failed to parse table data ***")
table_error = True
if table_error:
print("\tLogfile: " + str(log.filename()))
print("\tTable start: L" + str(linecount - len(tablebuff) + 1))
print("\tTable end : L" + str(linecount))
# Add the table to the log, regardless of status
log.addtable(table)
# clear the buffers
buff.clear()
tablebuff.clear()
continue
# CCP4 keytext message
result = regex.isccp4keytext(bufftext)
if result:
##print "Found CCP4 keytext"
##print "Result = "+str(result)
if not prog:
# Found a message outside the context of a program
##print "Adding keytext as a fragment"
prog = log.newfragment()
log.set_fragment_start(linecount)
keytext = prog.addkeytext(
result["name"], result["junk_text"], result["message"]
)
log.addkeytext(keytext)
buff.clear()
tablebuff.clear()
continue
# CCP4i header
result = regex.isccp4iheader(bufftext)
if result:
##print "Found CCP4i header"
##print "Result = "+str(result)
log.append_ccp4i_header(result)
buff.clear()
continue
# CCP4i tail
result = regex.isccp4itail(bufftext)
if result:
##print "Found CCP4i tail"
##print "Result = "+str(result)
log.append_ccp4i_tail(result)
buff.clear()
tablebuff.clear()
continue
# CCP4i information
result = regex.isccp4i_information(bufftext)
if result:
##print "Found CCP4i information"
##print "Result = "+str(result)
# Make a new fragment - these messages shouldn't
# appear inside the context of another program
prog = log.addccp4i_info()
prog.set_attributes_from_dictionary(result)
log.set_fragment_start(linecount)
log.set_fragment_end(linecount)
# Clear the current context
prog = False
buff.clear()
tablebuff.clear()
continue
# Ensure that the endline of the last fragment
# is assigned
log.set_fragment_end(linecount)
# Close the file
f.close()
return log
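# Minimal usage sketch (not part of the original smartie API); the logfile
# name passed in is assumed to point at an existing CCP4 log file.
def example_parselog_usage(filen):
    """Illustrative helper only: parse the named logfile and print a short
    report of what was found, using the logfile object's query methods."""
    log = parselog(filen)
    print("%s: %d fragments, %d programs, %d tables" %
          (log.filename(), log.nfragments(), log.nprograms(), log.ntables()))
    for tbl in log.tables():
        print('  Table: "%s" (%d rows)' % (tbl.title(), tbl.nrows()))
    return log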
#
# summarise
#
# Produce a summary of the data in a logfile object
#
def summarise(thislog):
"""Summarise the content of a logfile object.
This function takes a logfile object as input and writes a
summary of the contents (fragments, programs, tables, messages
and so on) to stdout."""
# Logfile name
print("Summary for " + thislog.filename() + "\n")
# Was it from CCP4i?
if thislog.isccp4i():
print("This is a CCP4i logfile\n")
# Number of programs or pseudo-programs
print(str(thislog.nfragments()) + " logfile fragments\n")
print("Fragments:")
for i in range(0, thislog.nfragments()):
fragment = thislog.fragment(i)
if fragment.isprogram():
if fragment.has_attribute("name"):
print("\tProgram: " + str(fragment.name))
else:
print("\tProgram: <no name>")
else:
if fragment.isccp4i_info():
print("\tCCP4i info")
elif fragment.isfragment():
print("\tFragment")
if fragment.ntables():
print("\t\t" + str(fragment.ntables()) + " tables")
if fragment.nkeytexts():
print("\t\t" + str(fragment.nkeytexts()) + " keytexts")
print("")
# Summarise program logfile fragments
if thislog.nprograms() > 0:
print(str(thislog.nprograms()) + " program logfiles\n")
print("Programs:")
for i in range(0, thislog.nprograms()):
prog = thislog.program(i)
# Is it a CCP4 program?
if prog.isccp4():
# Print name, version (and CCP4 version)
print(
"\t"
+ prog.name
+ "\tv"
+ prog.version
+ "\t(CCP4 "
+ prog.ccp4version
+ ")"
)
else:
# Print name and version
if prog.has_attribute("name") and prog.has_attribute("version"):
print("\t" + prog.name + "\t" + prog.version)
else:
print("\t<No name and/or version>")
if prog.termination():
print("\tTerminated with: " + prog.termination_message)
else:
print("\tNo termination message found")
# Keytexts
if prog.nkeytexts():
print("\n\t\tKeytext messages:")
for j in range(0, prog.nkeytexts()):
print(
"\t\t"
+ str(prog.keytext(j).name())
+ ': "'
+ str(prog.keytext(j).message())
+ '"'
)
# Tables
if prog.ntables():
print("\n\t\tTables:")
for table in prog.tables():
print('\t\tTable: "' + table.title() + '"')
print("")
else:
print("No program logfiles found")
print("")
# Total set of CCP4i information messages in the file
print("CCP4i messages in file:")
if thislog.nccp4i_info():
for i in range(0, thislog.nccp4i_info()):
print('\tCCP4i info: "' + thislog.ccp4i_info(i).message + '"')
else:
print("\tNo messages found")
print("")
# Total set of tables in the file
print("Tables in file:")
if thislog.ntables():
for table in thislog.tables():
print('\tTable: "' + table.title() + '" (' + str(table.nrows()) + " rows)")
else:
print("\tNo tables found")
print("")
# Total set of keytexts in the file
print("Keytext messages in file:")
if thislog.nkeytexts():
for i in range(0, thislog.nkeytexts()):
print(
"\t"
+ str(thislog.keytext(i).name())
+ ': "'
+ thislog.keytext(i).message()
+ '"'
)
else:
print("\tNo keytext messages found")
print("")
#######################################################################
# Utility Functions
#######################################################################
def copyfragment(fragment0, newobj):
"""Copy the data in a fragment to another object.
The data in the source fragment 'fragment0' is copied to the
target object 'newobj', and 'newobj' is returned. 'newobj'
should be a fragment object or some subclass of fragment (such
as a 'program' object).
copyfragment can be used to 'mutate' a fragment into (for
example) a program object."""
# Copy attribute data
for item in fragment0.attributes():
newobj[item] = fragment0.get_attribute(item)
# Copy tables
for tbl in fragment0.tables():
newobj.addtable(tbl)
# Copy keytexts
for i in range(0, fragment0.nkeytexts()):
keytext = fragment0.keytext(i)
newobj.addkeytext(keytext.name(), keytext.junk_text(), keytext.message())
# Try to copy other attributes that fragment subclasses
# have (such as keywords)
try:
for line in fragment0.keywords():
newobj.addkeyword(line)
except AttributeError:
# Either the source or target doesn't support
# keyword storage
pass
# Return the populated object
return newobj
def offsetline(linen, pattern_result):
"""Return the line number offset by the size of a matched pattern.
This is an internal utility function.
Given 'linen' (the current line number) and 'pattern_result'
(a dictionary containing data items returned from one of the
regular expression functions), this function returns a line
number which is offset to the start of the regular expression.
It does this by locating a dictionary key 'nlines', which
gives the size of the regular expression match."""
if "nlines" in pattern_result:
nlines = pattern_result["nlines"]
else:
nlines = 0
new_linen = linen - nlines - 1
if new_linen < 0:
return 0
else:
return new_linen
def find_table_by_title(table_list, title_pattern, index=0):
"""Fetch a table object from a list by matching the title.
This method is deprecated; use find_tables_by_title instead.
This method looks up a particular table in a list
of table objects (argument 'table_list'), by finding
the first table in the list which matches the supplied
regular expression 'title_pattern'.
If there is more than one matching table then the 'index'
argument specifies which of the list of matching tables
should be returned. If index is out of range (or there are
no matching tables) then return 'None'."""
rtable_list = find_tables_by_title(table_list, title_pattern)
try:
return rtable_list[index]
except Exception:
return None
def find_tables_by_title(table_list, title_pattern):
"""Return a list of tables by matching the title.
This method returns a list of table objects containing
all the tables in the supplied list 'table_list' for
which the regular expression 'title_pattern' matches
the table title.
If no pattern is given then a list with all the table
objects will be returned.
A list is always returned, so in cases where there
are no matches an empty list is returned, and if there
is just one match then a list with a single item is
returned."""
rtitle = re.compile(title_pattern)
rtable_list = []
for table in table_list:
if rtitle.match(table.title()):
rtable_list.append(table)
return rtable_list
def escape_xml_characters(data):
"""Return copy of string with XML special characters escaped.
This replaces the characters <, > and & with the XML escape
sequences &lt;, &gt; and &amp;. It also replaces double
quotes with &quot;.
This could be replaced in future by the
'xml.sax.saxutils.escape' function."""
return (
str(data)
.replace("&", "&")
.replace("<", "<")
.replace(">", ">")
.replace('"', """)
)
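# Illustrative example (the input string is an assumption):
#   escape_xml_characters('R < 0.2 & "free"')
# returns 'R &lt; 0.2 &amp; &quot;free&quot;'.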
def strip_logfile_html(text):
"""Strip out HTML tags from logfile text.
Return copy of input 'text' with HTML tags removed
and any HTML special characters escaped.
Note that this is specialised for CCP4 logfiles,
in particular CCP4-formatted logfiles will be
extracted from <param name='table' ...> tags."""
out_text = ""
buff = ""
start_tag = ""
end_tag = ""
context = "none"
for i in range(len(text)):
c = text[i]
# print "c = "+str(c)+" context = "+str(context)
if c == "<":
if context == "none":
# Possible start of a tag, depending on
# next character
context = "putative_tag"
buff = c
else:
# Everything up to this needs to
# be dumped directly to output
out_text = out_text + escape_xml_characters(buff)
elif context == "putative_tag":
buff = buff + c
if c.isalpha():
context = "start_tag"
elif c == "/":
context = "end_tag"
elif c == "!":
context = "comment_tag"
else:
# Not a tag so dump it
context = "none"
out_text = out_text + escape_xml_characters(buff)
elif context == "start_tag" or context == "end_tag" or context == "comment_tag":
buff = buff + c
if c == ">":
if context == "start_tag":
# End of a start tag
# Process it and see if we can
# salvage something
salvage_text = salvage_tag_data(buff)
if salvage_text != "":
out_text = out_text + escape_xml_characters(salvage_text)
# Reset the buffer
context = "none"
buff = ""
elif context == "end_tag":
# End of an end tag
# Throw this away (for now)
context = "none"
buff = ""
elif context == "comment_tag":
# End of a comment
# Throw this away (for now)
context = "none"
buff = ""
else:
# Nothing special about this
# Add to the output
out_text = out_text + escape_xml_characters(c)
# Finished - append the remaining buffer
out_text = out_text + escape_xml_characters(buff)
return remove_blank_lines(out_text)
def remove_blank_lines(text):
"""Remove duplicated blank lines from text.
This function tries to remove extra blank lines from
the supplied text, so that multiple blank lines are
collapsed to just a single line."""
out_text = ""
blank = True
for line in text.splitlines(True):
if line.isspace():
if not blank:
blank = True
out_text = out_text + line
else:
blank = False
out_text = out_text + line
return out_text
def process_start_tag(tag_text):
"""Process an arbitrary HTML start tag.
Given the text of an arbitrary tag, this function returns
a tuple consisting of two elements. The first element is
the tag name, the second is a dictionary with keys
corresponding to attributes found in the tag and the values
of those keys corresponding to the attribute values."""
tokens = tokenise(tag_text.strip("<>"))
tag = tokens[0]
attributes = {}
if len(tokens) > 1:
for token in tokens[1:]:
try:
i = token.index("=")
key = token[0:i]
value = token[i + 1 :].strip(' "')
except ValueError:
key = token
value = ""
attributes[key] = value
return (tag, attributes)
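# Illustrative example (the input tag is an assumption):
#   process_start_tag('<applet width="400" height="300">')
# returns ('applet', {'width': '400', 'height': '300'}).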
def salvage_tag_data(tag_text):
"""Extract data from a HTML tag.
This function deals with extracting the useful data from
certain HTML tags found in CCP4 logfiles.
Currently it is set up to extract CCP4 table data from the
'param' tags of JLogGraph applets.
If no data could be salvaged then an empty string is
returned."""
data = process_start_tag(tag_text)
tag = data[0]
attributes = data[1]
# Jloggraph applet data
if tag == "param" and "name" in attributes:
if attributes["name"] == "table" and "value" in attributes:
return attributes["value"]
# Spacegroup
if tag_is_spacegroup(tag_text):
return tag_text
# Return an empty string by default
return ""
def tag_is_spacegroup(text):
"""Check if a HTML tag looks like a spacegroup name.
This does a very crude test to see whether the supplied
string looks like a spacegroup name (rather than a random
HTML tag)."""
spacegroup = re.compile(
r"<?[PABCIFHRpabcifhr] *[1-9][1-9]? *[1-9]?[1-9]? *[1-9]?[1-9]?>$"
)
result = spacegroup.search(text)
if result:
return True
else:
return False
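# Illustrative examples (inputs are assumptions):
#   tag_is_spacegroup("<P 21 21 21>") returns True, while
#   tag_is_spacegroup("<table border=1>") returns False.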
def retrieve(filen, start, end):
"""Retrieve a block of text from a file.
Given the name of a file 'filen' and a pair of start and
end line numbers, extract and return the text from the
file.
This uses the linecache module - beware of problems with
consuming too much memory if the cache isn't cleared."""
text = ""
# Check for consistency and validity of lines
if (start < 0 and end < 0) or end < start:
return ""
# Fetch from a file if possible
if os.path.isfile(filen):
try:
for i in range(start, end + 1):
text = text + str(linecache.getline(filen, i))
return text
except Exception:
print("Exception raised in retrieve method:")
print('\tSource file = "' + str(filen) + '"')
print("\tStart line = " + str(start))
print("\tEnd line = " + str(end))
print("\tCurrent line = " + str(i))
raise
# Otherwise return nothing
return ""
def tokenise(line):
"""Tokenise a string and return a list.
Split a line of text into tokens separated by whitespace, but
ignoring whitespace that appears within quotes.
This attempts to do a similar job to the CCP4 'parser' (which is
itself actually a tokeniser) in the core CCP4 libraries. The
hard part is dealing with quoted strings which form a single
token, and which can themselves also contain quotes."""
sline = str(line)
tokens = []
token = False
quote = False
tquote = ""
start = 0
for i in range(len(sline)):
c = sline[i]
if token and not quote:
if c == " " or c == "\t" or c == "\n":
# end of current token
tokens.append(sline[start:i])
token = False
quote = False
if token and (c == '"' or c == "'"):
# Detected a quote - flip the quote flag
if quote:
if c == tquote:
quote = False
else:
quote = True
tquote = c
if not token:
if c != " " and c != "\t" and c != "\n":
# Start of a new token
token = True
start = i
if c == '"' or c == "'":
# Also it's quoted
quote = True
tquote = c
# End of the loop
if token:
# End of the last token
tokens.append(sline[start : len(sline)])
return tokens
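# Illustrative example (the input line is an assumption):
#   tokenise('LABIN FP=FP SIGFP=SIGFP "my file.mtz"')
# returns ['LABIN', 'FP=FP', 'SIGFP=SIGFP', '"my file.mtz"'] - note that the
# quoted token keeps its surrounding quotes.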
##############################################################
# Diagnostic methods used for testing and as examples
##############################################################
# List the TABLE tags in a file
def table_tags(filen):
"""Report occurances of '$TABLE' tags in a log file.
This function is principally a diagnostic tool and is
independent of the other classes and methods in this
module. It takes the name of a log file as input,
scans the file for occurrences of the $TABLE tag, and
reports this to stdout."""
print("Scanning file " + str(filen))
rtable = re.compile(r"\$TABLE *:")
f = open(filen, "r")
linecount = 0
tablecount = 0
tablelist = []
for line in f:
linecount = linecount + 1
table = rtable.search(line)
if table:
tablecount = tablecount + 1
print(str(linecount) + ": " + str(line.rstrip("\n")))
tablelist.append(line.rstrip("\n"))
f.close()
print(str(linecount) + " lines and " + str(tablecount) + " tables")
return tablelist
# An example of making a new table from scratch
def table_example():
"""Demonstration function that creates and populates a table object.
This function is for demonstration purposes only; it shows
the basics of how to make and output a table. It creates
a new table object, names it, populates some columns of data
and then adds some graph definitions before outputting the
formatted table to stdout."""
print("\nExample making a new table from scratch:\n")
# Make a new (empty) table object
tbl = table("A table with random data")
# Add three columns called "x", "x^2" and "1/x"
tbl.addcolumn("x")
tbl.addcolumn("x^2")
tbl.addcolumn("1/x")
# Add some rows of data
for i in range(0, 10):
row = dict()
row["x"] = i
row["x^2"] = i * i
if i != 0:
row["1/x"] = 1.0 / float(i)
else:
row["1/x"] = "?"
tbl.add_data(row)
# Define some graphs
tbl.definegraph("Y = X(squared)", ("x", "x^2"))
tbl.definegraph("Y = 1/X", ("x", "1/x"))
tbl.definegraph("All data", ("x", "x^2", "1/x"))
# Print out the data as a simple "table" and in loggraph markup
print(tbl.show())
print(tbl.loggraph())
if __name__ == "__main__":
"""Usage example and demonstration for smartie.
Run the main program using:
python smartie.py file1 [file2 [...] ]
For each file this example will generate a logfile object
and then use the module-level 'summarise' function to print out
a summary of the file's contents."""
print("Running test on logparser code")
# Get the command line args
print("command line: " + str(sys.argv))
if len(sys.argv) == 1:
print("Usage: smartie.py file1 [file2 [...] ]")
sys.exit(0)
# Cycle over files and process
for filen in sys.argv[1:]:
print('**** Parsing file "' + filen + '"')
start_time = time.perf_counter()  # time.clock() was removed in Python 3.8
log = parselog(filen)
end_time = time.perf_counter()
# Use the summarise function
summarise(log)
print("\nTime: " + str(end_time - start_time) + "\n")
| 36.678857
| 217
| 0.591132
|
aaad7f5c0ecb48fbd51c78e81b0273101e6ea4e6
| 1,004
|
py
|
Python
|
src/process/infrastructor/connection/file/connectors/FileConnector.py
|
jedicontributors/pythondataintegrator
|
3e877b367ab9b20185476128ec053db41087879f
|
[
"MIT"
] | null | null | null |
src/process/infrastructor/connection/file/connectors/FileConnector.py
|
jedicontributors/pythondataintegrator
|
3e877b367ab9b20185476128ec053db41087879f
|
[
"MIT"
] | null | null | null |
src/process/infrastructor/connection/file/connectors/FileConnector.py
|
jedicontributors/pythondataintegrator
|
3e877b367ab9b20185476128ec053db41087879f
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
from queue import Queue
from infrastructor.dependency.scopes import IScoped
from pandas import DataFrame
class FileConnector(ABC,IScoped):
@abstractmethod
def connect(self):
pass
@abstractmethod
def disconnect(self):
pass
@abstractmethod
def get_unpredicted_data(self, file: str, names: [], header: int, separator: str, limit: int, process_count:int, data_queue: Queue, result_queue:Queue):
pass
@abstractmethod
def get_data_count(self, file: str):
pass
@abstractmethod
def get_data(self, file: str, names: [], start: int, limit: int,
header: int, separator: str) -> DataFrame:
pass
@abstractmethod
def write_data(self, file: str, data: DataFrame, separator: str):
pass
@abstractmethod
def recreate_file(self, file: str, headers: [], separator: str):
pass
@abstractmethod
def delete_file(self, file: str):
pass
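# Illustrative sketch only (not part of this repository): a minimal CSV-backed
# connector implementing the abstract interface above with pandas. The class
# name and all method bodies are assumptions about how a concrete connector
# might look, not the project's actual implementation.
import os
import pandas as pd

class ExampleCsvConnector(FileConnector):
    def connect(self):
        # Plain files need no persistent connection
        pass

    def disconnect(self):
        pass

    def get_unpredicted_data(self, file: str, names: [], header: int, separator: str,
                             limit: int, process_count: int, data_queue: Queue,
                             result_queue: Queue):
        # Stream the file in chunks of 'limit' rows onto the queue; one None
        # per worker process signals the end of the data. The result_queue is
        # left to the consuming workers in this simplified sketch.
        for chunk in pd.read_csv(file, names=names, header=header, sep=separator,
                                 chunksize=limit):
            data_queue.put(chunk)
        for _ in range(process_count):
            data_queue.put(None)

    def get_data_count(self, file: str):
        # Count rows by scanning the file once
        with open(file) as f:
            return sum(1 for _ in f)

    def get_data(self, file: str, names: [], start: int, limit: int,
                 header: int, separator: str) -> DataFrame:
        # Simplified paging: skip 'start' rows and read the next 'limit' rows
        return pd.read_csv(file, names=names, header=header, sep=separator,
                           skiprows=start, nrows=limit)

    def write_data(self, file: str, data: DataFrame, separator: str):
        # Append rows without headers
        data.to_csv(file, sep=separator, index=False, header=False, mode="a")

    def recreate_file(self, file: str, headers: [], separator: str):
        # Overwrite the file with just the header row
        pd.DataFrame(columns=headers).to_csv(file, sep=separator, index=False)

    def delete_file(self, file: str):
        if os.path.exists(file):
            os.remove(file)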
| 23.904762
| 156
| 0.658367
|
07609ef1d425b874c69711869b686c0e29498fbf
| 16,602
|
py
|
Python
|
helper_functions.py
|
srdjanko/CarND-Advanced-Lane-Lines
|
896e5957f0fb693456e82ff57d806e8bd384869c
|
[
"MIT"
] | null | null | null |
helper_functions.py
|
srdjanko/CarND-Advanced-Lane-Lines
|
896e5957f0fb693456e82ff57d806e8bd384869c
|
[
"MIT"
] | null | null | null |
helper_functions.py
|
srdjanko/CarND-Advanced-Lane-Lines
|
896e5957f0fb693456e82ff57d806e8bd384869c
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import logging
def draw_polyline(img, vertices, color=[255, 0, 0], thickness=2, Closed = False):
"""
Simple method for drawing connected lines or polygons, given the
set of points. Starting and ending point can be connected automatically
to form closed polygon figure.
"""
cv2.polylines(img, vertices, Closed, color, thickness, lineType=cv2.LINE_AA)
def draw_lines(img, lines, color=[255, 0, 0], thickness=2):
"""
Simple method for drawing set of individual lines, each defined by start and
end points.
"""
for line in lines:
for x1,y1,x2,y2 in line:
cv2.line(img, (x1, y1), (x2, y2), color, thickness)
def draw_label(img, text, pos, scale = 0.7, color = (0,0,0)):
"""
Method for displaying text on given part of the image.
"""
font_face = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img, text, pos, font_face, scale, color, 1, cv2.LINE_AA)
def filter_image(img, l_thresh=(20, 100), s_thresh=(50, 120)):
"""
Taken from materials and modified.
Performs image filtering based on the L and S channel (HLS), where each
channel is filtered separately, thresholded, and then combined into single
binary output. This is different from the material version where the S binary
is directly combined with the sobel binary.
"""
# Convert to HLS color space and separate the S channel
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
l_channel = hls[:,:,1]
s_channel = hls[:,:,2]
# Sobel x, l channel
sobel_l = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0) # Take the derivative in x
abs_sobel_l = np.absolute(sobel_l) # Absolute x derivative to accentuate lines away from horizontal
scaled_sobel_l = np.uint8(255*abs_sobel_l/np.max(abs_sobel_l))
# Threshold x gradient
l_binary = np.zeros_like(scaled_sobel_l)
l_binary[(scaled_sobel_l >= l_thresh[0]) & (scaled_sobel_l <= l_thresh[1])] = 1
# Sobel x, s channel
sobel_s = cv2.Sobel(s_channel, cv2.CV_64F, 1, 0) # Take the derivative in x
abs_sobel_s = np.absolute(sobel_s) # Absolute x derivative to accentuate lines away from horizontal
scaled_sobel_s = np.uint8(255*abs_sobel_s/np.max(abs_sobel_s))
# Threshold x gradient
s_binary = np.zeros_like(scaled_sobel_s)
s_binary[(scaled_sobel_s >= s_thresh[0]) & (scaled_sobel_s <= s_thresh[1])] = 1
s_l_binary = np.zeros_like(s_binary)
s_l_binary[(s_binary == 1) | (l_binary == 1)] = 1
return s_l_binary
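# Illustrative usage (the image path is an assumption, not part of this project):
#   img = mpimg.imread("test_images/straight_lines1.jpg")
#   binary = filter_image(img)
#   plt.imshow(binary, cmap="gray")
#   plt.show()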
def find_pixels_mirror(binary_warped, params):
"""
Taken from materials.
Method performs initial lane detection using the 'mirror' algorithm. Since
this method is likely resource intensive, it is only used for the initial
estimation or as a fallback when other methods fail.
"""
# Take a histogram of the bottom half of the image
histogram = np.sum(binary_warped[binary_warped.shape[0]//2:,:], axis=0)
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
midpoint = int(histogram.shape[0]//2)  # plain int() instead of the removed np.int alias
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:]) + midpoint
# HYPERPARAMETERS
# Choose the number of sliding windows
nwindows = params['nwindows'] #9
# Set the width of the windows +/- margin
margin = params['margin'] # 100
# Set minimum number of pixels found to recenter window
minpix = params['minpix'] # 50
# Set height of windows - based on nwindows above and image shape
window_height = int(binary_warped.shape[0]//nwindows)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions to be updated later for each window in nwindows
leftx_current = leftx_base
rightx_current = rightx_base
# Create empty lists to receive left and right lane pixel indices
left_lane_inds = []
right_lane_inds = []
# Step through the windows one by one
for window in range(nwindows):
# Identify window boundaries in x and y (and right and left)
win_y_low = binary_warped.shape[0] - (window + 1)*window_height
win_y_high = binary_warped.shape[0] - window*window_height
win_xleft_low = leftx_current - margin
win_xleft_high = leftx_current + margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
# Identify the nonzero pixels in x and y within the window #
good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
# Append these indices to the lists
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
# If you found > minpix pixels, recenter next window on their mean position
if len(good_left_inds) > minpix:
leftx_current = int(np.mean(nonzerox[good_left_inds]))
if len(good_right_inds) > minpix:
rightx_current = int(np.mean(nonzerox[good_right_inds]))
# Concatenate the arrays of indices (previously was a list of lists of pixels)
try:
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
except ValueError:
# Avoids an error if the above is not implemented fully
pass
# Extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
return leftx, lefty, rightx, righty
def find_pixels_poly(binary_warped, left_fit, right_fit, params):
"""
Taken from materials and tailored for this pipeline.
Based on existing lane polynomials, we choose a region with a given margin around
the polynomial curves. This region is then used to select candidate points for the
next estimation. This algorithm is likely faster than the 'mirror' algorithm and is
used whenever possible.
"""
# HYPERPARAMETER
# Choose the width of the margin around the previous polynomial to search
margin = params['margin'] # 100
# Grab activated pixels
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
### within the +/- margin of our polynomial function ###
left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy +
left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) +
left_fit[1]*nonzeroy + left_fit[2] + margin)))
right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy +
right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) +
right_fit[1]*nonzeroy + right_fit[2] + margin)))
# Again, extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
return leftx, lefty, rightx, righty
def plot_debug(binary_warped, left_x_nonzero, left_y_nonzero, right_x_nonzero, right_y_nonzero,
left_fit_poly, right_fit_poly, margin):
"""
Taken from materials.
Visualization of relevant debug information, to better estimate the quality of the
lane detection pipeline.
"""
## Visualization ##
# Create an image to draw on and an image to show the selection window
out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
window_img = np.zeros_like(out_img)
# Color in left and right line pixels
out_img[left_y_nonzero, left_x_nonzero] = [255, 0, 0]
out_img[right_y_nonzero, right_x_nonzero] = [0, 0, 255]
ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )
try:
left_fitx = left_fit_poly[0]*ploty**2 + left_fit_poly[1]*ploty + left_fit_poly[2]
right_fitx = right_fit_poly[0]*ploty**2 + right_fit_poly[1]*ploty + right_fit_poly[2]
except TypeError:
# Avoids an error if `left` and `right_fit` are still none or incorrect
print('The function failed to fit a line!')
left_fitx = 1*ploty**2 + 1*ploty
right_fitx = 1*ploty**2 + 1*ploty
# Generate a polygon to illustrate the search window area
# And recast the x and y points into usable format for cv2.fillPoly()
left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])
left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin, ploty])))])
left_line_pts = np.hstack((left_line_window1, left_line_window2))
right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])
right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin, ploty])))])
right_line_pts = np.hstack((right_line_window1, right_line_window2))
# Draw the lane onto the warped blank image
cv2.fillPoly(window_img, np.int_([left_line_pts]), (0,255, 0))
cv2.fillPoly(window_img, np.int_([right_line_pts]), (0,255, 0))
result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
draw_left = (np.asarray([left_fitx, ploty]).T).astype(np.int32)
draw_right = (np.asarray([right_fitx, ploty]).T).astype(np.int32)
cv2.polylines(result, [draw_left], False, (255,0,0), thickness=5)
cv2.polylines(result, [draw_right], False, (255,0,0), thickness=5)
return result
def plot_lanes(undist, Minv, left_fit_poly, right_fit_poly):
"""
Taken from materials.
Final visualization of the lane lines.
"""
# Generate x and y values for plotting
img_shape = undist.shape
ploty = np.linspace(0, undist.shape[0]-1, undist.shape[0] )
try:
left_fitx = left_fit_poly[0]*ploty**2 + left_fit_poly[1]*ploty + left_fit_poly[2]
right_fitx = right_fit_poly[0]*ploty**2 + right_fit_poly[1]*ploty + right_fit_poly[2]
except TypeError:
# Avoids an error if `left` and `right_fit` are still none or incorrect
print('The function failed to fit a line!')
left_fitx = 1*ploty**2 + 1*ploty
right_fitx = 1*ploty**2 + 1*ploty
# Create an image to draw the lines on
warp_zero = np.zeros(img_shape[:2]).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
pts = np.hstack((pts_left, pts_right))
# Draw the lane onto the warped blank image
cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
draw_left = (np.asarray([left_fitx, ploty]).T).astype(np.int32)
draw_right = (np.asarray([right_fitx, ploty]).T).astype(np.int32)
cv2.polylines(color_warp, [draw_left], False, (255,0,0), thickness=5)
cv2.polylines(color_warp, [draw_right], False, (255,0,0), thickness=5)
# Warp the blank back to original image space using inverse perspective matrix (Minv)
newwarp = cv2.warpPerspective(color_warp, Minv, (img_shape[1], img_shape[0]))
# Combine the result with the original image
result = cv2.addWeighted(undist, 1, newwarp, 0.5, 0)
return result
def plot_poly(ploty, poly):
"""
Taken from the materials and modified.
Returns a set of plotx points calculated from the polynomial and input ploty data.
"""
fit_success = False
try:
plotx = poly[0]*ploty**2 + poly[1]*ploty + poly[2]
fit_success = True
except TypeError:
# Avoids an error if poly is still none or incorrect
print('The function failed to fit a line!')
plotx = 1*ploty**2 + 1*ploty
return plotx, fit_success
def fit_poly_to_points(x, y):
"""
Taken from the materials.
Based on the detected points, calculate polynomials of the lane curve.
"""
fit_success = True
try:
fit = np.polyfit(x, y, 2)
except np.RankWarning:
# If the fit fails, fall back to the coefficients of the x = 0 line
# (note: np.polyfit normally issues RankWarning as a warning rather than
# raising it, so this branch only triggers when warnings are treated as errors)
fit = [0, 0, 0]
fit_success = False
return fit, fit_success
def fit_poly_to_lanes(warped_binary):
"""
Procedure for detecting road lanes based on the binary pixel data, obtained by filtering and
warping each recorded frame.
"""
import globals
lane_params = globals.lane_params
# Fetch previously detected lanes
lanes = lane_params.detected_lanes
# Current lane
current_lane = globals.Lane_fits()
lanes_length = len(lanes)
if lanes_length == 0:
# Try new mirror detection sequence
leftx, lefty, rightx, righty = find_pixels_mirror(warped_binary, lane_params.find_pixels_mirror)
else:
# Use previous best fit to define fit area
average_lane = lane_params.best_fit
leftx, lefty, rightx, righty = find_pixels_poly(warped_binary, average_lane.left_fit,
average_lane.right_fit, lane_params.find_pixels_poly)
# Calculate polynomial from detected points
left_fit, left_fit_success = fit_poly_to_points(lefty, leftx)
right_fit, right_fit_success = fit_poly_to_points(righty, rightx)
fit_success = left_fit_success & right_fit_success
current_lane.left_fit = left_fit
current_lane.right_fit = right_fit
current_lane.fit_success = fit_success
if (not fit_success) and (lanes_length == 0):
logging.warning('Lane detection not successful.')
if current_lane.fit_success:
lanes.insert(0, current_lane)
# Best fit
best_fit = globals.find_lane_average(lanes)
lane_params.best_fit = best_fit
if len(lanes) > lane_params.lane_count:
lanes.pop()
return leftx, lefty, rightx, righty, best_fit
def radius_measurements(left_fit, right_fit, lane_params):
'''
Taken from the materials and adapted for the pipeline.
Calculates the radius of the curvature of the lanes in [m].
'''
    # Define the y-value where we want the radius of curvature:
    # we choose the maximum y-value, corresponding to the bottom of the image.
    # Convert the fits from pixels to meters, then evaluate the curvature there.
xm_per_pix = lane_params.xm_per_pix
ym_per_pix = lane_params.ym_per_pix
# The bottom left point of the region, at the same time the lowest point of the curves
y_pix = lane_params.lane_region[0][1]
y_m = y_pix * ym_per_pix
left_fit_m = left_fit * [xm_per_pix/ym_per_pix**2, xm_per_pix/ym_per_pix, xm_per_pix]
right_fit_m = right_fit * [xm_per_pix/ym_per_pix**2, xm_per_pix/ym_per_pix, xm_per_pix]
curv = lambda a, b, y : (1 + (2*a*y + b)**2)**(1.5) / np.abs(2*a)
left_curverad = curv(left_fit_m[0], left_fit_m[1], y_m)
right_curverad = curv(right_fit_m[0], right_fit_m[1], y_m)
return left_curverad, right_curverad
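# Illustrative note (not part of the original pipeline): for a fit x = a*y**2 + b*y + c,
# the coefficients are first rescaled to meters and the curvature radius
#   R = (1 + (2*a*y + b)**2)**1.5 / abs(2*a)
# is evaluated at the bottom of the image, e.g. (assumed objects)
#   left_r, right_r = radius_measurements(best_fit.left_fit, best_fit.right_fit, lane_params)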
def position_measurement(left_fit, right_fit, lane_params):
'''
Taken from the materials and adapted for the pipeline.
Calculates the vehicle offset from the middle of the lane in [m].
'''
    # Convert from lines measured in pixels to lines measured in meters
xm_per_pix = lane_params.xm_per_pix
ym_per_pix = lane_params.ym_per_pix
# The bottom left point of the region, at the same time the lowest point of the curves
y_pix = lane_params.lane_region[0][1]
y_m = y_pix * ym_per_pix
left_fit_m = left_fit * lane_params.transform_poly_2_m
right_fit_m = right_fit * lane_params.transform_poly_2_m
# Calculate position from middle of the lane
left_curve_pos = left_fit_m[0]*y_m**2 + left_fit_m[1]*y_m + left_fit_m[2]
right_curve_pos = right_fit_m[0]*y_m**2 + right_fit_m[1]*y_m + right_fit_m[2]
lane_middle_pos = (left_curve_pos + right_curve_pos) / 2
image_middle_pos = lane_params.img_shape[1] * xm_per_pix / 2
# Since x values grow to the right, positive values here mean vehicle is shifted to
# the right of the lane middle
vehicle_pos = image_middle_pos - lane_middle_pos
return vehicle_pos
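# Illustrative note on the sign convention above (values are assumptions): a result such as
#   position_measurement(left_fit, right_fit, lane_params) == 0.12
# would mean the camera center sits roughly 0.12 m to the right of the lane center,
# while a negative value would mean it sits to the left.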
| 39.717703
| 109
| 0.688833
|
dda6f9fba1cfbc1c43aae7a05557c55bc96b9f19
| 1,218
|
py
|
Python
|
building_deep_learning_apps/Exercise Files/06/model_logging final.py
|
gilson27/linkedin_learning
|
ebed1866eeeef598ca678af7ef11d2ad5ef0850d
|
[
"MIT"
] | null | null | null |
building_deep_learning_apps/Exercise Files/06/model_logging final.py
|
gilson27/linkedin_learning
|
ebed1866eeeef598ca678af7ef11d2ad5ef0850d
|
[
"MIT"
] | 2
|
2020-07-16T22:04:26.000Z
|
2021-05-08T12:41:13.000Z
|
building_deep_learning_apps/Exercise Files/06/model_logging final.py
|
gilson27/linkedin_learning
|
ebed1866eeeef598ca678af7ef11d2ad5ef0850d
|
[
"MIT"
] | null | null | null |
import pandas as pd
import keras
from keras.models import Sequential
from keras.layers import *
training_data_df = pd.read_csv("sales_data_training_scaled.csv")
X = training_data_df.drop('total_earnings', axis=1).values
Y = training_data_df[['total_earnings']].values
# Define the model
model = Sequential()
model.add(Dense(50, input_dim=9, activation='relu', name='layer_1'))
model.add(Dense(100, activation='relu', name='layer_2'))
model.add(Dense(50, activation='relu', name='layer_3'))
model.add(Dense(1, activation='linear', name='output_layer'))
model.compile(loss='mean_squared_error', optimizer='adam')
# Create a TensorBoard logger
logger = keras.callbacks.TensorBoard(
log_dir='logs',
write_graph=True,
histogram_freq=5
)
# Train the model
model.fit(
X,
Y,
epochs=50,
shuffle=True,
verbose=2,
callbacks=[logger]
)
# Load the separate test data set
test_data_df = pd.read_csv("sales_data_test_scaled.csv")
X_test = test_data_df.drop('total_earnings', axis=1).values
Y_test = test_data_df[['total_earnings']].values
test_error_rate = model.evaluate(X_test, Y_test, verbose=0)
print("The mean squared error (MSE) for the test data set is: {}".format(test_error_rate))
| 28.325581
| 90
| 0.7422
|
e484759d1f00fe05116132c6bf12918f5f2c8829
| 147
|
py
|
Python
|
exercises/Interactive Widgets/soln/data_explorer.py
|
kaishuocheng/jupyter
|
96ae75723eb62d30cb02768295422898aace79ef
|
[
"BSD-3-Clause"
] | 748
|
2015-01-05T05:48:49.000Z
|
2022-02-27T01:05:42.000Z
|
exercises/Interactive Widgets/soln/data_explorer.py
|
kaishuocheng/jupyter
|
96ae75723eb62d30cb02768295422898aace79ef
|
[
"BSD-3-Clause"
] | 32
|
2015-04-02T22:25:41.000Z
|
2022-01-18T05:31:46.000Z
|
exercises/Interactive Widgets/soln/data_explorer.py
|
kaishuocheng/jupyter
|
96ae75723eb62d30cb02768295422898aace79ef
|
[
"BSD-3-Clause"
] | 816
|
2015-01-04T04:19:15.000Z
|
2022-03-17T20:57:19.000Z
|
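# Context note (assumed, not in the original snippet): this notebook solution relies on
# earlier cells providing `plt` (matplotlib.pyplot), `interact`/`fixed` (ipywidgets),
# and `iris_data` (e.g. sklearn.datasets.load_iris()).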
def plot_iris(a=None, col1=0, col2=0):
plt.scatter(a[:,col1], a[:,col2])
interact(plot_iris, a=fixed(iris_data.data), col1=(0,3), col2=(0,3));
| 36.75
| 69
| 0.646259
|
9db4ba777ee9fbe75b0add6795bbbccc2de8d1fe
| 3,266
|
py
|
Python
|
tests/unit/states/test_git.py
|
Bacon-Unlimited/salt
|
9b1b791d212a6810c430dd15c63fbce3a4f7e1d6
|
[
"Apache-2.0"
] | 9,425
|
2015-01-01T05:59:24.000Z
|
2022-03-31T20:44:05.000Z
|
tests/unit/states/test_git.py
|
Bacon-Unlimited/salt
|
9b1b791d212a6810c430dd15c63fbce3a4f7e1d6
|
[
"Apache-2.0"
] | 33,507
|
2015-01-01T00:19:56.000Z
|
2022-03-31T23:48:20.000Z
|
tests/unit/states/test_git.py
|
Bacon-Unlimited/salt
|
9b1b791d212a6810c430dd15c63fbce3a4f7e1d6
|
[
"Apache-2.0"
] | 5,810
|
2015-01-01T19:11:45.000Z
|
2022-03-31T02:37:20.000Z
|
"""
:codeauthor: Erik Johnson <erik@saltstack.com>
"""
import logging
import os
import salt.states.git as git_state # Don't potentially shadow GitPython
from tests.support.helpers import with_tempdir
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import DEFAULT, MagicMock, Mock, patch
from tests.support.unit import TestCase
log = logging.getLogger(__name__)
class GitTestCase(TestCase, LoaderModuleMockMixin):
"""
Test cases for salt.states.git
"""
def setup_loader_modules(self):
return {
git_state: {"__env__": "base", "__opts__": {"test": False}, "__salt__": {}}
}
@with_tempdir()
def test_latest_no_diff_for_bare_repo(self, target):
"""
This test ensures that we don't attempt to diff when cloning a repo
using either bare=True or mirror=True.
"""
name = "https://foo.com/bar/baz.git"
gitdir = os.path.join(target, "refs")
isdir_mock = MagicMock(
side_effect=lambda path: DEFAULT if path != gitdir else True
)
branches = ["foo", "bar", "baz"]
tags = ["v1.1.0", "v.1.1.1", "v1.2.0"]
local_head = "b9ef06ab6b7524eb7c27d740dbbd5109c6d75ee4"
remote_head = "eef672c1ec9b8e613905dbcd22a4612e31162807"
git_diff = Mock()
dunder_salt = {
"git.current_branch": MagicMock(return_value=branches[0]),
"git.config_get_regexp": MagicMock(return_value={}),
"git.diff": git_diff,
"git.fetch": MagicMock(return_value={}),
"git.is_worktree": MagicMock(return_value=False),
"git.list_branches": MagicMock(return_value=branches),
"git.list_tags": MagicMock(return_value=tags),
"git.remote_refs": MagicMock(return_value={"HEAD": remote_head}),
"git.remotes": MagicMock(
return_value={"origin": {"fetch": name, "push": name}}
),
"git.rev_parse": MagicMock(side_effect=git_state.CommandExecutionError()),
"git.revision": MagicMock(return_value=local_head),
"git.version": MagicMock(return_value="1.8.3.1"),
}
with patch("os.path.isdir", isdir_mock), patch.dict(
git_state.__salt__, dunder_salt
):
result = git_state.latest(
name=name,
target=target,
mirror=True, # mirror=True implies bare=True
)
assert result["result"] is True, result
git_diff.assert_not_called()
def test_latest_without_target(self):
"""
Test latest when called without passing target
"""
name = "https://foo.com/bar/baz.git"
self.assertRaises(TypeError, git_state.latest, name)
def test_detached_without_target(self):
"""
Test detached when called without passing target
"""
name = "https://foo.com/bar/baz.git"
self.assertRaises(TypeError, git_state.detached, name)
def test_cloned_without_target(self):
"""
Test cloned when called without passing target
"""
name = "https://foo.com/bar/baz.git"
self.assertRaises(TypeError, git_state.cloned, name)
| 35.11828
| 87
| 0.615126
|
2135cc8ab7c089debd1eba0fe31b58fbf5861a80
| 1,868
|
py
|
Python
|
var/spack/repos/builtin/packages/py-fava/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 11
|
2015-10-04T02:17:46.000Z
|
2018-02-07T18:23:00.000Z
|
var/spack/repos/builtin/packages/py-fava/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 22
|
2017-08-01T22:45:10.000Z
|
2022-03-10T07:46:31.000Z
|
var/spack/repos/builtin/packages/py-fava/package.py
|
player1537-forks/spack
|
822b7632222ec5a91dc7b7cda5fc0e08715bd47c
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 4
|
2016-06-10T17:57:39.000Z
|
2018-09-11T04:59:38.000Z
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyFava(PythonPackage):
"""Fava is a web interface for the double-entry bookkeeping software
Beancount with a focus on features and usability."""
homepage = "https://beancount.github.io/fava/"
pypi = "fava/fava-1.18.tar.gz"
version('1.18', sha256='21336b695708497e6f00cab77135b174c51feb2713b657e0e208282960885bf5')
# For some reason Fava adds a whole bunch of executables to
# its bin directory, and this causes clashes when loading
# the module.
extends('python', ignore='bin/^(?!fava).*')
# Some of the dependencies are not listed as required at
# build or run time, but actually are.
# - py-setuptools
# - py-importlib
# - py-pytest
depends_on('python@3.6:', type=('build', 'run'))
depends_on('py-setuptools', type=('build', 'run'))
depends_on('py-setuptools-scm', type=('build'))
depends_on('py-babel@2.6.0:', type=('build', 'run'))
depends_on('py-beancount@2.3.0:', type=('build', 'run'))
depends_on('py-cheroot', type=('build', 'run'))
depends_on('py-click', type=('build', 'run'))
depends_on('py-flask@0.10.1:', type=('build', 'run'))
depends_on('py-flask-babel@1.0.0:', type=('build', 'run'))
depends_on('py-jinja2@2.10:', type=('build', 'run'))
depends_on('py-markdown2@2.3.0:', type=('build', 'run'))
depends_on('py-ply', type=('build', 'run'))
depends_on('py-pytest', type=('build', 'run'))
depends_on('py-simplejson@3.2.0:', type=('build', 'run'))
depends_on('py-werkzeug@0.15.0:', type=('build', 'run'))
| 42.454545
| 95
| 0.615632
|
c6380470d975e516f9d2c3603bb1076d2fd39036
| 4,632
|
py
|
Python
|
mobula/Net.py
|
wkcn/mobula
|
4eec938d6477776f5f2d68bcf41de83fb8da5195
|
[
"MIT"
] | 47
|
2017-07-15T02:13:18.000Z
|
2022-01-01T09:37:59.000Z
|
mobula/Net.py
|
wkcn/mobula
|
4eec938d6477776f5f2d68bcf41de83fb8da5195
|
[
"MIT"
] | 3
|
2018-06-22T13:55:12.000Z
|
2020-01-29T01:41:13.000Z
|
mobula/Net.py
|
wkcn/mobula
|
4eec938d6477776f5f2d68bcf41de83fb8da5195
|
[
"MIT"
] | 8
|
2017-09-03T12:42:54.000Z
|
2020-09-27T03:38:59.000Z
|
#coding=utf-8
from .Defines import *
from .layers.utils.MultiInput import *
from .layers.utils.MultiOutput import *
from .layers.utils.Saver import *
from . import solvers
from . import wrapper
import functools
import signal
import weakref
try:
import queue
except:
import Queue as queue
class Net(object):
def __init__(self):
self.topo = []
self.layers = dict()
self.set_solver(solvers.SGD())
self.phase = TRAIN
# signal.signal(signal.SIGINT, self.signal_handler)
def set_loss(self, lossLayers):
if type(lossLayers) != list:
lossLayers = [lossLayers]
        # Pass 1: count, for each layer, how many downstream layers consume its output
q = queue.Queue()
for l in lossLayers:
q.put(l)
vis = set()
cs = dict() # in degree
while not q.empty():
l = q.get()
if l in vis:
continue
vis.add(l)
# if layer l has input
# Data.model is None
# l.model may be Layer or MultiInput
if l.model is not None:
for md in l.model.input_models():
cs[md] = cs.get(md, 0) + 1
q.put(md)
        # Pass 2: walk back from the loss layers (Kahn-style), then reverse for forward order
q = queue.Queue()
for l in lossLayers:
q.put(l)
st = []
while not q.empty():
l = q.get()
st.append(l)
if l.model is not None:
for md in l.model.input_models():
cs[md] -= 1
if cs[md] == 0:
q.put(md)
self.topo = st[::-1]
self.layers = dict()
for l in self.topo:
self.layers[l.name] = l
l.forward_time = 0.0
l.backward_time = 0.0
self.forward_times = 0
self.backward_times = 0
            # Avoid a bidirectional reference cycle between Net and its layers
l.net = weakref.proxy(self)
self.reshape()
for l in lossLayers:
l.dY = np.ones(l.Y.shape)
self.init_solver()
def set_solver(self, solver):
self.solver = solver
self.init_solver()
def reshape(self):
for l in self.topo:
l.reshape()
def init_solver(self):
if self.solver is not None:
for l in self.topo:
self.solver.init(l)
def forward(self):
self.forward_times += 1
for l in self.topo:
t = time.time()
l.forward()
l.forward_time += time.time() - t
def backward(self):
self.backward_times += 1
self.solver.update_lr(self.backward_times)
for l in self.topo[::-1]:
t = time.time()
num_next_layers = len(l.next_layers)
if num_next_layers > 0:
if num_next_layers == 1:
l.dY = l.next_layers[0].dX
else:
l.dY = np.zeros(l.Y.shape)
for e in l.next_layers:
l.dY += e.dX
l.dY = l.dY.reshape(l.Y.shape)
# compute the gradient dX of layer l
l.backward()
# use the solver to update weights of layer l
if l.lr > 0:
self.solver.update(l)
l.backward_time += time.time() - t
def time(self):
if self.forward_times == 0 or self.backward_times == 0:
return
print ("name\t|forward_time\t|backward_time\t|forward_mean\t|backward_mean\t|forward_times: %d, backward_times: %d" % (self.forward_times, self.backward_times))
for l in self.topo:
print ("%s\t|%f\t|%f\t|%f\t|%f" % (l.name, l.forward_time, l.backward_time, l.forward_time / self.forward_times, l.backward_time / self.backward_times))
def save(self, filename):
# Save the learning parameters of network by name
print ("Saving the parameters of the network to %s:" % filename)
save_layers(filename, self.topo, info = True)
print ("Saving Finished :-)")
def load(self, filename):
# Load the learning parameters of network by name
print ("Loading the parameters of the network from %s:" % filename)
load_layers(filename, self.topo, info = True)
print ("Loading Finished :-)")
def __getitem__(self, name):
return wrapper.get_layer(name)
@property
def lr(self):
return self.solver.lr
@lr.setter
def lr(self, value):
self.solver.base_lr = value
def signal_handler(self, signal, frame):
# TODO: Exit to Save
print ("Exit")
pass
# For compatibility
Net.setLoss = Net.set_loss
| 30.88
| 168
| 0.530009
|
647716fe60d32877837b9b44ced048b662a0498e
| 1,635
|
py
|
Python
|
src/cli.py
|
jconradhanson/BEAT
|
47a828c486e674323782c11b78be63aae003c45d
|
[
"MIT"
] | null | null | null |
src/cli.py
|
jconradhanson/BEAT
|
47a828c486e674323782c11b78be63aae003c45d
|
[
"MIT"
] | null | null | null |
src/cli.py
|
jconradhanson/BEAT
|
47a828c486e674323782c11b78be63aae003c45d
|
[
"MIT"
] | null | null | null |
import logging
import argparse
from beat import beat
from definitions import path_log
# LOGGING CONFIGURATION
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p'
)
logging.root.addHandler(logging.FileHandler(path_log, mode='w', encoding='UTF-8'))
logging.getLogger("easyprocess").setLevel(logging.WARNING)
# COMMAND LINE ARGUMENT PARSER
parser = argparse.ArgumentParser()
# POSITIONAL ARGS
parser.add_argument('subject', type=str,
help='the subject you want to search')
parser.add_argument('state_code', type=str,
help='the two letter state abbreviation for where you want to search the subject')
# OPTIONAL ARGS
parser.add_argument('-c', '--city', type=str,
help='the city you want to begin the search at (cities are searched alphabetically)')
args = parser.parse_args()
subject = args.subject.strip()
state_code = args.state_code.strip().upper()
# VALIDATE ARG VALUES & RUN BEAT
if len(state_code) != 2:
print(f"\"{state_code}\"")
logging.error('State Code is invalid. Must be two letters.')
elif not isinstance(state_code, str):
logging.error('State Code is invalid. Must be a string.')
elif not isinstance(subject, str):
logging.error('Subject is invalid. Must be a string.')
else:
if args.city:
city = args.city.strip()
if not isinstance(city, str):
logging.error('City is invalid. Must be a string.')
else:
beat(subject=subject, state_code=state_code, start_city=city)
else:
beat(subject=subject, state_code=state_code)
| 35.543478
| 105
| 0.687462
|
653713db78428bc85ee72e51d56aa494f9404d44
| 2,563
|
py
|
Python
|
experiments/layer-outputs/rnn-mnist.py
|
sabuj7177/TensorFI2
|
6272a3951793255815506f143748bdd9345c1d2f
|
[
"MIT"
] | 1
|
2021-05-22T19:34:57.000Z
|
2021-05-22T19:34:57.000Z
|
experiments/layer-outputs/rnn-mnist.py
|
sabuj7177/TensorFI2
|
6272a3951793255815506f143748bdd9345c1d2f
|
[
"MIT"
] | 7
|
2021-05-17T01:50:21.000Z
|
2022-02-10T13:45:03.000Z
|
experiments/layer-outputs/rnn-mnist.py
|
sabuj7177/TensorFI2
|
6272a3951793255815506f143748bdd9345c1d2f
|
[
"MIT"
] | 1
|
2022-03-01T19:29:28.000Z
|
2022-03-01T19:29:28.000Z
|
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
import numpy as np
import time, sys, math, random
from src import tensorfi2 as tfi
(train_images, train_labels), (test_images, test_labels) = datasets.mnist.load_data()
train_images, test_images = train_images / 255.0, test_images / 255.0
# Input sequences to the RNN are the rows of each MNIST digit (each row of pixels is one timestep); the model predicts the digit's label.
model = models.Sequential()
model.add(layers.RNN(layers.LSTMCell(64), input_shape=(None, 28)))
model.add(layers.BatchNormalization())
model.add(layers.Dense(10))
model.compile(optimizer='sgd',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
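# Shape note (a sketch of the assumption above): each image is fed as a sequence of
# 28 timesteps with 28 features, i.e. batches of shape (batch_size, 28, 28), which
# matches input_shape=(None, 28) where None is the variable timestep dimension.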
'''
# Change to True if you want to train from scratch
train = False
if(train):
# Save the untrained weights for future training with modified dataset
model.save_weights('h5/rnn-untrained.h5')
model.fit(train_images, train_labels, batch_size=100, epochs=10,
validation_data=(test_images, test_labels))
model.save_weights('h5/rnn-trained.h5')
else:
model.load_weights('h5/rnn-trained.h5')
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print("Accuracy before faults:", test_acc)
tfi.inject(model=model, confFile="confFiles/sample.yaml")
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print("Accuracy after faults:", test_acc)
'''
conf = sys.argv[1]
filePath = sys.argv[2]
filePath = os.path.join(filePath, "res.csv")
f = open(filePath, "w")
numFaults = int(sys.argv[3])
numInjections = int(sys.argv[4])
offset = 10
num = test_images.shape[0]
totsdc = 0.0
model.load_weights('h5/rnn-trained.h5')
ind = []
init = random.sample(range(num), numInjections+offset)
for i in init:
test_loss, test_acc = model.evaluate(test_images[i:i+1], test_labels[i:i+1], verbose=0)
if(test_acc == 1.):
ind.append(i)
ind = ind[:numInjections]
start = time.time()
for i in range(numFaults):
model.load_weights('h5/rnn-trained.h5')
sdc = 0.
for i in ind:
res = tfi.inject(model=model, x_test=test_images[i:i+1], confFile=conf)
if(res != test_labels[i:i+1]):
sdc = sdc + 1.
f.write(str(sdc/numInjections))
f.write("\n")
totsdc = totsdc + sdc
f.write("\n")
f.write(str(totsdc/(numFaults*numInjections)))
f.write("\n")
f.write("Time for %d injections: %f seconds" % (numFaults*numInjections, time.time() - start))
f.close()
| 29.125
| 141
| 0.715568
|
d3e8418d06138c6c7d035a29acbb7346c90cffce
| 414
|
py
|
Python
|
aliexpress/api/rest/CainiaoGlobalSolutionInquiry.py
|
bayborodin/aliexpress-sdk
|
89935adf46412d8d054fa80a19153971279c4106
|
[
"MIT"
] | 3
|
2021-03-10T16:46:43.000Z
|
2022-03-29T15:28:50.000Z
|
aliexpress/api/rest/CainiaoGlobalSolutionInquiry.py
|
bayborodin/aliexpress-sdk
|
89935adf46412d8d054fa80a19153971279c4106
|
[
"MIT"
] | null | null | null |
aliexpress/api/rest/CainiaoGlobalSolutionInquiry.py
|
bayborodin/aliexpress-sdk
|
89935adf46412d8d054fa80a19153971279c4106
|
[
"MIT"
] | 2
|
2021-10-30T17:09:34.000Z
|
2021-11-25T11:50:52.000Z
|
from aliexpress.api.base import RestApi
class CainiaoGlobalSolutionInquiryRequest(RestApi):
def __init__(self, domain="gw.api.taobao.com", port=80):
RestApi.__init__(self, domain, port)
self.locale = None
self.package_params = None
self.seller_info_param = None
self.trade_order_param = None
def getapiname(self):
return "cainiao.global.solution.inquiry"
| 29.571429
| 60
| 0.695652
|
27f59ff1173252c9e4d069bb2411b98acd62252f
| 1,922
|
py
|
Python
|
aliyun-python-sdk-foas/aliyunsdkfoas/request/v20181111/GetInstanceExceptionsRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 1,001
|
2015-07-24T01:32:41.000Z
|
2022-03-25T01:28:18.000Z
|
aliyun-python-sdk-foas/aliyunsdkfoas/request/v20181111/GetInstanceExceptionsRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 363
|
2015-10-20T03:15:00.000Z
|
2022-03-08T12:26:19.000Z
|
aliyun-python-sdk-foas/aliyunsdkfoas/request/v20181111/GetInstanceExceptionsRequest.py
|
yndu13/aliyun-openapi-python-sdk
|
12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5
|
[
"Apache-2.0"
] | 682
|
2015-09-22T07:19:02.000Z
|
2022-03-22T09:51:46.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
from aliyunsdkfoas.endpoint import endpoint_data
class GetInstanceExceptionsRequest(RoaRequest):
def __init__(self):
RoaRequest.__init__(self, 'foas', '2018-11-11', 'GetInstanceExceptions','foas')
self.set_protocol_type('https')
self.set_uri_pattern('/api/v2/projects/[projectName]/jobs/[jobName]/instances/[instanceId]/exceptions')
self.set_method('GET')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_projectName(self):
return self.get_path_params().get('projectName')
def set_projectName(self,projectName):
self.add_path_param('projectName',projectName)
def get_instanceId(self):
return self.get_path_params().get('instanceId')
def set_instanceId(self,instanceId):
self.add_path_param('instanceId',instanceId)
def get_jobName(self):
return self.get_path_params().get('jobName')
def set_jobName(self,jobName):
self.add_path_param('jobName',jobName)
| 36.961538
| 106
| 0.765349
|
0e4e07080b9bdbc59afa51fc2f8121dce22f3ea4
| 8,881
|
py
|
Python
|
htdfsdk/web3/_utils/rpc_abi.py
|
youngqqcn/htdfsdk
|
c22f213a967c8233bb6ccfb01bf148112efd44db
|
[
"MIT"
] | 2
|
2021-01-21T01:46:29.000Z
|
2021-03-12T05:59:19.000Z
|
htdfsdk/web3/_utils/rpc_abi.py
|
youngqqcn/htdfsdk
|
c22f213a967c8233bb6ccfb01bf148112efd44db
|
[
"MIT"
] | null | null | null |
htdfsdk/web3/_utils/rpc_abi.py
|
youngqqcn/htdfsdk
|
c22f213a967c8233bb6ccfb01bf148112efd44db
|
[
"MIT"
] | null | null | null |
from typing import (
Any,
Callable,
Dict,
Iterable,
Sequence,
Tuple,
)
from eth_typing import (
TypeStr,
)
from eth_utils import (
to_dict,
)
from eth_utils.curried import (
apply_formatter_at_index,
)
from eth_utils.toolz import (
curry,
)
from htdfsdk.web3._utils.abi import (
map_abi_data,
)
from htdfsdk.web3.types import (
RPCEndpoint,
)
class RPC:
# admin
admin_addPeer = RPCEndpoint("admin_addPeer")
admin_datadir = RPCEndpoint("admin_datadir")
admin_nodeInfo = RPCEndpoint("admin_nodeInfo")
admin_peers = RPCEndpoint("admin_peers")
admin_startRPC = RPCEndpoint("admin_startRPC")
admin_startWS = RPCEndpoint("admin_startWS")
admin_stopRPC = RPCEndpoint("admin_stopRPC")
admin_stopWS = RPCEndpoint("admin_stopWS")
# eth
eth_accounts = RPCEndpoint("eth_accounts")
eth_blockNumber = RPCEndpoint("eth_blockNumber")
eth_call = RPCEndpoint("eth_call")
eth_chainId = RPCEndpoint("eth_chainId")
eth_coinbase = RPCEndpoint("eth_coinbase")
eth_estimateGas = RPCEndpoint("eth_estimateGas")
eth_gasPrice = RPCEndpoint("eth_gasPrice")
eth_getBalance = RPCEndpoint("eth_getBalance")
eth_getBlockByHash = RPCEndpoint("eth_getBlockByHash")
eth_getBlockByNumber = RPCEndpoint("eth_getBlockByNumber")
eth_getBlockTransactionCountByHash = RPCEndpoint("eth_getBlockTransactionCountByHash")
eth_getBlockTransactionCountByNumber = RPCEndpoint("eth_getBlockTransactionCountByNumber")
eth_getCode = RPCEndpoint("eth_getCode")
eth_getFilterChanges = RPCEndpoint("eth_getFilterChanges")
eth_getFilterLogs = RPCEndpoint("eth_getFilterLogs")
eth_getLogs = RPCEndpoint("eth_getLogs")
eth_getProof = RPCEndpoint("eth_getProof")
eth_getStorageAt = RPCEndpoint("eth_getStorageAt")
eth_getTransactionByBlockHashAndIndex = RPCEndpoint("eth_getTransactionByBlockHashAndIndex")
eth_getTransactionByBlockNumberAndIndex = RPCEndpoint("eth_getTransactionByBlockNumberAndIndex")
eth_getTransactionByHash = RPCEndpoint("eth_getTransactionByHash")
eth_getTransactionCount = RPCEndpoint("eth_getTransactionCount")
eth_getTransactionReceipt = RPCEndpoint("eth_getTransactionReceipt")
eth_getUncleByBlockHashAndIndex = RPCEndpoint("eth_getUncleByBlockHashAndIndex")
eth_getUncleByBlockNumberAndIndex = RPCEndpoint("eth_getUncleByBlockNumberAndIndex")
eth_getUncleCountByBlockHash = RPCEndpoint("eth_getUncleCountByBlockHash")
eth_getUncleCountByBlockNumber = RPCEndpoint("eth_getUncleCountByBlockNumber")
eth_getWork = RPCEndpoint("eth_getWork")
eth_hashrate = RPCEndpoint("eth_hashrate")
eth_mining = RPCEndpoint("eth_mining")
eth_newBlockFilter = RPCEndpoint("eth_newBlockFilter")
eth_newFilter = RPCEndpoint("eth_newFilter")
eth_newPendingTransactionFilter = RPCEndpoint("eth_newPendingTransactionFilter")
eth_protocolVersion = RPCEndpoint("eth_protocolVersion")
eth_sendRawTransaction = RPCEndpoint("eth_sendRawTransaction")
eth_sendTransaction = RPCEndpoint("eth_sendTransaction")
eth_sign = RPCEndpoint("eth_sign")
eth_signTransaction = RPCEndpoint("eth_signTransaction")
eth_signTypedData = RPCEndpoint("eth_signTypedData")
eth_submitHashrate = RPCEndpoint("eth_submitHashrate")
eth_submitWork = RPCEndpoint("eth_submitWork")
eth_syncing = RPCEndpoint("eth_syncing")
eth_uninstallFilter = RPCEndpoint("eth_uninstallFilter")
# evm
evm_mine = RPCEndpoint("evm_mine")
evm_reset = RPCEndpoint("evm_reset")
evm_revert = RPCEndpoint("evm_revert")
evm_snapshot = RPCEndpoint("evm_snapshot")
# miner
miner_makeDag = RPCEndpoint("miner_makeDag")
miner_setExtra = RPCEndpoint("miner_setExtra")
miner_setEtherbase = RPCEndpoint("miner_setEtherbase")
miner_setGasPrice = RPCEndpoint("miner_setGasPrice")
miner_start = RPCEndpoint("miner_start")
miner_stop = RPCEndpoint("miner_stop")
miner_startAutoDag = RPCEndpoint("miner_startAutoDag")
miner_stopAutoDag = RPCEndpoint("miner_stopAutoDag")
# net
net_listening = RPCEndpoint("net_listening")
net_peerCount = RPCEndpoint("net_peerCount")
net_version = RPCEndpoint("net_version")
# parity
parity_addReservedPeer = RPCEndpoint("parity_addReservedPeer")
parity_enode = RPCEndpoint("parity_enode")
parity_listStorageKeys = RPCEndpoint("parity_listStorageKeys")
parity_netPeers = RPCEndpoint("parity_netPeers")
parity_mode = RPCEndpoint("parity_mode")
parity_setMode = RPCEndpoint("parity_setMode")
# personal
personal_ecRecover = RPCEndpoint("personal_ecRecover")
personal_importRawKey = RPCEndpoint("personal_importRawKey")
personal_listAccounts = RPCEndpoint("personal_listAccounts")
personal_listWallets = RPCEndpoint("personal_listWallets")
personal_lockAccount = RPCEndpoint("personal_lockAccount")
personal_newAccount = RPCEndpoint("personal_newAccount")
personal_sendTransaction = RPCEndpoint("personal_sendTransaction")
personal_sign = RPCEndpoint("personal_sign")
personal_signTypedData = RPCEndpoint("personal_signTypedData")
personal_unlockAccount = RPCEndpoint("personal_unlockAccount")
# testing
testing_timeTravel = RPCEndpoint("testing_timeTravel")
# trace
trace_block = RPCEndpoint("trace_block")
trace_call = RPCEndpoint("trace_call")
trace_filter = RPCEndpoint("trace_filter")
trace_rawTransaction = RPCEndpoint("trace_rawTransaction")
trace_replayBlockTransactions = RPCEndpoint("trace_replayBlockTransactions")
trace_replayTransaction = RPCEndpoint("trace_replayTransaction")
trace_transaction = RPCEndpoint("trace_transaction")
# txpool
txpool_content = RPCEndpoint("txpool_content")
txpool_inspect = RPCEndpoint("txpool_inspect")
txpool_status = RPCEndpoint("txpool_status")
# web3
web3_clientVersion = RPCEndpoint("web3_clientVersion")
TRANSACTION_PARAMS_ABIS = {
'data': 'bytes',
'from': 'address',
'gas': 'uint',
'gasPrice': 'uint',
'nonce': 'uint',
'to': 'address',
'value': 'uint',
}
FILTER_PARAMS_ABIS = {
'to': 'address',
'address': 'address[]',
}
TRACE_PARAMS_ABIS = {
'to': 'address',
'from': 'address',
}
RPC_ABIS = {
# eth
'eth_call': TRANSACTION_PARAMS_ABIS,
'eth_estimateGas': TRANSACTION_PARAMS_ABIS,
'eth_getBalance': ['address', None],
'eth_getBlockByHash': ['bytes32', 'bool'],
'eth_getBlockTransactionCountByHash': ['bytes32'],
'eth_getCode': ['address', None],
'eth_getLogs': FILTER_PARAMS_ABIS,
'eth_getStorageAt': ['address', 'uint', None],
'eth_getProof': ['address', 'uint[]', None],
'eth_getTransactionByBlockHashAndIndex': ['bytes32', 'uint'],
'eth_getTransactionByHash': ['bytes32'],
'eth_getTransactionCount': ['address', None],
'eth_getTransactionReceipt': ['bytes32'],
'eth_getUncleCountByBlockHash': ['bytes32'],
'eth_newFilter': FILTER_PARAMS_ABIS,
'eth_sendRawTransaction': ['bytes'],
'eth_sendTransaction': TRANSACTION_PARAMS_ABIS,
'eth_signTransaction': TRANSACTION_PARAMS_ABIS,
'eth_sign': ['address', 'bytes'],
'eth_signTypedData': ['address', None],
'eth_submitHashrate': ['uint', 'bytes32'],
'eth_submitWork': ['bytes8', 'bytes32', 'bytes32'],
# personal
'personal_sendTransaction': TRANSACTION_PARAMS_ABIS,
'personal_lockAccount': ['address'],
'personal_unlockAccount': ['address', None, None],
'personal_sign': [None, 'address', None],
'personal_signTypedData': [None, 'address', None],
'trace_call': TRACE_PARAMS_ABIS,
# parity
'parity_listStorageKeys': ['address', None, None, None],
}
@curry
def apply_abi_formatters_to_dict(
normalizers: Sequence[Callable[[TypeStr, Any], Tuple[TypeStr, Any]]],
abi_dict: Dict[str, Any],
data: Dict[Any, Any]
) -> Dict[Any, Any]:
fields = list(set(abi_dict.keys()) & set(data.keys()))
formatted_values = map_abi_data(
normalizers,
[abi_dict[field] for field in fields],
[data[field] for field in fields],
)
formatted_dict = dict(zip(fields, formatted_values))
return dict(data, **formatted_dict)
@to_dict
def abi_request_formatters(
normalizers: Sequence[Callable[[TypeStr, Any], Tuple[TypeStr, Any]]],
abis: Dict[RPCEndpoint, Any],
) -> Iterable[Tuple[RPCEndpoint, Callable[..., Any]]]:
for method, abi_types in abis.items():
if isinstance(abi_types, list):
yield method, map_abi_data(normalizers, abi_types)
elif isinstance(abi_types, dict):
single_dict_formatter = apply_abi_formatters_to_dict(normalizers, abi_types)
yield method, apply_formatter_at_index(single_dict_formatter, 0)
else:
raise TypeError("ABI definitions must be a list or dictionary, got %r" % abi_types)
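# Usage sketch (assumed, mirroring the tables above): given a sequence of normalizers,
#   formatters = abi_request_formatters(normalizers, RPC_ABIS)
# yields one callable per method; e.g. formatters['eth_getBalance'] would normalize the
# first positional param as an 'address' and leave the second (block identifier) untouched.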
| 37.952991
| 100
| 0.734715
|
7ff2b96920d01e8487d316105914b12ef91e2121
| 67
|
py
|
Python
|
src/drugex/api/designer/__init__.py
|
martin-sicho/DrugEx
|
c303cb675e2d4a99b12f1dfa83cd7e5c5e3550df
|
[
"MIT"
] | 5
|
2020-05-29T13:19:04.000Z
|
2022-03-09T12:54:58.000Z
|
src/drugex/api/designer/__init__.py
|
martin-sicho/DrugEx
|
c303cb675e2d4a99b12f1dfa83cd7e5c5e3550df
|
[
"MIT"
] | null | null | null |
src/drugex/api/designer/__init__.py
|
martin-sicho/DrugEx
|
c303cb675e2d4a99b12f1dfa83cd7e5c5e3550df
|
[
"MIT"
] | 1
|
2020-05-20T13:15:42.000Z
|
2020-05-20T13:15:42.000Z
|
"""
__init__.py
Created by: Martin Sicho
On: 25-11-19, 15:16
"""
| 8.375
| 24
| 0.626866
|
0ee9f06b94ddb47e72b3105cdacfc4906a2352f1
| 6,399
|
py
|
Python
|
basewatch_alert_service.py
|
sskirch/basewatch
|
2ccb3466a524966baf269659708121d5fd909c62
|
[
"Apache-2.0"
] | null | null | null |
basewatch_alert_service.py
|
sskirch/basewatch
|
2ccb3466a524966baf269659708121d5fd909c62
|
[
"Apache-2.0"
] | null | null | null |
basewatch_alert_service.py
|
sskirch/basewatch
|
2ccb3466a524966baf269659708121d5fd909c62
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import time
import math
import sys
from twilio.rest import Client
from ConfigParser import SafeConfigParser
import smtplib
from smtplib import SMTPException
from datetime import datetime, timedelta
import sensors
import triggers
conn = None
count = 0
msg_time = {}
SMS_from = None
SMS_to = None
Twillio_ACCOUNT_SID = None
Twillio_AUTH_TOKEN = None
email_from = None
email_to = None
smtp_url = None
def setup():
global SMS_from
global SMS_to
global Twillio_ACCOUNT_SID
global Twillio_AUTH_TOKEN
global email_from
global email_to
global smtp_url
config = SafeConfigParser()
config.read('config.ini')
SMS_from = config.get('sms', 'sms_from')
SMS_to = config.get('sms', 'sms_to')
Twillio_ACCOUNT_SID = config.get('sms', 'account_sid')
Twillio_AUTH_TOKEN = config.get('sms', 'auth_token')
email_from = config.get('email', 'email_from')
email_to = config.get('email', 'email_to')
smtp_url = config.get('email', 'url')
def check_msg(msg, force):
global count
global msg_time
    if count < 600 and not force: #Don't do anything for the first 10 minutes, so we can get a good baseline.
return True
elif msg_time.has_key(msg) == False: #If the key does not exist, then this is the first time we are sending the message. Create the key with the time and send
msg_time.update({msg:time.time()})
elif (time.time() - msg_time[msg]) < 3600 and not force: #Only allow messages to be sent once an hour.
return True
else:
msg_time[msg] = time.time()
return False
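# Behavior sketch of check_msg (illustrative): with the 1-second main loop below, alerts are
# suppressed for the first ~10 minutes (count < 600) to build a baseline, and afterwards a
# given message text is sent at most once per hour unless force=True.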
def smsalert(msg, data, force=False):
global count
global msg_time
global SMS_from
global SMS_to
global Twillio_ACCOUNT_SID
global Twillio_AUTH_TOKEN
if check_msg(msg,force): return
client = Client(Twillio_ACCOUNT_SID, Twillio_AUTH_TOKEN)
client.messages.create(
to=SMS_to,
from_=SMS_from,
body=msg + ' ' + data,
)
print 'SMS sent ' + msg
#todo: This has never been tested
def emailalert(msg, data, force=False):
global count
global msg_time
global email_from
global email_to
if check_msg(msg,force): return
message = """From: From Person <""" + email_from + """>
To: To Person <""" + email_to + """>
Subject: """ + msg + """
This is a test e-mail message.
"""
try:
smtpObj = smtplib.SMTP('localhost')
smtpObj.sendmail(email_from, email_to, message)
print "Successfully sent email: " + msg
except SMTPException:
print "Error: unable to send email"
sensor_gas = sensors.sensor_PCF8591('Gas',19,0,30,0)
sensor_water = sensors.sensor_water('Water', 17,None,None,0)
sensor_flame = sensors.sensor_PCF8591('Flame', 16,2,30,0)
sensor_smoke = sensors.sensor_PCF8591('Smoke', 20,1,30,0)
sensor_co = sensors.sensor_PCF8591('CO', 18,3,30,0)
sensor_temp = sensors.sensor_temp('Temp', None,None,10,0)
drain_filler_trigger = triggers.trigger('Drain Filler',27)
def loop():
water_count = 0
print "\n" + 'Cycle Started '
drain_fill_time = datetime.now() + timedelta(minutes=1)
while True:
global count
        #Turn a solenoid on once a week for 60 seconds
if datetime.now() > drain_fill_time:
print "\n" + 'Solenoid on'
drain_filler_trigger.on()
time.sleep(60)
drain_filler_trigger.off()
print "\n" + 'Solenoid off'
drain_fill_time = datetime.now() + timedelta(weeks=1)
#print "\n" + 'count: ' + str(count)
if (count == 0 or (count % 86400 == 0)) : smsalert('Basewatcher Heartbeat', '', True) # Send heartbeat on startup and once a day
#once every 10 minutes
if count % (60 * 10) == 0 and count > 0 :
#print "Ten Minutes"
if sensor_flame.check_analog_alert():
print "\r" + sensor_flame.sensor_name + ' Alert!!!!' + "\r"
smsalert(sensor_flame.sensor_name + ' Alert!!!!', str(sensor_flame.get_analog_data()))
print "flame: " + str(sensor_flame.sensor_que)
if sensor_gas.check_analog_alert():
print "\r" + sensor_gas.sensor_name + ' Alert!!!!' + "\r"
smsalert(sensor_gas.sensor_name + ' Alert!!!!', str(sensor_gas.get_analog_data()))
print "gas: " + str(sensor_gas.sensor_que)
if sensor_co.check_analog_alert():
print "\r" + sensor_co.sensor_name + ' Alert!!!!' + "\r"
smsalert(sensor_co.sensor_name + ' Alert!!!!', str(sensor_co.get_analog_data()))
print "CO: " + str(sensor_co.sensor_que)
if sensor_smoke.check_analog_alert():
print "\r" + sensor_smoke.sensor_name + ' Alert!!!!' + "\r"
smsalert(sensor_smoke.sensor_name + ' Alert!!!!', str(sensor_smoke.get_analog_data()))
print "Smoke: " + str(sensor_smoke.sensor_que)
if sensor_temp.check_analog_alert():
print "\r" + sensor_temp.sensor_name + ' Alert!!!!' + "\r"
smsalert(sensor_temp.sensor_name + ' Alert!!!!', str(sensor_temp.get_analog_data()))
print "Temp: " + str(sensor_temp.sensor_que)
#once every hour
if count % (60 * 60) == 0 and count > 0 :
print "One Hour"
#sensor_gas.logger()
#sensor_flame.logger()
#sensor_co.logger()
#sensor_smoke.logger()
#sensor_water.logger()
#sensor_temp.logger()
#once every Minute
#if (count % 60 == 0 and count > 0) :
# print "One Minute"
#Once Every Second:
if sensor_flame.check_binary_alert() :
print "\r" + sensor_flame.sensor_name + 'Binary Alert!!!!' + "\r"
smsalert(sensor_flame.sensor_name + ' Alert!!!!', 'True')
if sensor_gas.check_binary_alert():
print "\r" + sensor_gas.sensor_name + 'Binary Alert!!!!' + "\r"
smsalert(sensor_gas.sensor_name + ' Alert!!!!', 'True')
if sensor_co.check_binary_alert():
print "\r" + sensor_co.sensor_name + 'Binary Alert!!!!' + "\r"
smsalert(sensor_co.sensor_name + ' Alert!!!!', 'True')
if sensor_smoke.check_binary_alert():
print "\r" + sensor_smoke.sensor_name + 'Binary Alert!!!!' + "\r"
smsalert(sensor_smoke.sensor_name + ' Alert!!!!', 'True')
if sensor_water.check_binary_alert():
print "\r" + sensor_water.sensor_name + 'Binary Alert!!!!' + "\r"
smsalert(sensor_water.sensor_name + ' Alert!!!!', 'True')
if sensor_temp.check_binary_alert():
print "\r" + sensor_temp.sensor_name + 'Binary Alert!!!!' + "\r"
smsalert(sensor_temp.sensor_name + ' Alert!!!!', 'True')
count += 1
time.sleep(1)
if __name__ == '__main__':
setup()
loop()
| 28.824324
| 169
| 0.66276
|
3f1536413b6b2d2d00a9aa06678a6306e7ac3bd4
| 2,386
|
py
|
Python
|
rllab/envs/base.py
|
prosello/rllab
|
8677356874d41eb9354785500b554eaf635ece2e
|
[
"MIT"
] | 10
|
2017-09-05T17:57:26.000Z
|
2021-11-10T20:54:09.000Z
|
rllab/envs/base.py
|
prosello/rllab
|
8677356874d41eb9354785500b554eaf635ece2e
|
[
"MIT"
] | null | null | null |
rllab/envs/base.py
|
prosello/rllab
|
8677356874d41eb9354785500b554eaf635ece2e
|
[
"MIT"
] | 18
|
2017-04-11T22:29:39.000Z
|
2021-01-11T13:45:50.000Z
|
from .env_spec import EnvSpec
import collections
class Env(object):
def step(self, action):
"""
Run one timestep of the environment's dynamics. When end of episode
is reached, reset() should be called to reset the environment's internal state.
Input
-----
action : an action provided by the environment
Outputs
-------
(observation, reward, done, info)
observation : agent's observation of the current environment
reward [Float] : amount of reward due to the previous action
done : a boolean, indicating whether the episode has ended
info : a dictionary containing other diagnostic information from the previous action
"""
raise NotImplementedError
def reset(self):
"""
Resets the state of the environment, returning an initial observation.
Outputs
-------
observation : the initial observation of the space. (Initial reward is assumed to be 0.)
"""
raise NotImplementedError
@property
def action_space(self):
"""
Returns a Space object
"""
raise NotImplementedError
@property
def observation_space(self):
"""
Returns a Space object
"""
raise NotImplementedError
# Helpers that derive from Spaces
@property
def action_dim(self):
return self.action_space.flat_dim
def render(self):
pass
def log_diagnostics(self, paths):
"""
Log extra information per iteration based on the collected paths
"""
pass
@property
def spec(self):
return EnvSpec(
observation_space=self.observation_space,
action_space=self.action_space,
)
@property
def horizon(self):
"""
Horizon of the environment, if it has one
"""
raise NotImplementedError
def terminate(self):
"""
Clean up operation,
"""
pass
_Step = collections.namedtuple("Step", ["observation", "reward", "done", "info"])
def Step(observation, reward, done, **kwargs):
"""
Convenience method creating a namedtuple with the results of the
environment.step method.
Put extra diagnostic info in the kwargs
"""
return _Step(observation, reward, done, kwargs)
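# Minimal illustrative subclass (a sketch, not part of rllab itself): it shows how the
# interface above is typically filled in; the object() placeholders stand in for real
# Space instances, which an actual environment would return instead.
class _ConstantEnv(Env):
    @property
    def observation_space(self):
        return object()  # placeholder for a real Space
    @property
    def action_space(self):
        return object()  # placeholder for a real Space
    def reset(self):
        return 0
    def step(self, action):
        # one-shot episode: constant observation, reward of 1, immediately done
        return Step(observation=0, reward=1.0, done=True)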
| 25.934783
| 96
| 0.610645
|
a587e15b92c4d70adcf9d4f9079c6baeb615c8e9
| 7,193
|
py
|
Python
|
setup.py
|
huangruizhe/espresso
|
ee658bcc959bfbe8a7a61d7374d532d082d2aa26
|
[
"MIT"
] | null | null | null |
setup.py
|
huangruizhe/espresso
|
ee658bcc959bfbe8a7a61d7374d532d082d2aa26
|
[
"MIT"
] | null | null | null |
setup.py
|
huangruizhe/espresso
|
ee658bcc959bfbe8a7a61d7374d532d082d2aa26
|
[
"MIT"
] | 2
|
2021-01-15T09:55:07.000Z
|
2021-01-15T10:02:31.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import subprocess
import sys
from setuptools import Extension, find_packages, setup
if sys.version_info < (3, 6):
sys.exit("Sorry, Python >= 3.6 is required for fairseq.")
def write_version_py():
with open(os.path.join("fairseq", "version.txt")) as f:
version = f.read().strip()
# append latest commit hash to version string
try:
sha = (
subprocess.check_output(["git", "rev-parse", "HEAD"])
.decode("ascii")
.strip()
)
version += "+" + sha[:7]
except Exception:
pass
# write version info to fairseq/version.py
with open(os.path.join("fairseq", "version.py"), "w") as f:
f.write('__version__ = "{}"\n'.format(version))
return version
version = write_version_py()
with open("README.md") as f:
readme = f.read()
if sys.platform == "darwin":
extra_compile_args = ["-stdlib=libc++", "-O3"]
else:
extra_compile_args = ["-std=c++11", "-O3"]
class NumpyExtension(Extension):
"""Source: https://stackoverflow.com/a/54128391"""
def __init__(self, *args, **kwargs):
self.__include_dirs = []
super().__init__(*args, **kwargs)
@property
def include_dirs(self):
import numpy
return self.__include_dirs + [numpy.get_include()]
@include_dirs.setter
def include_dirs(self, dirs):
self.__include_dirs = dirs
extensions = [
Extension(
"fairseq.libbleu",
sources=[
"fairseq/clib/libbleu/libbleu.cpp",
"fairseq/clib/libbleu/module.cpp",
],
extra_compile_args=extra_compile_args,
),
NumpyExtension(
"fairseq.data.data_utils_fast",
sources=["fairseq/data/data_utils_fast.pyx"],
language="c++",
extra_compile_args=extra_compile_args,
),
NumpyExtension(
"fairseq.data.token_block_utils_fast",
sources=["fairseq/data/token_block_utils_fast.pyx"],
language="c++",
extra_compile_args=extra_compile_args,
),
]
cmdclass = {}
try:
# torch is not available when generating docs
from torch.utils import cpp_extension
extensions.extend(
[
cpp_extension.CppExtension(
"fairseq.libnat",
sources=[
"fairseq/clib/libnat/edit_dist.cpp",
],
)
]
)
if "CUDA_HOME" in os.environ:
extensions.extend(
[
cpp_extension.CppExtension(
"fairseq.libnat_cuda",
sources=[
"fairseq/clib/libnat_cuda/edit_dist.cu",
"fairseq/clib/libnat_cuda/binding.cpp",
],
)
]
)
cmdclass["build_ext"] = cpp_extension.BuildExtension
except ImportError:
pass
if "READTHEDOCS" in os.environ:
# don't build extensions when generating docs
extensions = []
if "build_ext" in cmdclass:
del cmdclass["build_ext"]
# use CPU build of PyTorch
dependency_links = [
"https://download.pytorch.org/whl/cpu/torch-1.7.0%2Bcpu-cp36-cp36m-linux_x86_64.whl"
]
else:
dependency_links = []
if "clean" in sys.argv[1:]:
# Source: https://bit.ly/2NLVsgE
print("deleting Cython files...")
import subprocess
subprocess.run(
["rm -f fairseq/*.so fairseq/**/*.so fairseq/*.pyd fairseq/**/*.pyd"],
shell=True,
)
extra_packages = []
if os.path.exists(os.path.join("fairseq", "model_parallel", "megatron", "mpu")):
extra_packages.append("fairseq.model_parallel.megatron.mpu")
def do_setup(package_data):
setup(
name="fairseq",
version=version,
description="Facebook AI Research Sequence-to-Sequence Toolkit",
url="https://github.com/pytorch/fairseq",
classifiers=[
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
long_description=readme,
long_description_content_type="text/markdown",
setup_requires=[
"cython",
'numpy<1.20.0; python_version<"3.7"',
'numpy; python_version>="3.7"',
"setuptools>=18.0",
],
install_requires=[
"cffi",
"cython",
'dataclasses; python_version<"3.7"',
"hydra-core<1.1",
"omegaconf<2.1",
"kaldi_io",
'numpy<1.20.0; python_version<"3.7"',
'numpy; python_version>="3.7"',
"regex",
"sacrebleu>=1.4.12",
"torch",
"tqdm",
],
dependency_links=dependency_links,
packages=find_packages(
exclude=[
"examples",
"examples.*",
"scripts",
"scripts.*",
"tests",
"tests.*",
]
)
+ extra_packages,
package_data=package_data,
ext_modules=extensions,
test_suite="tests",
entry_points={
"console_scripts": [
"fairseq-eval-lm = fairseq_cli.eval_lm:cli_main",
"fairseq-generate = fairseq_cli.generate:cli_main",
"fairseq-hydra-train = fairseq_cli.hydra_train:cli_main",
"fairseq-interactive = fairseq_cli.interactive:cli_main",
"fairseq-preprocess = fairseq_cli.preprocess:cli_main",
"fairseq-score = fairseq_cli.score:cli_main",
"fairseq-train = fairseq_cli.train:cli_main",
"fairseq-validate = fairseq_cli.validate:cli_main",
],
},
cmdclass=cmdclass,
zip_safe=False,
)
def get_files(path, relative_to="fairseq"):
all_files = []
for root, _dirs, files in os.walk(path, followlinks=True):
root = os.path.relpath(root, relative_to)
for file in files:
if file.endswith(".pyc"):
continue
all_files.append(os.path.join(root, file))
return all_files
try:
# symlink examples into fairseq package so package_data accepts them
fairseq_examples = os.path.join("fairseq", "examples")
if "build_ext" not in sys.argv[1:] and not os.path.exists(fairseq_examples):
os.symlink(os.path.join("..", "examples"), fairseq_examples)
package_data = {
"fairseq": (
get_files(fairseq_examples) + get_files(os.path.join("fairseq", "config"))
)
}
do_setup(package_data)
finally:
if "build_ext" not in sys.argv[1:] and os.path.exists(fairseq_examples):
os.unlink(fairseq_examples)
| 28.207843
| 92
| 0.573057
|
da9cd90b653ccc7771a54c5ec65a7d901c4366fd
| 3,384
|
py
|
Python
|
sdk/opendp/smartnoise/sql/reader/base.py
|
benlistyg/smartnoise-sdk
|
8d6de762700090dbed074336f643f7fabebae10e
|
[
"MIT"
] | 1
|
2021-12-30T15:21:54.000Z
|
2021-12-30T15:21:54.000Z
|
sdk/opendp/smartnoise/sql/reader/base.py
|
benlistyg/smartnoise-sdk
|
8d6de762700090dbed074336f643f7fabebae10e
|
[
"MIT"
] | null | null | null |
sdk/opendp/smartnoise/sql/reader/base.py
|
benlistyg/smartnoise-sdk
|
8d6de762700090dbed074336f643f7fabebae10e
|
[
"MIT"
] | null | null | null |
from opendp.smartnoise.reader.base import Reader
class SqlReader(Reader):
def __init__(self, name_compare=None, serializer=None):
self.compare = NameCompare() if name_compare is None else name_compare
self.serializer = serializer
def execute(self, query):
raise NotImplementedError("Execute must be implemented on the inherited class")
def _execute_ast(self, query):
if isinstance(query, str):
raise ValueError("Please pass ASTs to execute_ast. To execute strings, use execute.")
if hasattr(self, 'serializer') and self.serializer is not None:
query_string = self.serializer.serialize(query)
else:
query_string = str(query)
return self.execute(query_string)
def _execute_ast_df(self, query):
return self._to_df(self._execute_ast(query))
"""
Implements engine-specific identifier matching rules
for escaped identifiers.
"""
class NameCompare:
_name_compare_classes = {}
@classmethod
def register_name_compare(cls, engine, class_to_add):
cls._name_compare_classes[engine] = class_to_add
@classmethod
def get_name_compare(cls, engine):
if engine in cls._name_compare_classes:
return cls._name_compare_classes[engine]()
else:
return NameCompare()
def __init__(self, search_path=None):
self.search_path = search_path if search_path is not None else []
"""
True if schema portion of identifier used in query
matches schema or metadata object. Follows search
path. Pass in only the schema part.
"""
def reserved(self):
return ["select", "group", "on"]
def schema_match(self, query, meta):
if query.strip() == "" and meta in self.search_path:
return True
return self.identifier_match(query, meta)
"""
Uses database engine matching rules to report True
if identifier used in query matches identifier
of metadata object. Pass in one part at a time.
"""
def identifier_match(self, query, meta):
return query == meta
"""
Removes all escaping characters, keeping identifiers unchanged
"""
def strip_escapes(self, value):
return value.replace('"','').replace('`','').replace('[','').replace(']','')
"""
True if any part of identifier is escaped
"""
def is_escaped(self, identifier):
return any([p[0] in ['"', '[', '`'] for p in identifier.split('.') if p != ""])
"""
Converts proprietary escaping to SQL-92. Supports multi-part identifiers
"""
def clean_escape(self, identifier):
escaped = []
for p in identifier.split('.'):
if self.is_escaped(p):
escaped.append(p.replace('[', '"').replace(']', '"').replace('`', '"'))
else:
escaped.append(p.lower())
return '.'.join(escaped)
"""
Returns true if an identifier should
be escaped. Checks only one part per call.
"""
def should_escape(self, identifier):
if self.is_escaped(identifier):
return False
if identifier.lower() in self.reserved():
return True
if identifier.lower().replace(' ', '') == identifier.lower():
return False
else:
return True
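# Illustrative checks of the matching rules above (assumed values, not shipped tests):
if __name__ == "__main__":
    nc = NameCompare()
    assert nc.clean_escape('[MySchema].`My Table`') == '"MySchema"."My Table"'
    assert nc.should_escape('group')        # reserved word
    assert nc.should_escape('My Table')     # unescaped identifier containing a space
    assert not nc.should_escape('mytable')  # plain lowercase identifier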
| 34.886598
| 98
| 0.618203
|
118c58ba8f30364167ed859364c66962e9a938f3
| 2,593
|
py
|
Python
|
calipso/propertiesdialog.py
|
NASA-DEVELOP/vocal
|
15182377cd9d7de1c03b123e3bbe15b0e959ac80
|
[
"NASA-1.3"
] | 18
|
2017-06-26T19:16:40.000Z
|
2022-03-05T22:19:20.000Z
|
calipso/propertiesdialog.py
|
NASA-DEVELOP/vocal
|
15182377cd9d7de1c03b123e3bbe15b0e959ac80
|
[
"NASA-1.3"
] | 14
|
2017-05-12T16:43:14.000Z
|
2021-10-05T06:08:15.000Z
|
calipso/propertiesdialog.py
|
NASA-DEVELOP/vocal
|
15182377cd9d7de1c03b123e3bbe15b0e959ac80
|
[
"NASA-1.3"
] | 11
|
2017-06-20T19:58:56.000Z
|
2021-12-29T02:09:15.000Z
|
###################################
# Created on Aug 9, 2015
#
# @author: Grant Mercer
#
###################################
from Tkconstants import FLAT, RIGHT, LEFT
import collections
import tkFileDialog
import tkMessageBox
from Tkinter import Toplevel, Entry, Button, BOTH, Frame, \
Label, BOTTOM, TOP, X, RIDGE, Checkbutton, IntVar, StringVar, TclError
import constants
from constants import CSV, TXT
from sqlalchemy import or_
from db import db, DatabasePolygon
from tools.tools import center, get_shape_ranges
from tools.treelistbox import TreeListBox
from tools.tooltip import create_tool_tip
from log.log import logger
from advancedsearchdialog import AdvancedSearchDialog
from extractcolumnsdialog import ExtractColumnsDialog
class PropertyDialog(Toplevel):
"""
    Displays the properties of the shape in a window
"""
dialogs = []
def __init__(self, root, shape):
logger.info('Instantiating PropertyDialog')
Toplevel.__init__(self)
self.root = root
self.protocol('WM_DELETE_WINDOW', self.free)
self.wm_overrideredirect(1)
self.\
geometry('+%d+%d' %
(root.winfo_pointerx() - 60,
root.winfo_pointery()))
try:
self.tk.call('::Tk::unsupported::MacWindowStyle',
'style', self._w,
'help', 'noActivates')
except TclError:
pass
window_frame = Frame(self)
window_frame.pack(side=TOP, fill=BOTH, expand=True)
exit_frame = Frame(window_frame, background='#ffffe0')
exit_frame.pack(side=TOP, fill=X, expand=True)
button = Button(exit_frame, text='x', width=3, command=self.free,
background='#ffffe0', highlightthickness=0, relief=FLAT)
button.pack(side=RIGHT)
text_frame = Frame(window_frame)
text_frame.pack(side=TOP, fill=BOTH, expand=True)
label = Label(text_frame, text=str(shape), justify=LEFT,
background='#ffffe0',
font=('tahoma', '8', 'normal'))
label.pack(ipadx=1)
PropertyDialog.dialogs.append(self)
self.attributes("-topmost", True)
def free(self):
self.destroy()
for val, widget in enumerate(PropertyDialog.dialogs):
if widget is self:
PropertyDialog.dialogs.pop(val)
break
if PropertyDialog.dialogs:
for widget in PropertyDialog.dialogs:
widget.lift(aboveThis=self.root)
| 35.520548
| 74
| 0.606633
|
6735dde96117a97d582c9bb8caa39800c612b9cf
| 946
|
py
|
Python
|
madox_ws/src/jetbot_oled_display/jetbot_oled_display/timer.py
|
Cap-n-Proud/scrap
|
25109e96e4a3c30ca525cb65ffc02e28e6e05b94
|
[
"Apache-2.0"
] | null | null | null |
madox_ws/src/jetbot_oled_display/jetbot_oled_display/timer.py
|
Cap-n-Proud/scrap
|
25109e96e4a3c30ca525cb65ffc02e28e6e05b94
|
[
"Apache-2.0"
] | null | null | null |
madox_ws/src/jetbot_oled_display/jetbot_oled_display/timer.py
|
Cap-n-Proud/scrap
|
25109e96e4a3c30ca525cb65ffc02e28e6e05b94
|
[
"Apache-2.0"
] | null | null | null |
# timer.py
import time
class TimerError(Exception):
"""A custom exception used to report errors in use of Timer class"""
class Timer:
def __init__(self):
self._start_time = None
def start(self):
"""Start a new timer"""
if self._start_time is not None:
raise TimerError(f"Timer is running. Use .stop() to stop it")
self._start_time = time.perf_counter()
def value(self):
elapsed_time = (
time.perf_counter() - self._start_time
) # + 60 * 60 * 1000 * 27 + 57 * 60 * 1000
return elapsed_time
def stop(self):
"""Stop the timer, and report the elapsed time"""
if self._start_time is None:
raise TimerError(f"Timer is not running. Use .start() to start it")
elapsed_time = time.perf_counter() - self._start_time
self._start_time = None
print(f"Elapsed time: {elapsed_time:0.4f} seconds")
| 26.277778
| 79
| 0.607822
|
22dfcb37989b5108c8ffebeb8d658fce5999aadb
| 4,947
|
py
|
Python
|
parsePioneerWomanRecipe.py
|
zachRudz/dokuWikiRecipeParsing
|
ad36d4e21e84e897339940c8b9e9e8bbb853606e
|
[
"MIT"
] | null | null | null |
parsePioneerWomanRecipe.py
|
zachRudz/dokuWikiRecipeParsing
|
ad36d4e21e84e897339940c8b9e9e8bbb853606e
|
[
"MIT"
] | null | null | null |
parsePioneerWomanRecipe.py
|
zachRudz/dokuWikiRecipeParsing
|
ad36d4e21e84e897339940c8b9e9e8bbb853606e
|
[
"MIT"
] | null | null | null |
# This script will parse a recipe from thePioneerWoman.com,
# and output it in dokuwiki format to stdout.
## Requirements: Beautiful Soup, requests
# pip3 install beautifulsoup4
# pip3 install requests
## Usage:
# python3 parsePioneerWomanRecipe.py [url of recipe]
# python3 parsePioneerWomanRecipe.py http://thepioneerwoman.com/cooking/easy-mulligatawny/
import re
import sys
import requests
from bs4 import BeautifulSoup
## Print the title of the recipe
def printTitle(soup, url):
## Grabbing the recipe name
recipeName = soup.find_all("h2", 'entry-title')
# Making sure we found the recipe name
if(len(recipeName) != 1):
print("Error: Encountered unexpected entries when getting the recipe title. Found:")
for r in recipeName:
print(r.getText())
sys.exit(2)
	# Building the link in the wiki
wikiLink = recipeName[0].getText().replace(" ", "").lower()
## Grabbing the serving size, cook time, and prep time
prepSummary = soup.find_all("div", "recipe-summary-time")
	# Making sure we found the preparation info
	if(len(prepSummary) < 1):
		print("Error: Wasn't able to find preparation times and serving info.")
sys.exit(2)
	# There are multiple prep summaries on the webpage (top sidebar + bottom).
	# Isolate only one, so we can pull prep time and serving size
prepInfo = prepSummary[0].find_all('dd')
# Grabbing serving size
prepTime = prepInfo[0].getText()
difficulty = prepInfo[1].getText()
cookTime = prepInfo[2].getText()
servingSize = prepInfo[3].getText()
## Printing wiki page name
print("")
print("====={0}=====".format(recipeName[0].getText()))
## Printing table entry for this recipe
print("^Name^Meat^Serves^Difficulty^Prep time^Cook time^Tried it?^Rating^Source rating^Calories^Protein^Source^")
# URL
print("|[[.:{0}|{1}]]|".format(wikiLink, recipeName[0].getText()), end='')
    # Meat (left blank), serves, difficulty, prep time, cook time
print("|{0}|{1}|{2}|{3}|".format(servingSize, difficulty, prepTime, cookTime), end='')
# Tried myself, rating, source rating, calories, protein
print("No|||||", end='')
# Source
print("[[{0}|The Pioneer Woman]]|".format(url))
## Print the ingredients section
def printIngredients(soup):
print("")
print("====Ingredients====")
# The webpage has 2 sections of ingredients
# Grab the first one
panel = soup.find_all("div", "panel-body")
if(len(panel) < 1):
print("Error: Wasn't able to find ingredients section")
sys.exit(2)
ingredientSection = panel[0].find_all("ul", "list-ingredients")[0]
# Grabbing ingredients and their proportions
proportions = ingredientSection.find_all("span", itemprop="amount")
item = ingredientSection.find_all("span", itemprop="name")
# Printing ingredients/proportions
i = 0
for p in proportions:
        # Some recipes don't have proportions for their ingredients.
# Eg: "Sliced tomato", "Salt and pepper to taste"
# This check is to help our formatting so that we don't get a leading space
# in our ingredients list when this happens
if(p.getText().strip() == ""):
print(" * {0}".format(item[i].getText().strip()))
else:
print(" * {0} {1}".format(p.getText().strip(), item[i].getText().strip()))
i = i + 1
def printInstructions(soup):
# The webpage has 4 sections of instructions: [Header / Instruction body] * 2
# Grab the first instruction body
instructionSections = soup.find_all("div", id=re.compile("recipe-instructions*"))
if(len(instructionSections) < 2):
print("Error: Wasn't able to find instructions section")
sys.exit(2)
# Isolating panel of instructions
instructionPanel = instructionSections[1].find_all("div", "panel-body")
if(len(instructionPanel) < 1):
print("Error: Wasn't able to find instructions section")
sys.exit(2)
# Formatting
instructions = instructionPanel[0].getText()
instructions = instructions.replace("\n", "")
instructions = instructions.replace("\t", "")
# Printing to dokuwiki format
print("")
print("====Instructions====")
print(instructions)
##################################################
#
## Entry Point
#
##################################################
# Getting command line args
if(len(sys.argv) != 2):
print("Usage: python3 {0} [url of a recipe from thePioneerWoman.com]".format(sys.argv[0]))
print("Example: python3 {0} http://thepioneerwoman.com/cooking/easy-mulligatawny/".format(sys.argv[0]))
sys.exit(0)
# Getting the website
#print("Making HTTP request...")
page = requests.get(sys.argv[1])
if(page.status_code != 200):
print("Error: Couldn't resolve the HTTP request (Status code: {0})".format(page.status_code));
sys.exit(1)
#print("Done. Building wiki entry...")
#print()
# Building a nice ol' bowl of soup
soup = BeautifulSoup(page.content, 'html.parser');
# Printing the title, and the info of the recipe
# This info includes...
# - Recipe name
# - Serving size
# - Prep time
# - Cook time
# - Source link
printTitle(soup, sys.argv[1])
printIngredients(soup)
printInstructions(soup)
| 29.801205
| 114
| 0.690722
|
f753b672926677649d60a11fcfabcae3b859e9d9
| 4,181
|
py
|
Python
|
pypureclient/pure1/Pure1_1_0/models/metric_get_response.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 14
|
2018-12-07T18:30:27.000Z
|
2022-02-22T09:12:33.000Z
|
pypureclient/pure1/Pure1_1_0/models/metric_get_response.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 28
|
2019-09-17T21:03:52.000Z
|
2022-03-29T22:07:35.000Z
|
pypureclient/pure1/Pure1_1_0/models/metric_get_response.py
|
Flav-STOR-WL/py-pure-client
|
03b889c997d90380ac5d6380ca5d5432792d3e89
|
[
"BSD-2-Clause"
] | 15
|
2020-06-11T15:50:08.000Z
|
2022-03-21T09:27:25.000Z
|
# coding: utf-8
"""
Pure1 Public REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.pure1.Pure1_1_0 import models
class MetricGetResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'continuation_token': 'str',
'total_item_count': 'int',
'items': 'list[Metric]'
}
attribute_map = {
'continuation_token': 'continuation_token',
'total_item_count': 'total_item_count',
'items': 'items'
}
required_args = {
}
def __init__(
self,
continuation_token=None, # type: str
total_item_count=None, # type: int
items=None, # type: List[models.Metric]
):
"""
Keyword args:
continuation_token (str): Continuation token that can be provided in the continuation_token query param to get the next page of data. If you use the continuation token to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The continuation token is generated if the limit is less than the remaining number of items, and the default sort is used (no sort is specified).
total_item_count (int): Total number of items after applying filter params.
items (list[Metric])
"""
if continuation_token is not None:
self.continuation_token = continuation_token
if total_item_count is not None:
self.total_item_count = total_item_count
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `MetricGetResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(MetricGetResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, MetricGetResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
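A brief, hypothetical sketch of how this generated model is consumed; the field values are made up and only the convenience methods defined above are used.
# Hypothetical usage of the generated model above; the values are illustrative only.
resp = MetricGetResponse(continuation_token="abc123", total_item_count=2)
print(resp.to_dict())   # {'continuation_token': 'abc123', 'total_item_count': 2}
print(resp)             # pretty-printed dict via __repr__/to_str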
| 33.717742
| 522
| 0.587898
|
52f274e7a67e17f7dde22850f2f725fd8843cea7
| 3,020
|
py
|
Python
|
test/test_pml/test_utils/test_collection_utils.py
|
ashadhaz/pml
|
8d9261c8885a82d795c89de23f53ff7d05ef9495
|
[
"MIT"
] | 2
|
2018-03-21T16:26:16.000Z
|
2020-10-27T02:45:11.000Z
|
test/test_pml/test_utils/test_collection_utils.py
|
ashadhaz/pml
|
8d9261c8885a82d795c89de23f53ff7d05ef9495
|
[
"MIT"
] | null | null | null |
test/test_pml/test_utils/test_collection_utils.py
|
ashadhaz/pml
|
8d9261c8885a82d795c89de23f53ff7d05ef9495
|
[
"MIT"
] | 4
|
2015-02-25T16:19:05.000Z
|
2021-02-06T06:36:46.000Z
|
# Copyright (C) 2012 David Rusk
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Unit tests for collection_utils module.
@author: drusk
"""
import unittest
from pml.utils import collection_utils
class CollectionUtilsTest(unittest.TestCase):
def test_get_key_with_highest_value(self):
dictionary = {"dog": 5, "cat": 10, "bird": 7}
key = collection_utils.get_key_with_highest_value(dictionary)
self.assertEqual(key, "cat")
def test_get_key_with_highest_value_float(self):
dictionary = {0: 0.10, 1: 0.0567, 2: 0.72}
key = collection_utils.get_key_with_highest_value(dictionary)
self.assertEqual(key, 2)
def test_get_key_with_highest_value_empty(self):
dictionary = {}
self.assertIsNone(
collection_utils.get_key_with_highest_value(dictionary))
def test_are_all_equal_empty(self):
iterable = []
self.assertTrue(collection_utils.are_all_equal(iterable))
def test_are_all_equal_one_element(self):
iterable = ['a']
self.assertTrue(collection_utils.are_all_equal(iterable))
def test_are_all_equal(self):
iterable1 = ['a', 'b']
self.assertFalse(collection_utils.are_all_equal(iterable1))
iterable2 = ['a', 'b', 'b']
self.assertFalse(collection_utils.are_all_equal(iterable2))
iterable3 = ['b', 'b']
self.assertTrue(collection_utils.are_all_equal(iterable3))
iterable4 = ['b', 'b', 'b']
self.assertTrue(collection_utils.are_all_equal(iterable4))
def test_get_most_common(self):
collection = ["a", "b", "a", "a", "b"]
self.assertEqual(collection_utils.get_most_common(collection), "a")
def test_get_most_common_empty(self):
collection = []
self.assertIsNone(collection_utils.get_most_common(collection))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| 38.227848
| 79
| 0.698344
|
fb18dbf1aebde279fd228ae56402fef9c3e8d2a5
| 15,953
|
py
|
Python
|
sdk/purview/azure-purview-scanning/azure/purview/scanning/rest/scans/_request_builders.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 3
|
2020-06-23T02:25:27.000Z
|
2021-09-07T18:48:11.000Z
|
sdk/purview/azure-purview-scanning/azure/purview/scanning/rest/scans/_request_builders.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 510
|
2019-07-17T16:11:19.000Z
|
2021-08-02T08:38:32.000Z
|
sdk/purview/azure-purview-scanning/azure/purview/scanning/rest/scans/_request_builders.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 5
|
2019-09-04T12:51:37.000Z
|
2020-09-16T07:28:40.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.pipeline.transport._base import _format_url_section
from azure.purview.scanning.core.rest import HttpRequest
from msrest import Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional, Union
_SERIALIZER = Serializer()
def build_create_or_update_request(
data_source_name, # type: str
scan_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
"""Creates an instance of a scan.
See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder into your code flow.
:param data_source_name:
:type data_source_name: str
:param scan_name:
:type scan_name: str
:keyword json:
:paramtype json: Any
:keyword content:
:paramtype content: Any
:return: Returns an :class:`~azure.purview.scanning.core.rest.HttpRequest` that you will pass to the client's `send_request` method.
See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this response into your code flow.
:rtype: ~azure.purview.scanning.core.rest.HttpRequest
Example:
.. code-block:: python
# kind template as part of your input body
kind = 'AdlsGen1CredentialScan' or 'AdlsGen1MsiScan' or 'AdlsGen2CredentialScan' or 'AdlsGen2MsiScan' or 'AmazonAccountCredentialScan' or 'AmazonPostgreSqlCredentialScan' or 'AmazonS3CredentialScan' or 'AmazonS3RoleARNScan' or 'AmazonSqlCredentialScan' or 'AzureCosmosDbCredentialScan' or 'AzureDataExplorerCredentialScan' or 'AzureDataExplorerMsiScan' or 'AzureFileServiceCredentialScan' or 'AzureMySqlCredentialScan' or 'AzurePostgreSqlCredentialScan' or 'AzureResourceGroupCredentialScan' or 'AzureResourceGroupMsiScan' or 'AzureSqlDataWarehouseCredentialScan' or 'AzureSqlDataWarehouseMsiScan' or 'AzureSqlDatabaseCredentialScan' or 'AzureSqlDatabaseManagedInstanceCredentialScan' or 'AzureSqlDatabaseManagedInstanceMsiScan' or 'AzureSqlDatabaseMsiScan' or 'AzureStorageCredentialScan' or 'AzureStorageMsiScan' or 'AzureSubscriptionCredentialScan' or 'AzureSubscriptionMsiScan' or 'AzureSynapseCredentialScan' or 'AzureSynapseMsiScan' or 'AzureSynapseWorkspaceCredentialScan' or 'AzureSynapseWorkspaceMsiScan' or 'OracleCredentialScan' or 'OracleUserPassScan' or 'PowerBIDelegatedScan' or 'PowerBIMsiScan' or 'SapEccCredentialScan' or 'SapEccUserPassScan' or 'SapS4HanaSapS4HanaCredentialScan' or 'SapS4HanaSapS4HanaUserPassScan' or 'SqlServerDatabaseCredentialScan' or 'TeradataCredentialScan' or 'TeradataUserPassScanAutoGenerated' or 'TeradataUserPassScan'
# JSON input template you can fill out and use as your `json` input.
json = {
"kind": "Scan",
"scanResults": [
{
"assetsClassified": "long (optional)",
"assetsDiscovered": "long (optional)",
"dataSourceType": "str (optional)",
"diagnostics": {},
"endTime": "datetime (optional)",
"error": {},
"errorMessage": "str (optional)",
"id": "str (optional)",
"parentId": "str (optional)",
"pipelineStartTime": "datetime (optional)",
"queuedTime": "datetime (optional)",
"resourceId": "str (optional)",
"runType": "str (optional)",
"scanLevelType": "str (optional)",
"scanRulesetType": "str (optional)",
"scanRulesetVersion": "int (optional)",
"startTime": "datetime (optional)",
"status": "str (optional)"
}
]
}
# response body for status code(s): 200, 201
response_body == {
"kind": "Scan",
"scanResults": [
{
"assetsClassified": "long (optional)",
"assetsDiscovered": "long (optional)",
"dataSourceType": "str (optional)",
"diagnostics": {},
"endTime": "datetime (optional)",
"error": {},
"errorMessage": "str (optional)",
"id": "str (optional)",
"parentId": "str (optional)",
"pipelineStartTime": "datetime (optional)",
"queuedTime": "datetime (optional)",
"resourceId": "str (optional)",
"runType": "str (optional)",
"scanLevelType": "str (optional)",
"scanRulesetType": "str (optional)",
"scanRulesetVersion": "int (optional)",
"startTime": "datetime (optional)",
"status": "str (optional)"
}
]
}
"""
content_type = kwargs.pop("content_type", None)
api_version = "2018-12-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/datasources/{dataSourceName}/scans/{scanName}')
path_format_arguments = {
'dataSourceName': _SERIALIZER.url("data_source_name", data_source_name, 'str'),
'scanName': _SERIALIZER.url("scan_name", scan_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_get_request(
data_source_name, # type: str
scan_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
"""Gets a scan information.
See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder into your code flow.
:param data_source_name:
:type data_source_name: str
:param scan_name:
:type scan_name: str
:return: Returns an :class:`~azure.purview.scanning.core.rest.HttpRequest` that you will pass to the client's `send_request` method.
See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this response into your code flow.
:rtype: ~azure.purview.scanning.core.rest.HttpRequest
Example:
.. code-block:: python
# response body for status code(s): 200
response_body == {
"kind": "Scan",
"scanResults": [
{
"assetsClassified": "long (optional)",
"assetsDiscovered": "long (optional)",
"dataSourceType": "str (optional)",
"diagnostics": {},
"endTime": "datetime (optional)",
"error": {},
"errorMessage": "str (optional)",
"id": "str (optional)",
"parentId": "str (optional)",
"pipelineStartTime": "datetime (optional)",
"queuedTime": "datetime (optional)",
"resourceId": "str (optional)",
"runType": "str (optional)",
"scanLevelType": "str (optional)",
"scanRulesetType": "str (optional)",
"scanRulesetVersion": "int (optional)",
"startTime": "datetime (optional)",
"status": "str (optional)"
}
]
}
"""
api_version = "2018-12-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/datasources/{dataSourceName}/scans/{scanName}')
path_format_arguments = {
'dataSourceName': _SERIALIZER.url("data_source_name", data_source_name, 'str'),
'scanName': _SERIALIZER.url("scan_name", scan_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_delete_request(
data_source_name, # type: str
scan_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
"""Deletes the scan associated with the data source.
See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder into your code flow.
:param data_source_name:
:type data_source_name: str
:param scan_name:
:type scan_name: str
:return: Returns an :class:`~azure.purview.scanning.core.rest.HttpRequest` that you will pass to the client's `send_request` method.
See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this response into your code flow.
:rtype: ~azure.purview.scanning.core.rest.HttpRequest
Example:
.. code-block:: python
# response body for status code(s): 200
response_body == {
"kind": "Scan",
"scanResults": [
{
"assetsClassified": "long (optional)",
"assetsDiscovered": "long (optional)",
"dataSourceType": "str (optional)",
"diagnostics": {},
"endTime": "datetime (optional)",
"error": {},
"errorMessage": "str (optional)",
"id": "str (optional)",
"parentId": "str (optional)",
"pipelineStartTime": "datetime (optional)",
"queuedTime": "datetime (optional)",
"resourceId": "str (optional)",
"runType": "str (optional)",
"scanLevelType": "str (optional)",
"scanRulesetType": "str (optional)",
"scanRulesetVersion": "int (optional)",
"startTime": "datetime (optional)",
"status": "str (optional)"
}
]
}
"""
api_version = "2018-12-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/datasources/{dataSourceName}/scans/{scanName}')
path_format_arguments = {
'dataSourceName': _SERIALIZER.url("data_source_name", data_source_name, 'str'),
'scanName': _SERIALIZER.url("scan_name", scan_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_list_by_data_source_request(
data_source_name, # type: str
**kwargs # type: Any
):
# type: (...) -> HttpRequest
"""List scans in data source.
See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder into your code flow.
:param data_source_name:
:type data_source_name: str
:return: Returns an :class:`~azure.purview.scanning.core.rest.HttpRequest` that you will pass to the client's `send_request` method.
See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this response into your code flow.
:rtype: ~azure.purview.scanning.core.rest.HttpRequest
Example:
.. code-block:: python
# response body for status code(s): 200
response_body == {
"count": "long (optional)",
"nextLink": "str (optional)",
"value": [
{
"kind": "Scan",
"scanResults": [
{
"assetsClassified": "long (optional)",
"assetsDiscovered": "long (optional)",
"dataSourceType": "str (optional)",
"diagnostics": {},
"endTime": "datetime (optional)",
"error": {},
"errorMessage": "str (optional)",
"id": "str (optional)",
"parentId": "str (optional)",
"pipelineStartTime": "datetime (optional)",
"queuedTime": "datetime (optional)",
"resourceId": "str (optional)",
"runType": "str (optional)",
"scanLevelType": "str (optional)",
"scanRulesetType": "str (optional)",
"scanRulesetVersion": "int (optional)",
"startTime": "datetime (optional)",
"status": "str (optional)"
}
]
}
]
}
"""
api_version = "2018-12-01-preview"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/datasources/{dataSourceName}/scans')
path_format_arguments = {
'dataSourceName': _SERIALIZER.url("data_source_name", data_source_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
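A hedged sketch that only builds one of the request objects above with placeholder names; actually dispatching it is done through an authenticated client's send_request, as described in the quickstart linked in the docstrings.
# Illustrative only: build a GET request with placeholder names and inspect it.
# Sending it requires an authenticated client's send_request(), per the quickstart
# URL referenced in the docstrings above.
request = build_get_request("my-data-source", "my-scan")
print(request.method)   # GET
print(request.url)      # the formatted /datasources/{dataSourceName}/scans/{scanName} path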
| 42.884409
| 1,375
| 0.55651
|
5b6bcc05aeaa92d98bc6ffc63aa9967c28a1b51b
| 2,275
|
py
|
Python
|
tests/integration/modules/test_test.py
|
byteskeptical/salt
|
637fe0b04f38b2274191b005d73b3c6707d7f400
|
[
"Apache-2.0"
] | 12
|
2015-01-21T00:18:25.000Z
|
2021-07-11T07:35:26.000Z
|
tests/integration/modules/test_test.py
|
byteskeptical/salt
|
637fe0b04f38b2274191b005d73b3c6707d7f400
|
[
"Apache-2.0"
] | 86
|
2017-01-27T11:54:46.000Z
|
2020-05-20T06:25:26.000Z
|
tests/integration/modules/test_test.py
|
byteskeptical/salt
|
637fe0b04f38b2274191b005d73b3c6707d7f400
|
[
"Apache-2.0"
] | 12
|
2015-01-05T09:50:42.000Z
|
2019-08-19T01:43:40.000Z
|
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.mixins import AdaptedConfigurationTestCaseMixin
# Import salt libs
import salt.version
import salt.config
class TestModuleTest(ModuleCase, AdaptedConfigurationTestCaseMixin):
'''
Validate the test module
'''
def test_ping(self):
'''
test.ping
'''
self.assertTrue(self.run_function('test.ping'))
def test_echo(self):
'''
test.echo
'''
self.assertEqual(self.run_function('test.echo', ['text']), 'text')
def test_version(self):
'''
test.version
'''
self.assertEqual(self.run_function('test.version'),
salt.version.__saltstack_version__.string)
def test_conf_test(self):
'''
test.conf_test
'''
self.assertEqual(self.run_function('test.conf_test'), 'baz')
def test_get_opts(self):
'''
test.get_opts
'''
opts = salt.config.minion_config(
self.get_config_file_path('minion')
)
self.assertEqual(
self.run_function('test.get_opts')['cachedir'],
opts['cachedir']
)
def test_cross_test(self):
'''
test.cross_test
'''
self.assertTrue(
self.run_function(
'test.cross_test',
['test.ping']
)
)
def test_fib(self):
'''
test.fib
'''
self.assertEqual(
self.run_function(
'test.fib',
['20'],
)[0],
6765
)
def test_collatz(self):
'''
test.collatz
'''
self.assertEqual(
self.run_function(
'test.collatz',
['40'],
)[0][-1],
2
)
def test_outputter(self):
'''
test.outputter
'''
self.assertEqual(self.run_function('test.outputter', ['text']), 'text')
| 23.697917
| 79
| 0.498022
|
5e45389d213b7fcccca87dfd16fa3e94e6b8e2bf
| 7,795
|
py
|
Python
|
codes/ONNet/python-package/onnet/DiffractiveLayer.py
|
azopticsinc/optical-neural-network
|
28280014a6c1fc717a5077ed5e3c3496a4b103ac
|
[
"MIT"
] | 1
|
2021-04-27T00:50:12.000Z
|
2021-04-27T00:50:12.000Z
|
codes/ONNet/python-package/onnet/DiffractiveLayer.py
|
dalerxli/optical-neural-networks-1
|
28280014a6c1fc717a5077ed5e3c3496a4b103ac
|
[
"MIT"
] | null | null | null |
codes/ONNet/python-package/onnet/DiffractiveLayer.py
|
dalerxli/optical-neural-networks-1
|
28280014a6c1fc717a5077ed5e3c3496a4b103ac
|
[
"MIT"
] | 1
|
2021-08-14T02:31:06.000Z
|
2021-08-14T02:31:06.000Z
|
import torch
from .Z_utils import COMPLEX_utils as Z
from .some_utils import *
import numpy as np
import random
import torch.nn as nn
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
#https://pytorch.org/tutorials/beginner/pytorch_with_examples.html#pytorch-custom-nn-modules
class DiffractiveLayer(torch.nn.Module):
def SomeInit(self, M_in, N_in,HZ=0.4e12):
assert (M_in == N_in)
self.M = M_in
self.N = N_in
self.z_modulus = Z.modulus
self.size = M_in
self.delta = 0.03
self.dL = 0.02
self.c = 3e8
self.Hz = HZ#0.4e12
self.H_z = self.Init_H()
def __repr__(self):
#main_str = super(DiffractiveLayer, self).__repr__()
main_str = f"DiffractiveLayer_[{(int)(self.Hz/1.0e9)}G]_[{self.M},{self.N}]"
return main_str
def __init__(self, M_in, N_in,config,HZ=0.4e12):
super(DiffractiveLayer, self).__init__()
self.SomeInit(M_in, N_in,HZ)
assert config is not None
self.config = config
#self.init_value = init_value
#self.rDrop = rDrop
if not hasattr(self.config,'wavelet') or self.config.wavelet is None:
if self.config.modulation=="phase":
self.transmission = torch.nn.Parameter(data=torch.Tensor(self.size, self.size), requires_grad=True)
else:
self.transmission = torch.nn.Parameter(data=torch.Tensor(self.size, self.size, 2), requires_grad=True)
init_param = self.transmission.data
if self.config.init_value=="reverse": #
half=self.transmission.data.shape[-2]//2
init_param[..., :half, :] = 0
init_param[..., half:, :] = np.pi
elif self.config.init_value=="random":
init_param.uniform_(0, np.pi*2)
elif self.config.init_value == "random_reverse":
init_param = torch.randint_like(init_param,0,2)*np.pi
elif self.config.init_value == "chunk":
sections = split__sections()
for xx in init_param.split(sections, -1):
xx = random.random(0,np.pi*2)
#self.rDrop = config.rDrop
#self.bias = torch.nn.Parameter(data=torch.Tensor(1, 1), requires_grad=True)
def visualize(self,visual,suffix, params):
param = self.transmission.data
name = f"{suffix}_{self.config.modulation}_"
return visual.image(name,param, params)
def share_weight(self,layer_1):
tp = type(self)
assert(type(layer_1)==tp)
#del self.transmission
#self.transmission = layer_1.transmission
def Init_H(self):
# Parameter
N = self.size
df = 1.0 / self.dL
d=self.delta
lmb=self.c / self.Hz
k = np.pi * 2.0 / lmb
D = self.dL * self.dL / (N * lmb)
# phase
def phase(i, j):
i -= N // 2
j -= N // 2
return ((i * df) * (i * df) + (j * df) * (j * df))
ph = np.fromfunction(phase, shape=(N, N), dtype=np.float32)
# H
H = np.exp(1.0j * k * d) * np.exp(-1.0j * lmb * np.pi * d * ph)
H_f = np.fft.fftshift(H)*self.dL*self.dL/(N*N)
# print(H_f); print(H)
H_z = np.zeros(H_f.shape + (2,))
H_z[..., 0] = H_f.real
H_z[..., 1] = H_f.imag
H_z = torch.from_numpy(H_z).cuda()
return H_z
def Diffractive_(self,u0, theta=0.0):
if Z.isComplex(u0):
z0 = u0
else:
z0 = u0.new_zeros(u0.shape + (2,))
z0[...,0] = u0
N = self.size
df = 1.0 / self.dL
z0 = Z.fft(z0)
u1 = Z.Hadamard(z0,self.H_z.float())
u2 = Z.fft(u1,"C2C",inverse=True)
return u2 * N * N * df * df
def GetTransCoefficient(self):
'''
eps = 1e-5; momentum = 0.1; affine = True
mean = torch.mean(self.transmission, 1)
vari = torch.var(self.transmission, 1)
amp_bn = torch.batch_norm(self.transmission,mean,vari)
:return:
'''
amp_s = Z.exp_euler(self.transmission)
return amp_s
def forward(self, x):
diffrac = self.Diffractive_(x)
amp_s = self.GetTransCoefficient()
x = Z.Hadamard(diffrac,amp_s.float())
if(self.config.rDrop>0):
            drop = Z.rDrop2D(1-self.config.rDrop,(self.M,self.N),isComlex=True)  # self.rDrop is never set; the rate lives on the config
x = Z.Hadamard(x, drop)
#x = x+self.bias
return x
class DiffractiveAMP(DiffractiveLayer):
    def __init__(self, M_in, N_in, config, HZ=0.4e12):
        # Mirror the parent constructor: DiffractiveLayer takes (M_in, N_in, config, HZ)
        # and has no `params` keyword, so the old (M_in, N_in, rDrop, params="amp") call could not work.
        super(DiffractiveAMP, self).__init__(M_in, N_in, config, HZ)
#self.amp = torch.nn.Parameter(data=torch.Tensor(self.size, self.size, 2), requires_grad=True)
self.transmission.data.uniform_(0, 1)
def GetTransCoefficient(self):
# amp_s = Z.sigmoid(self.amp)
# amp_s = torch.clamp(self.amp, 1.0e-6, 1)
amp_s = self.transmission
return amp_s
class DiffractiveWavelet(DiffractiveLayer):
def __init__(self, M_in, N_in,config,HZ=0.4e12):
super(DiffractiveWavelet, self).__init__(M_in, N_in,config,HZ)
#self.hough = torch.nn.Parameter(data=torch.Tensor(2), requires_grad=True)
self.Init_DisTrans()
#self.GetXita()
def __repr__(self):
main_str = f"Diffrac_Wavelet_[{(int)(self.Hz/1.0e9)}G]_[{self.M},{self.N}]"
return main_str
def share_weight(self,layer_1):
tp = type(self)
assert(type(layer_1)==tp)
del self.wavelet
self.wavelet = layer_1.wavelet
del self.dis_map
self.dis_map = layer_1.dis_map
del self.wav_indices
self.wav_indices = layer_1.wav_indices
def Init_DisTrans(self):
origin_r, origin_c = (self.M-1) / 2, (self.N-1) / 2
origin_r = random.uniform(0, self.M-1)
origin_c = random.uniform(0, self.N - 1)
self.dis_map={}
#self.dis_trans = torch.zeros((self.size, self.size)).int()
self.wav_indices = torch.LongTensor((self.size*self.size)).cuda()
nz=0
for r in range(self.M):
for c in range(self.N):
off = np.sqrt((r - origin_r) * (r - origin_r) + (c - origin_c) * (c - origin_c))
i_off = (int)(off+0.5)
if i_off not in self.dis_map:
self.dis_map[i_off]=len(self.dis_map)
id = self.dis_map[i_off]
#self.dis_trans[r, c] = id
self.wav_indices[nz] = id; nz=nz+1
#print(f"[{r},{c}]={self.dis_trans[r, c]}")
nD = len(self.dis_map)
if False:
plt.imshow(self.dis_trans.numpy())
plt.show()
self.wavelet = torch.nn.Parameter(data=torch.Tensor(nD), requires_grad=True)
self.wavelet.data.uniform_(0, np.pi*2)
#self.dis_trans = self.dis_trans.cuda()
def GetXita(self):
if False:
xita = torch.zeros((self.size, self.size))
for r in range(self.M):
for c in range(self.N):
pos = self.dis_trans[r, c]
xita[r,c] = self.wavelet[pos]
origin_r,origin_c=self.M/2,self.N/2
#xita = self.dis_trans*self.hough[0]+self.hough[1]
else:
xita = torch.index_select(self.wavelet, 0, self.wav_indices)
xita = xita.view(self.size, self.size)
# print(xita)
return xita
def GetTransCoefficient(self):
xita = self.GetXita()
amp_s = Z.exp_euler(xita)
return amp_s
def visualize(self,visual,suffix, params):
xita = self.GetXita()
name = f"{suffix}"
return visual.image(name,torch.sin(xita.detach()), params)
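A construction-only sketch for the layer above; the config object is hypothetical (it carries just the attributes the constructor reads) and a CUDA device is assumed because Init_H moves H_z to the GPU.
# Hypothetical construction sketch. The config carries only the attributes that
# DiffractiveLayer actually reads; a CUDA device is assumed because Init_H()
# places the transfer function H_z on the GPU.
from types import SimpleNamespace
cfg = SimpleNamespace(wavelet=None, modulation="phase", init_value="random", rDrop=0)
layer = DiffractiveLayer(112, 112, cfg)
print(layer)   # DiffractiveLayer_[400G]_[112,112]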
| 35.112613
| 118
| 0.563566
|
1d7c013683768d39995a9bf0b10b2b52e3ea2d3f
| 1,906
|
py
|
Python
|
masakari-7.0.0/masakari/api/openstack/ha/versionsV1.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | null | null | null |
masakari-7.0.0/masakari/api/openstack/ha/versionsV1.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 5
|
2019-08-14T06:46:03.000Z
|
2021-12-13T20:01:25.000Z
|
masakari-7.0.0/masakari/api/openstack/ha/versionsV1.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 2
|
2020-03-15T01:24:15.000Z
|
2020-07-22T20:34:26.000Z
|
# Copyright (c) 2016 NTT Data
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six.moves import http_client
import webob.exc
from masakari.api.openstack import extensions
from masakari.api.openstack.ha import versions
from masakari.api.openstack.ha.views import versions as views_versions
from masakari.api.openstack import wsgi
ALIAS = "versions"
class VersionsController(wsgi.Controller):
@extensions.expected_errors(http_client.NOT_FOUND)
def show(self, req, id='v1'):
builder = views_versions.get_view_builder(req)
if id not in versions.VERSIONS:
raise webob.exc.HTTPNotFound()
return builder.build_version(versions.VERSIONS[id])
class Versions(extensions.V1APIExtensionBase):
"""API Version information."""
name = "Versions"
alias = ALIAS
version = 1
def get_resources(self):
resources = [
extensions.ResourceExtension(ALIAS, VersionsController(),
custom_routes_fn=self.version_map)]
return resources
def get_controller_extensions(self):
return []
def version_map(self, mapper, wsgi_resource):
mapper.connect("versions", "/",
controller=wsgi_resource,
action='show', conditions={"method": ['GET']})
mapper.redirect("", "/")
| 32.305085
| 78
| 0.681007
|
780abe59e45e780094b58b5573249a2f20c12ee6
| 495
|
py
|
Python
|
src/eventbus/logger.py
|
cyclegen/eventbus-py
|
423a2201ca9a084206ed0102903cff7c0e940e16
|
[
"MIT"
] | 1
|
2021-06-18T09:58:24.000Z
|
2021-06-18T09:58:24.000Z
|
src/eventbus/logger.py
|
GhostLee/eventbus-py
|
f378c5ce28fe8ca4b9b2fc9f3959bfb70cc8d874
|
[
"MIT"
] | null | null | null |
src/eventbus/logger.py
|
GhostLee/eventbus-py
|
f378c5ce28fe8ca4b9b2fc9f3959bfb70cc8d874
|
[
"MIT"
] | 1
|
2021-05-24T23:54:13.000Z
|
2021-05-24T23:54:13.000Z
|
import abc
from abc import abstractmethod
class Logger(abc.ABC):
@abstractmethod
def trace(self, msg):
pass
@abstractmethod
def debug(self, msg):
pass
@abstractmethod
def info(self, msg):
pass
@abstractmethod
def warning(self, msg):
pass
@abstractmethod
def error(self, msg):
pass
@abstractmethod
def exception(self, msg):
pass
@abstractmethod
def critical(self, msg):
pass
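A minimal concrete subclass of the abstract Logger above, shown only to illustrate the methods an implementation must provide; it simply prints with a level prefix.
# Minimal illustrative implementation of the abstract Logger above.
class PrintLogger(Logger):
    def trace(self, msg):
        print(f"TRACE: {msg}")
    def debug(self, msg):
        print(f"DEBUG: {msg}")
    def info(self, msg):
        print(f"INFO: {msg}")
    def warning(self, msg):
        print(f"WARNING: {msg}")
    def error(self, msg):
        print(f"ERROR: {msg}")
    def exception(self, msg):
        print(f"EXCEPTION: {msg}")
    def critical(self, msg):
        print(f"CRITICAL: {msg}")

PrintLogger().info("event bus started")   # -> INFO: event bus started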
| 14.558824
| 30
| 0.589899
|
919268f99e3556ef97ca43fa7ec3a2c9d8999388
| 19,946
|
py
|
Python
|
features/efficientnet.py
|
SConsul/FLITE
|
7e3f462e66845a5c05e909d6a21dc1862a58579b
|
[
"MIT"
] | null | null | null |
features/efficientnet.py
|
SConsul/FLITE
|
7e3f462e66845a5c05e909d6a21dc1862a58579b
|
[
"MIT"
] | null | null | null |
features/efficientnet.py
|
SConsul/FLITE
|
7e3f462e66845a5c05e909d6a21dc1862a58579b
|
[
"MIT"
] | null | null | null |
"""model.py - Model and module class for EfficientNet.
They are built to mirror those in the official TensorFlow implementation.
"""
# Author: lukemelas (github username)
# Github repo: https://github.com/lukemelas/EfficientNet-PyTorch
# With adjustments and added comments by workingcoder (github username).
import torch
from torch import nn
from torch.nn import functional as F
from features.efficientnet_utils import (
round_filters,
round_repeats,
drop_connect,
get_same_padding_conv2d,
get_model_params,
efficientnet_params,
load_pretrained_weights,
Swish,
MemoryEfficientSwish,
Conv2dDynamicSamePadding,
Conv2dStaticSamePadding,
calculate_output_image_size
)
from thop.vision.basic_hooks import count_convNd, zero_ops
from feature_adapters.efficientnet_adaptation_layers import FilmLayer, FilmLayerGenerator
VALID_MODELS = (
'efficientnet-b0', 'efficientnet-b1', 'efficientnet-b2', 'efficientnet-b3',
'efficientnet-b4', 'efficientnet-b5', 'efficientnet-b6', 'efficientnet-b7',
'efficientnet-b8',
# Support the construction of 'efficientnet-l2' without pretrained weights
'efficientnet-l2'
)
def film(x, gamma, beta):
gamma = gamma[None, :, None, None]
beta = beta[None, :, None, None]
return gamma * x + beta
class MBConvBlock(nn.Module):
"""Mobile Inverted Residual Bottleneck Block.
Args:
block_args (namedtuple): BlockArgs, defined in utils.py.
global_params (namedtuple): GlobalParam, defined in utils.py.
image_size (tuple or list): [image_height, image_width].
References:
[1] https://arxiv.org/abs/1704.04861 (MobileNet v1)
[2] https://arxiv.org/abs/1801.04381 (MobileNet v2)
[3] https://arxiv.org/abs/1905.02244 (MobileNet v3)
"""
def __init__(self, block_args, global_params, image_size=None):
super().__init__()
self._block_args = block_args
self._bn_mom = 1 - global_params.batch_norm_momentum # pytorch's difference from tensorflow
self._bn_eps = global_params.batch_norm_epsilon
self.has_se = (self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1)
self.id_skip = block_args.id_skip # whether to use skip connection and drop connect
# Expansion phase (Inverted Bottleneck)
inp = self._block_args.input_filters # number of input channels
oup = self._block_args.input_filters * self._block_args.expand_ratio # number of output channels
if self._block_args.expand_ratio != 1:
Conv2d = get_same_padding_conv2d(image_size=image_size)
self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
# image_size = calculate_output_image_size(image_size, 1) <-- this wouldn't modify image_size
# Depthwise convolution phase
k = self._block_args.kernel_size
s = self._block_args.stride
Conv2d = get_same_padding_conv2d(image_size=image_size)
self._depthwise_conv = Conv2d(
in_channels=oup, out_channels=oup, groups=oup, # groups makes it depthwise
kernel_size=k, stride=s, bias=False)
self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
image_size = calculate_output_image_size(image_size, s)
# Squeeze and Excitation layer, if desired
if self.has_se:
Conv2d = get_same_padding_conv2d(image_size=(1, 1))
num_squeezed_channels = max(1, int(self._block_args.input_filters * self._block_args.se_ratio))
self._se_reduce = Conv2d(in_channels=oup, out_channels=num_squeezed_channels, kernel_size=1)
self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=oup, kernel_size=1)
# Pointwise convolution phase
final_oup = self._block_args.output_filters
Conv2d = get_same_padding_conv2d(image_size=image_size)
self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
self._swish = MemoryEfficientSwish()
def forward(self, inputs, drop_connect_rate=None):
"""MBConvBlock's forward function.
Args:
inputs (tensor): Input tensor.
drop_connect_rate (bool): Drop connect rate (float, between 0 and 1).
Returns:
Output of this block after processing.
"""
# Expansion and Depthwise Convolution
x = inputs
if self._block_args.expand_ratio != 1:
x = self._expand_conv(inputs)
x = self._bn0(x)
x = self._swish(x)
x = self._depthwise_conv(x)
x = self._bn1(x)
x = self._swish(x)
# Squeeze and Excitation
if self.has_se:
x_squeezed = F.adaptive_avg_pool2d(x, 1)
x_squeezed = self._se_reduce(x_squeezed)
x_squeezed = self._swish(x_squeezed)
x_squeezed = self._se_expand(x_squeezed)
x = torch.sigmoid(x_squeezed) * x
# Pointwise Convolution
x = self._project_conv(x)
x = self._bn2(x)
# Skip connection and drop connect
input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
if self.id_skip and self._block_args.stride == 1 and input_filters == output_filters:
# The combination of skip connection and drop connect brings about stochastic depth.
if drop_connect_rate:
x = drop_connect(x, p=drop_connect_rate, training=self.training)
x = x + inputs # skip connection
return x
def set_swish(self, memory_efficient=True):
"""Sets swish function as memory efficient (for training) or standard (for export).
Args:
memory_efficient (bool): Whether to use memory-efficient version of swish.
"""
self._swish = MemoryEfficientSwish() if memory_efficient else Swish()
class FilmMBConvBlock(MBConvBlock):
def __init__(self, block_args, global_params, image_size=None):
MBConvBlock.__init__(self, block_args, global_params, image_size)
def forward(self, inputs, drop_connect_rate=None, gamma=None, beta=None):
"""MBConvBlock's forward function.
Args:
inputs (tensor): Input tensor.
drop_connect_rate (bool): Drop connect rate (float, between 0 and 1).
Returns:
Output of this block after processing.
"""
# Expansion and Depthwise Convolution
x = inputs
if self._block_args.expand_ratio != 1:
x = self._expand_conv(inputs)
x = self._bn0(x)
x = self._swish(x)
x = self._depthwise_conv(x)
x = self._bn1(x)
x = film(x, gamma, beta)
x = self._swish(x)
# Squeeze and Excitation
if self.has_se:
x_squeezed = F.adaptive_avg_pool2d(x, 1)
x_squeezed = self._se_reduce(x_squeezed)
x_squeezed = self._swish(x_squeezed)
x_squeezed = self._se_expand(x_squeezed)
x = torch.sigmoid(x_squeezed) * x
# Pointwise Convolution
x = self._project_conv(x)
x = self._bn2(x)
# Skip connection and drop connect
input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
if self.id_skip and self._block_args.stride == 1 and input_filters == output_filters:
# The combination of skip connection and drop connect brings about stochastic depth.
if drop_connect_rate:
x = drop_connect(x, p=drop_connect_rate, training=self.training)
x = x + inputs # skip connection
return x
class EfficientNet(nn.Module):
"""EfficientNet model.
Most easily loaded with the .from_name or .from_pretrained methods.
Args:
blocks_args (list[namedtuple]): A list of BlockArgs to construct blocks.
global_params (namedtuple): A set of GlobalParams shared between blocks.
References:
[1] https://arxiv.org/abs/1905.11946 (EfficientNet)
Example:
>>> import torch
>>> from efficientnet.model import EfficientNet
>>> inputs = torch.rand(1, 3, 224, 224)
>>> model = EfficientNet.from_pretrained('efficientnet-b0')
>>> model.eval()
>>> outputs = model(inputs)
"""
def __init__(self, blocks_args=None, global_params=None, block_fn=MBConvBlock):
super().__init__()
assert isinstance(blocks_args, list), 'blocks_args should be a list'
assert len(blocks_args) > 0, 'block args must be greater than 0'
self._global_params = global_params
self._blocks_args = blocks_args
# Batch norm parameters
bn_mom = 1 - self._global_params.batch_norm_momentum
bn_eps = self._global_params.batch_norm_epsilon
# Get stem static or dynamic convolution depending on image size
image_size = global_params.image_size
Conv2d = get_same_padding_conv2d(image_size=image_size)
# Stem
in_channels = 3 # rgb
out_channels = round_filters(32, self._global_params) # number of output channels
self._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False)
self._bn0 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)
image_size = calculate_output_image_size(image_size, 2)
# Build blocks
self._blocks = nn.ModuleList([])
for block_args in self._blocks_args:
# Update block input and output filters based on depth multiplier.
block_args = block_args._replace(
input_filters=round_filters(block_args.input_filters, self._global_params),
output_filters=round_filters(block_args.output_filters, self._global_params),
num_repeat=round_repeats(block_args.num_repeat, self._global_params)
)
# The first block needs to take care of stride and filter size increase.
self._blocks.append(block_fn(block_args, self._global_params, image_size=image_size))
image_size = calculate_output_image_size(image_size, block_args.stride)
if block_args.num_repeat > 1: # modify block_args to keep same output size
block_args = block_args._replace(input_filters=block_args.output_filters, stride=1)
for _ in range(block_args.num_repeat - 1):
self._blocks.append(block_fn(block_args, self._global_params, image_size=image_size))
# image_size = calculate_output_image_size(image_size, block_args.stride) # stride = 1
# Head
in_channels = block_args.output_filters # output of final block
out_channels = round_filters(1280, self._global_params)
Conv2d = get_same_padding_conv2d(image_size=image_size)
self._conv_head = Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
self._bn1 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)
# Final linear layer
self._avg_pooling = nn.AdaptiveAvgPool2d(1)
if self._global_params.include_top:
self._dropout = nn.Dropout(self._global_params.dropout_rate)
self._fc = nn.Linear(out_channels, self._global_params.num_classes)
# set activation to memory efficient swish by default
self._swish = MemoryEfficientSwish()
self.thop_custom_ops = {
Conv2dDynamicSamePadding: count_convNd,
Conv2dStaticSamePadding: count_convNd,
MemoryEfficientSwish: zero_ops,
}
def set_swish(self, memory_efficient=True):
"""Sets swish function as memory efficient (for training) or standard (for export).
Args:
memory_efficient (bool): Whether to use memory-efficient version of swish.
"""
self._swish = MemoryEfficientSwish() if memory_efficient else Swish()
for block in self._blocks:
block.set_swish(memory_efficient)
def extract_endpoints(self, inputs):
"""Use convolution layer to extract features
from reduction levels i in [1, 2, 3, 4, 5].
Args:
inputs (tensor): Input tensor.
Returns:
Dictionary of last intermediate features
with reduction levels i in [1, 2, 3, 4, 5].
Example:
>>> import torch
>>> from efficientnet.model import EfficientNet
>>> inputs = torch.rand(1, 3, 224, 224)
>>> model = EfficientNet.from_pretrained('efficientnet-b0')
>>> endpoints = model.extract_endpoints(inputs)
>>> print(endpoints['reduction_1'].shape) # torch.Size([1, 16, 112, 112])
>>> print(endpoints['reduction_2'].shape) # torch.Size([1, 24, 56, 56])
>>> print(endpoints['reduction_3'].shape) # torch.Size([1, 40, 28, 28])
>>> print(endpoints['reduction_4'].shape) # torch.Size([1, 112, 14, 14])
>>> print(endpoints['reduction_5'].shape) # torch.Size([1, 320, 7, 7])
>>> print(endpoints['reduction_6'].shape) # torch.Size([1, 1280, 7, 7])
"""
endpoints = dict()
# Stem
x = self._swish(self._bn0(self._conv_stem(inputs)))
prev_x = x
# Blocks
for idx, block in enumerate(self._blocks):
drop_connect_rate = self._global_params.drop_connect_rate
if drop_connect_rate:
drop_connect_rate *= float(idx) / len(self._blocks) # scale drop connect_rate
x = block(x, drop_connect_rate=drop_connect_rate)
if prev_x.size(2) > x.size(2):
endpoints['reduction_{}'.format(len(endpoints) + 1)] = prev_x
elif idx == len(self._blocks) - 1:
endpoints['reduction_{}'.format(len(endpoints) + 1)] = x
prev_x = x
# Head
x = self._swish(self._bn1(self._conv_head(x)))
endpoints['reduction_{}'.format(len(endpoints) + 1)] = x
return endpoints
def extract_features(self, inputs):
"""use convolution layer to extract feature .
Args:
inputs (tensor): Input tensor.
Returns:
Output of the final convolution
layer in the efficientnet model.
"""
# Stem
x = self._swish(self._bn0(self._conv_stem(inputs)))
# Blocks
for idx, block in enumerate(self._blocks):
drop_connect_rate = self._global_params.drop_connect_rate
if drop_connect_rate:
drop_connect_rate *= float(idx) / len(self._blocks) # scale drop connect_rate
x = block(x, drop_connect_rate=drop_connect_rate)
# Head
x = self._swish(self._bn1(self._conv_head(x)))
return x
def forward(self, inputs, film_params=None):
"""EfficientNet's forward function.
        Calls extract_features to extract features, applies global average pooling,
        and returns the flattened feature vector (the final linear layer is not applied here).
Args:
inputs (tensor): Input tensor.
Returns:
Output of this model after processing.
"""
x = self._flatten(inputs)
# Convolution layers
x = self.extract_features(x)
# Pooling and final linear layer
x = self._avg_pooling(x)
x = x.view(x.size(0), -1)
return x
@property
def output_size(self):
return 1280
def _change_in_channels(self, in_channels):
"""Adjust model's first convolution layer to in_channels, if in_channels not equals 3.
Args:
in_channels (int): Input data's channel number.
"""
if in_channels != 3:
Conv2d = get_same_padding_conv2d(image_size=self._global_params.image_size)
out_channels = round_filters(32, self._global_params)
self._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False)
def _flatten(self, x):
sz = x.size()
return x.view(-1, sz[-3], sz[-2], sz[-1]) if x.dim() >=5 else x
class FilmEfficientNet(EfficientNet):
def __init__(self, blocks_args=None, global_params=None, block_fn=FilmMBConvBlock):
EfficientNet.__init__(self, blocks_args, global_params, block_fn=block_fn)
def _get_adaptation_layer(self, generatable=False):
if generatable:
return FilmLayerGenerator
else:
return FilmLayer
def _get_adaptation_config(self):
num_maps_per_layer, num_blocks_per_layer = [], []
num_maps_per_layer.append([self._conv_stem.out_channels])
num_blocks_per_layer.append(1)
for block in self._blocks:
num_maps_per_layer.append([block._depthwise_conv.out_channels])
num_blocks_per_layer.append(1)
num_blocks_per_layer.append(1)
num_maps_per_layer.append([self._conv_head.out_channels])
param_dict = {
'num_maps_per_layer' : num_maps_per_layer,
'num_blocks_per_layer' : num_blocks_per_layer
}
return param_dict
def extract_features(self, inputs, param_dict):
"""use convolution layer to extract feature .
Args:
inputs (tensor): Input tensor.
Returns:
Output of the final convolution
layer in the efficientnet model.
"""
# Stem
layer_idx = 0
x = self._bn0(self._conv_stem(inputs))
x = film(x, param_dict[layer_idx][0]['gamma'], param_dict[layer_idx][0]['beta'])
layer_idx += 1
x = self._swish(x)
# Blocks
for idx, block in enumerate(self._blocks):
drop_connect_rate = self._global_params.drop_connect_rate
if drop_connect_rate:
drop_connect_rate *= float(idx) / len(self._blocks) # scale drop connect_rate
x = block(x, drop_connect_rate=drop_connect_rate, gamma=param_dict[layer_idx][0]['gamma'],
beta=param_dict[layer_idx][0]['beta'])
layer_idx += 1
# Head
x = self._bn1(self._conv_head(x))
x = film(x, param_dict[layer_idx][0]['gamma'], param_dict[layer_idx][0]['beta'])
x = self._swish(x)
return x
def forward(self, inputs, film_params=None):
"""EfficientNet's forward function.
        Calls extract_features to extract features, applies global average pooling,
        and returns the flattened feature vector (the final linear layer is not applied here).
Args:
inputs (tensor): Input tensor.
Returns:
Output of this model after processing.
"""
x = self._flatten(inputs)
# Convolution layers
x = self.extract_features(x, film_params)
# Pooling and final linear layer
x = self._avg_pooling(x)
x = x.view(x.size(0), -1)
return x
def efficientnetb0(pretrained=False, pretrained_model_path=None, batch_norm='basic', with_film=False, **override_params):
"""
Constructs an EfficientNet-b0 model.
"""
assert batch_norm == 'basic', 'TaskNorm not implemented for EfficientNets'
model_type = 'efficientnet-b0'
blocks_args, global_params = get_model_params(model_type, override_params)
if with_film:
model = FilmEfficientNet(blocks_args, global_params)
else:
model = EfficientNet(blocks_args, global_params)
model._change_in_channels(in_channels=3)
if pretrained:
load_pretrained_weights(model, model_type, weights_path=pretrained_model_path, load_fc=False)
model._change_in_channels(in_channels=3)
return model
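A short usage sketch for the factory above, assuming this module and its dependencies import cleanly; no pretrained weights are loaded, so the features come from a randomly initialised network.
# Illustrative usage of efficientnetb0() defined above (random init, CPU, no FiLM).
model = efficientnetb0(pretrained=False, with_film=False)
model.eval()
with torch.no_grad():
    feats = model(torch.rand(2, 3, 224, 224))
print(feats.shape)   # torch.Size([2, 1280]) -- pooled features, no classification head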
| 41.381743
| 122
| 0.646094
|
ac1061716324c8c57307790fab625e9b49e64d01
| 4,810
|
py
|
Python
|
vkikriging/kriging_v3.py
|
rdwight/vkikriging
|
91c9d274916d77a6729bf0fe426f980cc16c2c48
|
[
"MIT"
] | 3
|
2021-05-19T13:12:27.000Z
|
2022-03-19T03:18:37.000Z
|
vkikriging/kriging_v3.py
|
rdwight/vkikriging
|
91c9d274916d77a6729bf0fe426f980cc16c2c48
|
[
"MIT"
] | null | null | null |
vkikriging/kriging_v3.py
|
rdwight/vkikriging
|
91c9d274916d77a6729bf0fe426f980cc16c2c48
|
[
"MIT"
] | 1
|
2019-03-21T18:30:52.000Z
|
2019-03-21T18:30:52.000Z
|
"""
Universal Kriging - version 3 (`kriging_v3`)
============================================
Universal Kriging in d-dimensions. This differs from `kriging_v1` and `kriging_v2`
which implement only simple Kriging.
"""
import numpy as np
from .mylib import Timing
from .covariance import covariance_squaredexponential, covariance_squaredexponential_dxi, covariance_squaredexponential_dxidxi
def F_linear(xi):
"""
Basis functions for parameterization of non-stationary mean. This version of the
function implements a linear basis.
Args:
xi (ndarray): Coordinates of points in parameter space, shape `(n, d)`
Return:
out (ndarray): Matrix F shape `(n, M)`, where `M` is the number of basis functions.
"""
n, d = xi.shape
return np.hstack((np.ones((n, 1)), xi))
def dF_linear(xi):
"""
Derivatives of basis functions defined in F_linear(). (Would be) needed for
non-stationary mean with GEK.
Args:
xi (ndarray): Coordinates of points in parameter space, shape `(n, d)`
Return:
out (ndarray): Tensor of derivatives, shape `(n, M, d)`.
"""
n, d = xi.shape
M = d + 1 # Must be equal to M = F_linear(xi).shape[1]
out = np.zeros((n, M, d))
for i in range(n):
out[i, 1:, :] = np.identity(d)
return out
def kriging(xi, x, observed, sigma_y, F_mean, sd_x, gamma):
"""
Function kriging_v1.kriging() modified for universal Kriging (spatially variable
mean based on general regression). This is achived by introducing a function-basis
F (e.g. `F_linear()`) for representing the *variable* mean, and new unknown vector
\lambda. The mean is then \lambda . F, and the unknown vector x is augmented:
x_a = [x, \lambda],
given which the new observation operator is:
H_a = [H, F].
The prior mean (of the Gaussian process) is now always zero, instead of specifying
the mean `mu_x`, the function-basis must be specified in the argument `F_mean`.
Args:
xi (ndarray): Sample locations (both observations and predictions), shape `(n,d)`
x (ndarray): Sample values (values not at observation locations are not used).
Shape `n`.
observed (ndarray): Bool array specifying which values are observed. Shape `n`,
`True` - observed, `False` - not observed.
sigma_y (float): Standard-deviation of observation error. Scalar.
F_mean (function): A function in the template of F_linear(), providing a basis for
the description of the non-stationary mean (in d-dimensions).
sd_x (float): (Sample) standard-deviation of the approximated function,
used in the prior. Scalars.
gamma (float): Correlation coefficient in all directions. Scalar.
Return:
out (dict): Dictionary of prior and posterior statistics.
"""
### Determine problem dimensions from input.
n, d = xi.shape #
H = np.identity(n)[observed] # Observation operator
y = np.dot(H, x) # Observations
m = y.size # Number of observations
F = F_mean(xi) # Basis for non-stationary mean
Fy = F[observed] # Restricted to observation locations
M = F.shape[1] # Size of basis
Ha = np.hstack((H, Fy)) # Augmented observation operator
### Observation error covar matrix
R = np.diag(np.ones(m) * max(sigma_y, 1.e-4) ** 2)
### Prior mean and covariance at the sample locations. Augmented
### with priors of coefficients (TODO: atm normal dist with large
### std, should be non-informative).
t = Timing()
mua_prior = np.zeros(n + M)
Pa = np.zeros((n + M, n + M))
Pa[:n, :n] = sd_x ** 2 * covariance_squaredexponential(xi, xi, gamma)
Pa[n:, n:] = 1.e6 * np.identity(M) # Prior on mean coefficients
t.monitor('Build prior covariance')
### The gain matrix.
Aa = R + np.dot(Ha, np.dot(Pa, Ha.T))
Ka = np.dot(Pa, np.dot(Ha.T, np.linalg.inv(Aa)))
t.monitor('Invert K')
### Posterior mean and covariance (prediction):
# E(x|y) ("predictor")
muahat = mua_prior + np.dot(Ka, y - np.dot(Ha, mua_prior))
muhat = np.dot(F, muahat[n:]) + muahat[:n]
t.monitor('Evaluate posterior mean')
# Cov(x|y) ("mean-squared error estimator")
covahat = np.dot(np.identity(n + M) - np.dot(Ka, Ha), Pa)
covPhat, covFhat = covahat[:n, :n], covahat[n:, n:]
# covhat = np.dot(F, np.dot(covFhat, F.T)) + covPhat
covhat = covPhat
t.monitor('Evaluate posterior covariance')
### Return all this statistical information.
return {
'mua_prior': mua_prior,
'cov_prior': Pa, # Prior (augmented)
'muahat': muahat,
'covahat': covahat, # Posterior (augmented)
'muhat': muhat,
'Sigmahat': covhat, # Posterior
}
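# Usage sketch (added for illustration; not part of the original module). The
# sample locations, observations and hyper-parameters below are made-up
# placeholders -- in practice they come from the surrounding experiment. The
# call follows the signature of kriging() defined above.
if __name__ == "__main__":
    xi_demo = np.random.rand(20, 2)                 # 20 points in a 2-D parameter space
    x_demo = np.sin(xi_demo[:, 0]) + xi_demo[:, 1]  # made-up sample values
    observed_demo = np.zeros(20, dtype=bool)
    observed_demo[:10] = True                       # first 10 points carry observations
    stats = kriging(xi_demo, x_demo, observed_demo, sigma_y=1.e-3,
                    F_mean=F_linear, sd_x=float(np.std(x_demo)), gamma=0.3)
    muhat_demo = stats['muhat']                     # posterior mean at all 20 points
    sd_demo = np.sqrt(np.diag(stats['Sigmahat']))   # posterior standard deviation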
| 37.286822
| 126
| 0.632432
|
72b344200279c920408a69f965b23a9a13f68004
| 772
|
py
|
Python
|
test_StraetoGT.py
|
StFS/StraetoGT
|
8e3c88ca9baccd02e03d15ca7095621dddabcbd1
|
[
"MIT"
] | 1
|
2015-05-04T03:47:38.000Z
|
2015-05-04T03:47:38.000Z
|
test_StraetoGT.py
|
StFS/StraetoGT
|
8e3c88ca9baccd02e03d15ca7095621dddabcbd1
|
[
"MIT"
] | null | null | null |
test_StraetoGT.py
|
StFS/StraetoGT
|
8e3c88ca9baccd02e03d15ca7095621dddabcbd1
|
[
"MIT"
] | null | null | null |
import appengine_config
import os
import unittest
import threading
import requests
import xml.etree.ElementTree as ET
import SimpleHTTPServer
import SocketServer
import StraetoGT
class StraetoGT_TestCase(unittest.TestCase):
def setUp(self):
handler = SimpleHTTPServer.SimpleHTTPRequestHandler
        self.httpd = SocketServer.TCPServer(("", 0), handler)  # port 0: let the OS pick a free port
self.address = "http://localhost:{0}/test_data/".format(self.httpd.server_address[1])
threading.Thread(target=self.httpd.serve_forever).start()
def tearDown(self):
self.httpd.shutdown()
def testAll(self):
worker = StraetoGT.Worker(self.address)
worker.generate_stops()
worker.generate_routes()
if __name__ == '__main__':
unittest.main()
| 24.125
| 93
| 0.717617
|
0aa14dee7c42090d089d0e3561942cb2829f2c39
| 108
|
py
|
Python
|
multiProcess/ThreadTest2.py
|
BenoitYU/inventory-hunter
|
60901503009490f86f9bf9a9e7bb0c3b73fdf12d
|
[
"MIT"
] | null | null | null |
multiProcess/ThreadTest2.py
|
BenoitYU/inventory-hunter
|
60901503009490f86f9bf9a9e7bb0c3b73fdf12d
|
[
"MIT"
] | null | null | null |
multiProcess/ThreadTest2.py
|
BenoitYU/inventory-hunter
|
60901503009490f86f9bf9a9e7bb0c3b73fdf12d
|
[
"MIT"
] | null | null | null |
import time
for i in range(5):
time.sleep(2)
print(f'this is in thread 2, the cycle number is {i}')
| 21.6
| 58
| 0.648148
|
a0ce3fdda4a97442e2c8a69f85387096ea29f929
| 1,290
|
py
|
Python
|
aiida/backends/sqlalchemy/migrations/versions/3d6190594e19_remove_dbcomputer_enabled.py
|
azadoks/aiida-core
|
b806b7fef8fc79090deccfe2019b77cb922e0581
|
[
"MIT",
"BSD-3-Clause"
] | 180
|
2019-07-12T07:45:26.000Z
|
2022-03-22T13:16:57.000Z
|
aiida/backends/sqlalchemy/migrations/versions/3d6190594e19_remove_dbcomputer_enabled.py
|
azadoks/aiida-core
|
b806b7fef8fc79090deccfe2019b77cb922e0581
|
[
"MIT",
"BSD-3-Clause"
] | 2,325
|
2019-07-04T13:41:44.000Z
|
2022-03-31T12:17:10.000Z
|
aiida/backends/sqlalchemy/migrations/versions/3d6190594e19_remove_dbcomputer_enabled.py
|
azadoks/aiida-core
|
b806b7fef8fc79090deccfe2019b77cb922e0581
|
[
"MIT",
"BSD-3-Clause"
] | 88
|
2019-07-06T01:42:39.000Z
|
2022-03-18T14:20:09.000Z
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=invalid-name
"""Remove `DbComputer.enabled`
Revision ID: 3d6190594e19
Revises: 5a49629f0d45
Create Date: 2019-04-03 14:38:50.585639
"""
# Remove when https://github.com/PyCQA/pylint/issues/1931 is fixed
# pylint: disable=no-member,no-name-in-module,import-error
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3d6190594e19'
down_revision = '5a49629f0d45'
branch_labels = None
depends_on = None
def upgrade():
op.drop_column('db_dbcomputer', 'enabled')
def downgrade():
op.add_column('db_dbcomputer', sa.Column('enabled', sa.BOOLEAN(), autoincrement=False, nullable=True))
| 34.864865
| 106
| 0.566667
|
135a8cf6d5b919e7e070836aca5b0befc8370d1a
| 22,071
|
py
|
Python
|
django/utils/translation/trans_real.py
|
mitar/django
|
aa757ac22de3e657df49086cf01a26f6c73b8dfb
|
[
"BSD-3-Clause"
] | 1
|
2016-05-09T02:41:07.000Z
|
2016-05-09T02:41:07.000Z
|
django/utils/translation/trans_real.py
|
akaihola/django
|
169b1a404c8118bb75840523d5fb3543de9c8889
|
[
"BSD-3-Clause"
] | null | null | null |
django/utils/translation/trans_real.py
|
akaihola/django
|
169b1a404c8118bb75840523d5fb3543de9c8889
|
[
"BSD-3-Clause"
] | null | null | null |
"""Translation helper functions."""
import locale
import os
import re
import sys
import gettext as gettext_module
from io import BytesIO
from threading import local
from django.utils.importlib import import_module
from django.utils.safestring import mark_safe, SafeData
# Translations are cached in a dictionary for every language+app tuple.
# The active translations are stored by threadid to make them thread local.
_translations = {}
_active = local()
# The default translation is based on the settings file.
_default = None
# This is a cache for normalized accept-header languages to prevent multiple
# file lookups when checking the same locale on repeated requests.
_accepted = {}
# magic gettext number to separate context from message
CONTEXT_SEPARATOR = u"\x04"
# Format of Accept-Language header values. From RFC 2616, section 14.4 and 3.9.
accept_language_re = re.compile(r'''
([A-Za-z]{1,8}(?:-[A-Za-z]{1,8})*|\*) # "en", "en-au", "x-y-z", "*"
(?:\s*;\s*q=(0(?:\.\d{,3})?|1(?:.0{,3})?))? # Optional "q=1.00", "q=0.8"
(?:\s*,\s*|$) # Multiple accepts per header.
''', re.VERBOSE)
language_code_prefix_re = re.compile(r'^/([\w-]+)(/|$)')
def to_locale(language, to_lower=False):
"""
Turns a language name (en-us) into a locale name (en_US). If 'to_lower' is
True, the last component is lower-cased (en_us).
"""
p = language.find('-')
if p >= 0:
if to_lower:
return language[:p].lower()+'_'+language[p+1:].lower()
else:
# Get correct locale for sr-latn
if len(language[p+1:]) > 2:
return language[:p].lower()+'_'+language[p+1].upper()+language[p+2:].lower()
return language[:p].lower()+'_'+language[p+1:].upper()
else:
return language.lower()
def to_language(locale):
"""Turns a locale name (en_US) into a language name (en-us)."""
p = locale.find('_')
if p >= 0:
return locale[:p].lower()+'-'+locale[p+1:].lower()
else:
return locale.lower()
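# Illustrative behaviour of the two helpers above (added note; traced from the
# code, the inputs are examples only):
#   to_locale('en-us')                 -> 'en_US'
#   to_locale('sr-latn')               -> 'sr_Latn'  (suffixes longer than two
#                                                     characters are title-cased)
#   to_locale('en-us', to_lower=True)  -> 'en_us'
#   to_language('en_US')               -> 'en-us'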
class DjangoTranslation(gettext_module.GNUTranslations):
"""
This class sets up the GNUTranslations context with regard to output
charset.
"""
def __init__(self, *args, **kw):
gettext_module.GNUTranslations.__init__(self, *args, **kw)
self.set_output_charset('utf-8')
self.django_output_charset = 'utf-8'
self.__language = '??'
def merge(self, other):
self._catalog.update(other._catalog)
def set_language(self, language):
self.__language = language
self.__to_language = to_language(language)
def language(self):
return self.__language
def to_language(self):
return self.__to_language
def __repr__(self):
return "<DjangoTranslation lang:%s>" % self.__language
def translation(language):
"""
Returns a translation object.
This translation object will be constructed out of multiple GNUTranslations
    objects by merging their catalogs. It will construct an object for the
requested language and add a fallback to the default language, if it's
different from the requested language.
"""
global _translations
t = _translations.get(language, None)
if t is not None:
return t
from django.conf import settings
globalpath = os.path.join(os.path.dirname(sys.modules[settings.__module__].__file__), 'locale')
def _fetch(lang, fallback=None):
global _translations
res = _translations.get(lang, None)
if res is not None:
return res
loc = to_locale(lang)
def _translation(path):
try:
t = gettext_module.translation('django', path, [loc], DjangoTranslation)
t.set_language(lang)
return t
except IOError:
return None
res = _translation(globalpath)
# We want to ensure that, for example, "en-gb" and "en-us" don't share
# the same translation object (thus, merging en-us with a local update
# doesn't affect en-gb), even though they will both use the core "en"
# translation. So we have to subvert Python's internal gettext caching.
base_lang = lambda x: x.split('-', 1)[0]
if base_lang(lang) in [base_lang(trans) for trans in _translations]:
res._info = res._info.copy()
res._catalog = res._catalog.copy()
def _merge(path):
t = _translation(path)
if t is not None:
if res is None:
return t
else:
res.merge(t)
return res
for appname in reversed(settings.INSTALLED_APPS):
app = import_module(appname)
apppath = os.path.join(os.path.dirname(app.__file__), 'locale')
if os.path.isdir(apppath):
res = _merge(apppath)
for localepath in reversed(settings.LOCALE_PATHS):
if os.path.isdir(localepath):
res = _merge(localepath)
if res is None:
if fallback is not None:
res = fallback
else:
return gettext_module.NullTranslations()
_translations[lang] = res
return res
default_translation = _fetch(settings.LANGUAGE_CODE)
current_translation = _fetch(language, fallback=default_translation)
return current_translation
def activate(language):
"""
    Fetches the translation object for a given language and installs it as the
    current translation object for the current thread.
"""
_active.value = translation(language)
def deactivate():
"""
    Uninstalls the currently active translation object so that further _ calls
    will resolve against the default translation object again.
"""
if hasattr(_active, "value"):
del _active.value
def deactivate_all():
"""
Makes the active translation object a NullTranslations() instance. This is
useful when we want delayed translations to appear as the original string
for some reason.
"""
_active.value = gettext_module.NullTranslations()
def get_language():
"""Returns the currently selected language."""
t = getattr(_active, "value", None)
if t is not None:
try:
return t.to_language()
except AttributeError:
pass
# If we don't have a real translation object, assume it's the default language.
from django.conf import settings
return settings.LANGUAGE_CODE
def get_language_bidi():
"""
Returns selected language's BiDi layout.
* False = left-to-right layout
* True = right-to-left layout
"""
from django.conf import settings
base_lang = get_language().split('-')[0]
return base_lang in settings.LANGUAGES_BIDI
def catalog():
"""
Returns the current active catalog for further processing.
This can be used if you need to modify the catalog or want to access the
whole message catalog instead of just translating one string.
"""
global _default
t = getattr(_active, "value", None)
if t is not None:
return t
if _default is None:
from django.conf import settings
_default = translation(settings.LANGUAGE_CODE)
return _default
def do_translate(message, translation_function):
"""
Translates 'message' using the given 'translation_function' name -- which
will be either gettext or ugettext. It uses the current thread to find the
translation object to use. If no current translation is activated, the
message will be run through the default translation object.
"""
global _default
eol_message = message.replace('\r\n', '\n').replace('\r', '\n')
t = getattr(_active, "value", None)
if t is not None:
result = getattr(t, translation_function)(eol_message)
else:
if _default is None:
from django.conf import settings
_default = translation(settings.LANGUAGE_CODE)
result = getattr(_default, translation_function)(eol_message)
if isinstance(message, SafeData):
return mark_safe(result)
return result
def gettext(message):
return do_translate(message, 'gettext')
def ugettext(message):
return do_translate(message, 'ugettext')
def pgettext(context, message):
result = do_translate(
u"%s%s%s" % (context, CONTEXT_SEPARATOR, message), 'ugettext')
if CONTEXT_SEPARATOR in result:
# Translation not found
result = message
return result
def gettext_noop(message):
"""
Marks strings for translation but doesn't translate them now. This can be
used to store strings in global variables that should stay in the base
language (because they might be used externally) and will be translated
later.
"""
return message
def do_ntranslate(singular, plural, number, translation_function):
global _default
t = getattr(_active, "value", None)
if t is not None:
return getattr(t, translation_function)(singular, plural, number)
if _default is None:
from django.conf import settings
_default = translation(settings.LANGUAGE_CODE)
return getattr(_default, translation_function)(singular, plural, number)
def ngettext(singular, plural, number):
"""
Returns a UTF-8 bytestring of the translation of either the singular or
plural, based on the number.
"""
return do_ntranslate(singular, plural, number, 'ngettext')
def ungettext(singular, plural, number):
"""
    Returns a unicode string of the translation of either the singular or
plural, based on the number.
"""
return do_ntranslate(singular, plural, number, 'ungettext')
def npgettext(context, singular, plural, number):
result = do_ntranslate(u"%s%s%s" % (context, CONTEXT_SEPARATOR, singular),
u"%s%s%s" % (context, CONTEXT_SEPARATOR, plural),
number, 'ungettext')
if CONTEXT_SEPARATOR in result:
# Translation not found
result = do_ntranslate(singular, plural, number, 'ungettext')
return result
def all_locale_paths():
"""
    Returns a list of paths to user-provided language files.
"""
from django.conf import settings
globalpath = os.path.join(
os.path.dirname(sys.modules[settings.__module__].__file__), 'locale')
return [globalpath] + list(settings.LOCALE_PATHS)
def check_for_language(lang_code):
"""
Checks whether there is a global language file for the given language
code. This is used to decide whether a user-provided language is
available. This is only used for language codes from either the cookies
or session and during format localization.
"""
for path in all_locale_paths():
if gettext_module.find('django', path, [to_locale(lang_code)]) is not None:
return True
return False
def get_language_from_path(path, supported=None):
"""
Returns the language-code if there is a valid language-code
found in the `path`.
"""
if supported is None:
from django.conf import settings
supported = dict(settings.LANGUAGES)
regex_match = language_code_prefix_re.match(path)
if regex_match:
lang_code = regex_match.group(1)
if lang_code in supported and check_for_language(lang_code):
return lang_code
def get_language_from_request(request, check_path=False):
"""
Analyzes the request to find what language the user wants the system to
show. Only languages listed in settings.LANGUAGES are taken into account.
If the user requests a sublanguage where we have a main language, we send
out the main language.
If check_path is True, the URL path prefix will be checked for a language
code, otherwise this is skipped for backwards compatibility.
"""
global _accepted
from django.conf import settings
supported = dict(settings.LANGUAGES)
if check_path:
lang_code = get_language_from_path(request.path_info, supported)
if lang_code is not None:
return lang_code
if hasattr(request, 'session'):
lang_code = request.session.get('django_language', None)
if lang_code in supported and lang_code is not None and check_for_language(lang_code):
return lang_code
lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
if lang_code and lang_code not in supported:
lang_code = lang_code.split('-')[0] # e.g. if fr-ca is not supported fallback to fr
if lang_code and lang_code in supported and check_for_language(lang_code):
return lang_code
accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
for accept_lang, unused in parse_accept_lang_header(accept):
if accept_lang == '*':
break
        # We have a very restricted form for our language files (no encoding
        # specifier, since they all must be UTF-8, and only one possible
        # language each time), so we avoid the overhead of gettext.find() and
        # work out the MO file manually.
# 'normalized' is the root name of the locale in POSIX format (which is
# the format used for the directories holding the MO files).
normalized = locale.locale_alias.get(to_locale(accept_lang, True))
if not normalized:
continue
# Remove the default encoding from locale_alias.
normalized = normalized.split('.')[0]
if normalized in _accepted:
# We've seen this locale before and have an MO file for it, so no
# need to check again.
return _accepted[normalized]
for lang, dirname in ((accept_lang, normalized),
(accept_lang.split('-')[0], normalized.split('_')[0])):
if lang.lower() not in supported:
continue
for path in all_locale_paths():
if os.path.exists(os.path.join(path, dirname, 'LC_MESSAGES', 'django.mo')):
_accepted[normalized] = lang
return lang
return settings.LANGUAGE_CODE
dot_re = re.compile(r'\S')
def blankout(src, char):
"""
Changes every non-whitespace character to the given char.
Used in the templatize function.
"""
return dot_re.sub(char, src)
context_re = re.compile(r"""^\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?'))\s*""")
inline_re = re.compile(r"""^\s*trans\s+((?:"[^"]*?")|(?:'[^']*?'))(\s+.*context\s+(?:"[^"]*?")|(?:'[^']*?'))?\s*""")
block_re = re.compile(r"""^\s*blocktrans(\s+.*context\s+(?:"[^"]*?")|(?:'[^']*?'))?(?:\s+|$)""")
endblock_re = re.compile(r"""^\s*endblocktrans$""")
plural_re = re.compile(r"""^\s*plural$""")
constant_re = re.compile(r"""_\(((?:".*?")|(?:'.*?'))\)""")
one_percent_re = re.compile(r"""(?<!%)%(?!%)""")
def templatize(src, origin=None):
"""
Turns a Django template into something that is understood by xgettext. It
does so by translating the Django translation tags into standard gettext
function invocations.
"""
from django.template import (Lexer, TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK,
TOKEN_COMMENT, TRANSLATOR_COMMENT_MARK)
out = BytesIO()
message_context = None
intrans = False
inplural = False
singular = []
plural = []
incomment = False
comment = []
for t in Lexer(src, origin).tokenize():
if incomment:
if t.token_type == TOKEN_BLOCK and t.contents == 'endcomment':
content = b''.join(comment)
translators_comment_start = None
for lineno, line in enumerate(content.splitlines(True)):
if line.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
translators_comment_start = lineno
for lineno, line in enumerate(content.splitlines(True)):
if translators_comment_start is not None and lineno >= translators_comment_start:
out.write(b' # %s' % line)
else:
out.write(b' #\n')
incomment = False
comment = []
else:
comment.append(t.contents)
elif intrans:
if t.token_type == TOKEN_BLOCK:
endbmatch = endblock_re.match(t.contents)
pluralmatch = plural_re.match(t.contents)
if endbmatch:
if inplural:
if message_context:
out.write(b' npgettext(%r, %r, %r,count) ' % (message_context, ''.join(singular), ''.join(plural)))
else:
out.write(b' ngettext(%r, %r, count) ' % (''.join(singular), ''.join(plural)))
for part in singular:
out.write(blankout(part, 'S'))
for part in plural:
out.write(blankout(part, 'P'))
else:
if message_context:
out.write(b' pgettext(%r, %r) ' % (message_context, ''.join(singular)))
else:
out.write(b' gettext(%r) ' % ''.join(singular))
for part in singular:
out.write(blankout(part, 'S'))
message_context = None
intrans = False
inplural = False
singular = []
plural = []
elif pluralmatch:
inplural = True
else:
filemsg = ''
if origin:
filemsg = 'file %s, ' % origin
raise SyntaxError("Translation blocks must not include other block tags: %s (%sline %d)" % (t.contents, filemsg, t.lineno))
elif t.token_type == TOKEN_VAR:
if inplural:
plural.append('%%(%s)s' % t.contents)
else:
singular.append('%%(%s)s' % t.contents)
elif t.token_type == TOKEN_TEXT:
contents = one_percent_re.sub('%%', t.contents)
if inplural:
plural.append(contents)
else:
singular.append(contents)
else:
if t.token_type == TOKEN_BLOCK:
imatch = inline_re.match(t.contents)
bmatch = block_re.match(t.contents)
cmatches = constant_re.findall(t.contents)
if imatch:
g = imatch.group(1)
if g[0] == '"':
g = g.strip('"')
elif g[0] == "'":
g = g.strip("'")
g = one_percent_re.sub('%%', g)
if imatch.group(2):
# A context is provided
context_match = context_re.match(imatch.group(2))
message_context = context_match.group(1)
if message_context[0] == '"':
message_context = message_context.strip('"')
elif message_context[0] == "'":
message_context = message_context.strip("'")
out.write(b' pgettext(%r, %r) ' % (message_context, g))
message_context = None
else:
out.write(b' gettext(%r) ' % g)
elif bmatch:
for fmatch in constant_re.findall(t.contents):
out.write(' _(%s) ' % fmatch)
if bmatch.group(1):
# A context is provided
context_match = context_re.match(bmatch.group(1))
message_context = context_match.group(1)
if message_context[0] == '"':
message_context = message_context.strip('"')
elif message_context[0] == "'":
message_context = message_context.strip("'")
intrans = True
inplural = False
singular = []
plural = []
elif cmatches:
for cmatch in cmatches:
out.write(b' _(%s) ' % cmatch)
elif t.contents == 'comment':
incomment = True
else:
out.write(blankout(t.contents, 'B'))
elif t.token_type == TOKEN_VAR:
parts = t.contents.split('|')
cmatch = constant_re.match(parts[0])
if cmatch:
out.write(b' _(%s) ' % cmatch.group(1))
for p in parts[1:]:
if p.find(':_(') >= 0:
out.write(b' %s ' % p.split(':',1)[1])
else:
out.write(blankout(p, 'F'))
elif t.token_type == TOKEN_COMMENT:
out.write(b' # %s' % t.contents)
else:
out.write(blankout(t.contents, 'X'))
return out.getvalue()
def parse_accept_lang_header(lang_string):
"""
Parses the lang_string, which is the body of an HTTP Accept-Language
header, and returns a list of (lang, q-value), ordered by 'q' values.
    Any format errors in lang_string result in an empty list being returned.
"""
result = []
pieces = accept_language_re.split(lang_string)
if pieces[-1]:
return []
for i in range(0, len(pieces) - 1, 3):
first, lang, priority = pieces[i : i + 3]
if first:
return []
priority = priority and float(priority) or 1.0
result.append((lang, priority))
result.sort(key=lambda k: k[1], reverse=True)
return result
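# Illustrative example (added; not part of the original module). For a typical
# header value, the parser above yields a q-ordered list of language codes:
#
#   >>> parse_accept_lang_header('en-au,en;q=0.8,*;q=0.5')
#   [('en-au', 1.0), ('en', 0.8), ('*', 0.5)]
#
# Header values the regex cannot fully consume yield an empty list.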
| 37.345178
| 143
| 0.586063
|
499382cde17c6c4b24b30dfa0346f9b8935a22e8
| 6,097
|
py
|
Python
|
pulumi/python/kubernetes/nginx/ingress-controller-repo-only/__main__.py
|
apcurrier/kic-reference-architectures
|
50be4282fde08e3142de1aa698e566c4436a5104
|
[
"Apache-2.0"
] | null | null | null |
pulumi/python/kubernetes/nginx/ingress-controller-repo-only/__main__.py
|
apcurrier/kic-reference-architectures
|
50be4282fde08e3142de1aa698e566c4436a5104
|
[
"Apache-2.0"
] | null | null | null |
pulumi/python/kubernetes/nginx/ingress-controller-repo-only/__main__.py
|
apcurrier/kic-reference-architectures
|
50be4282fde08e3142de1aa698e566c4436a5104
|
[
"Apache-2.0"
] | 1
|
2022-03-24T23:50:45.000Z
|
2022-03-24T23:50:45.000Z
|
import os
import pulumi
from pulumi import Output
import pulumi_kubernetes as k8s
from pulumi_kubernetes.core.v1 import Service
from pulumi_kubernetes.helm.v3 import Release, ReleaseArgs, RepositoryOptsArgs
from pulumi_kubernetes.yaml import ConfigFile
from kic_util import pulumi_config
#
# We default to the OSS IC; to use NGINX Plus, the user needs to enable it in the config
# file by setting the Plus flag and supplying a JWT.
#
config = pulumi.Config('kic-helm')
chart_name = config.get('chart_name')
if not chart_name:
chart_name = 'nginx-ingress'
chart_version = config.get('chart_version')
if not chart_version:
chart_version = '0.12.0'
helm_repo_name = config.get('helm_repo_name')
if not helm_repo_name:
helm_repo_name = 'nginx-stable'
helm_repo_url = config.get('helm_repo_url')
if not helm_repo_url:
helm_repo_url = 'https://helm.nginx.com/stable'
nginx_repository = config.get('nginx_repository')
if not nginx_repository:
nginx_repository = "nginx/nginx-ingress"
nginx_tag = config.get('nginx_tag')
if not nginx_tag:
nginx_tag = "2.1.0"
nginx_plus_flag = config.get_bool('nginx_plus_flag')
if not nginx_plus_flag:
nginx_plus_flag = False
#
# Allow the user to set timeout per helm chart; otherwise
# we default to 5 minutes.
#
helm_timeout = config.get_int('helm_timeout')
if not helm_timeout:
helm_timeout = 300
# Get the FQDN
fqdn = config.get('fqdn')
def project_name_from_project_dir(dirname: str):
script_dir = os.path.dirname(os.path.abspath(__file__))
project_path = os.path.join(script_dir, '..', '..', '..', 'infrastructure', dirname)
return pulumi_config.get_pulumi_project_name(project_path)
def k8_manifest_location():
script_dir = os.path.dirname(os.path.abspath(__file__))
k8_manifest_path = os.path.join(script_dir, 'manifests', 'regcred.yaml')
return k8_manifest_path
k8_manifest = k8_manifest_location()
registrycred = ConfigFile(
"regcred",
file=k8_manifest)
chart_values = {
'controller': {
'nginxplus': nginx_plus_flag,
'healthStatus': True,
'appprotect': {
'enable': False
},
"image": {
"repository": nginx_repository,
"tag": nginx_tag,
"pullPolicy": "Always"
},
"serviceAccount": {
"imagePullSecretName": "regcred"
},
'config': {
'name': 'nginx-config',
'entries': {
'log-format': '$remote_addr - $remote_user [$time_local] \"$request\" $status $body_bytes_sent '
'\"$http_referer\" \"$http_user_agent\" $upstream_response_time $upstream_status '
'\"$uri\" $request_length $request_time [$proxy_host] [] $upstream_addr '
'$upstream_bytes_sent $upstream_response_time $upstream_status $request_id '
}
},
'service': {
'annotations': {
'co.elastic.logs/module': 'nginx'
},
"extraLabels": {
"app": "kic-nginx-ingress"
},
"customPorts": [
{
"name": "dashboard",
"targetPort": 8080,
"protocol": "TCP",
"port": 8080
},
{
"name": "prometheus",
"targetPort": 9113,
"protocol": "TCP",
"port": 9113
}
]
},
'pod': {
'annotations': {
'co.elastic.logs/module': 'nginx'
}
}
},
'prometheus': {
'create': True,
'port': 9113
}
}
stack_name = pulumi.get_stack()
project_name = pulumi.get_project()
pulumi_user = pulumi_config.get_pulumi_user()
kube_project_name = project_name_from_project_dir('kubeconfig')
kube_stack_ref_id = f"{pulumi_user}/{kube_project_name}/{stack_name}"
kube_stack_ref = pulumi.StackReference(kube_stack_ref_id)
kubeconfig = kube_stack_ref.require_output('kubeconfig').apply(lambda c: str(c))
k8s_provider = k8s.Provider(resource_name='ingress-controller-repo-only',
kubeconfig=kubeconfig)
# This is required for the service monitor from the Prometheus namespace
ns = k8s.core.v1.Namespace(resource_name='nginx-ingress',
metadata={'name': 'nginx-ingress',
'labels': {
'prometheus': 'scrape'}
},
opts=pulumi.ResourceOptions(provider=k8s_provider))
kic_release_args = ReleaseArgs(
chart=chart_name,
repository_opts=RepositoryOptsArgs(
repo=helm_repo_url
),
version=chart_version,
namespace=ns.metadata.name,
# Values from Chart's parameters specified hierarchically,
values=chart_values,
# User configurable timeout
timeout=helm_timeout,
    # By default the Release resource will wait until all created resources
    # are available. Set this to true to skip waiting on resources being
    # available.
skip_await=False,
# If we fail, clean up
cleanup_on_fail=True,
# Provide a name for our release
name="kic",
# Lint the chart before installing
lint=True,
# Force update if required
force_update=True)
kic_chart = Release("kic", args=kic_release_args, opts=pulumi.ResourceOptions(depends_on=[ns]))
pstatus = kic_chart.status
srv = Service.get("nginx-ingress",
Output.concat("nginx-ingress", "/", pstatus.name, "-nginx-ingress"))
ingress_service = srv.status
#
# Some load balancers give us a hostname and some just an IP. We need to capture both,
# and then determine what the user needs to do based on which one they have been given.
#
pulumi.export('lb_ingress_hostname', fqdn)
pulumi.export('lb_ingress_ip', pulumi.Output.unsecret(ingress_service.load_balancer.ingress[0].ip))
# Print out our status
pulumi.export("kic_status", pstatus)
| 32.089474
| 112
| 0.62539
|
00147ef2bc089e5ef551a4fd09bc71d82de01863
| 809
|
py
|
Python
|
tests/models/test_hooks.py
|
mistasse/pytorch-lightning
|
3a642601e84c3abf1f1b438f9acc932a1f150f7f
|
[
"Apache-2.0"
] | 1
|
2021-07-29T18:44:58.000Z
|
2021-07-29T18:44:58.000Z
|
tests/models/test_hooks.py
|
mistasse/pytorch-lightning
|
3a642601e84c3abf1f1b438f9acc932a1f150f7f
|
[
"Apache-2.0"
] | null | null | null |
tests/models/test_hooks.py
|
mistasse/pytorch-lightning
|
3a642601e84c3abf1f1b438f9acc932a1f150f7f
|
[
"Apache-2.0"
] | null | null | null |
import pytest
import tests.base.utils as tutils
from pytorch_lightning import Trainer
from tests.base import EvalModelTemplate
@pytest.mark.parametrize('max_steps', [1, 2, 3])
def test_on_before_zero_grad_called(max_steps):
class CurrentTestModel(EvalModelTemplate):
on_before_zero_grad_called = 0
def on_before_zero_grad(self, optimizer):
self.on_before_zero_grad_called += 1
model = CurrentTestModel(tutils.get_default_hparams())
trainer = Trainer(
max_steps=max_steps,
num_sanity_val_steps=5,
)
assert 0 == model.on_before_zero_grad_called
trainer.fit(model)
assert max_steps == model.on_before_zero_grad_called
model.on_before_zero_grad_called = 0
trainer.test(model)
assert 0 == model.on_before_zero_grad_called
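# Companion sketch (added for illustration; not part of the original suite).
# The same counting pattern works for other LightningModule hooks, e.g.
# `on_train_start`; the exact expected count depends on the Trainer
# configuration, so the check below is deliberately loose.
#
# def test_on_train_start_called():
#     class CurrentTestModel(EvalModelTemplate):
#         on_train_start_called = 0
#
#         def on_train_start(self):
#             self.on_train_start_called += 1
#
#     model = CurrentTestModel(tutils.get_default_hparams())
#     trainer = Trainer(max_steps=1)
#     trainer.fit(model)
#     assert model.on_train_start_called >= 1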
| 26.966667
| 58
| 0.742892
|
252deee0ba37333758cf792313f7e20e8c4d8bfa
| 5,859
|
py
|
Python
|
bin2c.py
|
Kepler-Br/bin2c
|
b1c8903cf482309b1868c322547d4338730c4f50
|
[
"MIT"
] | null | null | null |
bin2c.py
|
Kepler-Br/bin2c
|
b1c8903cf482309b1868c322547d4338730c4f50
|
[
"MIT"
] | null | null | null |
bin2c.py
|
Kepler-Br/bin2c
|
b1c8903cf482309b1868c322547d4338730c4f50
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import binascii
import sys
import argparse
def parse_args():
parser = argparse.ArgumentParser(description='Convert binary file to C-style array initializer.')
parser.add_argument('-i', '--input-file', help='the file to be converted. If not specified, stdin is used.')
parser.add_argument('-o', '--output', help='write output to a file')
parser.add_argument('-m', '--max-symbols', type=int, default=80, help='max symbols in the line, defaults to 80')
    parser.add_argument('-L', '--linebreak-string', default='\n', help='what to use to break lines, defaults to "\\n"')
parser.add_argument('-S', '--separator-string', default=', ',
                        help='what to use to separate elements, defaults to ", "')
parser.add_argument('-H', '--element-prefix', default='0x',
help='string to be added to the head of element, defaults to "0x"')
parser.add_argument('-T', '--element-suffix', default='',
help='string to be added to the tail of element, defaults to none')
parser.add_argument('-U', '--force-uppercase', action='store_true', help='force uppercase HEX representation')
parser.add_argument('-n', '--newline', action='store_true', help='add a newline on file end')
parser.add_argument('-c', '--write-comments', action='store_true', help='write text representation of the data')
parser.add_argument('-C', '--comment-string', default='// ',
help='what to use as begin of comment block, defaults to "// "')
parser.add_argument('-s', '--size', action='store_true',
                        help='print the number of elements written at the end. May be inaccurate if stdout is closed on the other end')
return parser.parse_args()
def to_printable_string(content: bytes) -> str:
result = [str()] * len(content)
for i in range(len(content)):
character = chr(content[i])
character = character if character.isprintable() else '.'
result[i] = character
return ''.join(result)
def to_hex_string(content: bytes, prefix: str, suffix: str, uppercase: bool) -> list[str]:
result = [str()] * len(content)
hexified_content = binascii.hexlify(content).decode('UTF-8')
if uppercase:
hexified_content = hexified_content.upper()
for i in range(0, len(hexified_content), 2):
hex_string = hexified_content[i: i + 2]
result[i // 2] = f'{prefix}{hex_string}{suffix}'
return result
def calculate_element_length(prefix: str, suffix: str, separator: str) -> int:
meaningful_part_len = len('00')
return len(prefix) + meaningful_part_len + len(suffix) + len(separator)
def replace_char_by_index(string: str, target: str, index: int) -> str:
if index < 0 or index >= len(string):
return string
if len(string) == 1:
return target
if len(string) - 1 == index:
return string[:index] + target
return string[:index] + target + string[index + 1:]
def calculate_elements_per_line(element_length: int, comment_block_length: int, max_line_length: int,
write_comments: bool) -> int:
comment_min_length = comment_block_length + 1
if max_line_length <= element_length:
return 0
if write_comments and max_line_length <= (element_length + comment_min_length):
return 0
count = 0
symbols_total = 0
if write_comments:
symbols_total = comment_block_length
while (symbols_total + element_length) < max_line_length:
symbols_total += element_length + 1
count += 1
return count
def main():
args = parse_args()
input_file = sys.stdin.buffer
output_file = sys.stdout
try:
if args.input_file:
input_file = open(args.input_file, 'rb')
except OSError as e:
print(f'Cannot open file \'{args.input_file}\': {e.strerror}', file=sys.stderr)
exit(-1)
try:
if args.output:
output_file = open(args.output, 'w')
except OSError as e:
print(f'Cannot open file \'{args.output}\': {e.strerror}', file=sys.stderr)
exit(-1)
element_length = calculate_element_length(args.element_prefix, args.element_suffix, args.separator_string)
max_elements_per_line = calculate_elements_per_line(element_length, len(args.comment_string), args.max_symbols,
args.write_comments)
chunk_size = max_elements_per_line
elements_wrote = 0
try:
for chunk in iter(lambda: input_file.read(chunk_size), b''):
hex_strings = to_hex_string(chunk, args.element_prefix, args.element_suffix, args.force_uppercase)
hex_string = args.separator_string.join(hex_strings)
print(f'{hex_string}{args.separator_string}', file=output_file, end='')
elements_wrote += len(hex_strings)
if args.write_comments:
printable_strings = to_printable_string(chunk)
if printable_strings.endswith('\\'):
printable_strings = replace_char_by_index(printable_strings, '.', len(printable_strings) - 1)
padding = ''
if len(hex_strings) < max_elements_per_line:
padding_count = (max_elements_per_line - len(hex_strings)) * element_length
padding = ' ' * padding_count
print(f'{padding}{args.comment_string}{printable_strings}', file=output_file, end='')
print(file=output_file, end=args.linebreak_string)
output_file.flush()
except BrokenPipeError:
pass
if args.size:
        print(f'\nElements written: {elements_wrote}', file=sys.stderr)
if output_file != sys.stdout:
output_file.close()
if input_file != sys.stdin:
input_file.close()
if __name__ == '__main__':
main()
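# Usage sketch (added; the flags follow the argparse definitions above, the
# file names are placeholders):
#
#   python3 bin2c.py -i logo.png -o logo.inc -c
#
# writes comma-separated hex bytes to logo.inc, with each line roughly like
#
#   0x89, 0x50, 0x4e, 0x47, ... // .PNG...
#
# where the trailing comment column (enabled by -c) shows the printable
# representation of the bytes on that line.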
| 43.4
| 116
| 0.638846
|
9ecb42fa2b5fa20cc1c3a7c3e3672ab38a8091c4
| 4,660
|
py
|
Python
|
nginx_router/backend/synth_project/settings.py
|
BennettDixon/sleep_tracker_full_stack
|
83b46635762db857f0c291c62c4415a1a550ec3d
|
[
"MIT"
] | null | null | null |
nginx_router/backend/synth_project/settings.py
|
BennettDixon/sleep_tracker_full_stack
|
83b46635762db857f0c291c62c4415a1a550ec3d
|
[
"MIT"
] | null | null | null |
nginx_router/backend/synth_project/settings.py
|
BennettDixon/sleep_tracker_full_stack
|
83b46635762db857f0c291c62c4415a1a550ec3d
|
[
"MIT"
] | null | null | null |
"""
Django settings for synth_proj project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# REDIS
REDIS_URL = "redis://{host}:{port}/1".format(
host=os.getenv('REDIS_HOST', 'redis'),
port=os.getenv('REDIS_PORT', '6379')
)
# CELERY STUFF
CELERY_BROKER_URL = REDIS_URL
CELERY_RESULT_BACKEND = REDIS_URL
CELERY_ACCEPT_CONTENT = ['application/json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '=k!m7m1hj6jgw!xsihuv4s(9yy0c4%h#f+te!@0-08$x80@i8^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [
'0.0.0.0',
'localhost',
# backend is the service definition for this in docker-compose
'backend',
'frontend',
# minikube cluster ip
'192.168.99.100'
]
CORS_ORIGIN_WHITELIST = [
'http://localhost',
'https://localhost',
'http://127.0.0.1',
'https://127.0.0.1',
]
CORS_ALLOW_CREDENTIALS = True
# strict slashing; for more info visit:
# https://docs.djangoproject.com/en/dev/ref/settings/#append-slash
APPEND_SLASH = True
# Application definition
INSTALLED_APPS = [
'graphene_django',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# custom apps like our synth_app api
'synth_app',
'corsheaders'
]
GRAPHENE = {
'SCHEMA': 'synth_project.schema.schema'
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'corsheaders.middleware.CorsMiddleware'
]
AUTHENTICATION_BACKENDS = (
'social_core.backends.google.GoogleOAuth2',
)
ROOT_URLCONF = 'synth_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'synth_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.getenv('PGDATABASE', 'brandless_dev'),
'USER': os.getenv('PGUSER', 'postgres'),
'PASSWORD': os.getenv('PGPASSWORD', 'postgres_password'),
'HOST': os.getenv('PGHOST', 'postgres'),
'PORT': os.getenv('PGPORT', '5432')
}
}
# Cache
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": REDIS_URL,
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient"
},
"KEY_PREFIX": "example"
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
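# Environment overrides (added note): the database and Redis settings above are
# driven by environment variables, so a docker-compose service (or shell) only
# needs to export values such as the following -- the names match the
# os.getenv() calls above, the values are the defaults used when nothing is set:
#
#   PGDATABASE=brandless_dev PGUSER=postgres PGPASSWORD=postgres_password
#   PGHOST=postgres PGPORT=5432 REDIS_HOST=redis REDIS_PORT=6379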
| 25.745856
| 91
| 0.68176
|
4ee0c3a2b35e966385d9b5d4eb7c8d304d092f69
| 7,207
|
py
|
Python
|
src/game_stats.py
|
soyuka/botty
|
cc1670ea1db72c5e0ac91685897bf63c89b18896
|
[
"MIT"
] | null | null | null |
src/game_stats.py
|
soyuka/botty
|
cc1670ea1db72c5e0ac91685897bf63c89b18896
|
[
"MIT"
] | null | null | null |
src/game_stats.py
|
soyuka/botty
|
cc1670ea1db72c5e0ac91685897bf63c89b18896
|
[
"MIT"
] | null | null | null |
import numpy as np
import time
import threading
import inspect
from beautifultable import BeautifulTable
from logger import Logger
from config import Config
from messages import Messenger
from utils.misc import hms
from version import __version__
class GameStats:
def __init__(self):
self._messenger = Messenger()
self._start_time = time.time()
self._timer = None
self._timepaused = None
self._paused = False
self._game_counter = 0
self._chicken_counter = 0
self._death_counter = 0
self._merc_death_counter = 0
self._runs_failed = 0
self._run_counter = 1
self._consecutive_runs_failed = 0
self._failed_game_time = 0
self._location = None
self._location_stats = {}
self._location_stats["totals"] = { "items": 0, "deaths": 0, "chickens": 0, "merc_deaths": 0, "failed_runs": 0 }
self._stats_filename = f'stats_{time.strftime("%Y%m%d_%H%M%S")}.log'
self._nopickup_active = False
def update_location(self, loc: str):
if self._location != loc:
self._location = str(loc)
self.populate_location_stat()
def populate_location_stat(self):
if self._location not in self._location_stats:
self._location_stats[self._location] = { "items": [], "deaths": 0, "chickens": 0, "merc_deaths": 0, "failed_runs": 0 }
def log_item_keep(self, item_name: str, send_message: bool, img: np.ndarray, ocr_text: str = None):
Logger.debug(f"Stashed and logged: {item_name}")
filtered_items = ["_potion", "misc_gold"]
if self._location is not None and not any(substring in item_name for substring in filtered_items):
self._location_stats[self._location]["items"].append(item_name)
self._location_stats["totals"]["items"] += 1
if send_message:
self._messenger.send_item(item_name, img, self._location, ocr_text)
def log_death(self, img: str):
self._death_counter += 1
if self._location is not None:
self._location_stats[self._location]["deaths"] += 1
self._location_stats["totals"]["deaths"] += 1
self._messenger.send_death(self._location, img)
def log_chicken(self, img: str):
self._chicken_counter += 1
if self._location is not None:
self._location_stats[self._location]["chickens"] += 1
self._location_stats["totals"]["chickens"] += 1
self._messenger.send_chicken(self._location, img)
def log_merc_death(self):
self._merc_death_counter += 1
if self._location is not None:
self._location_stats[self._location]["merc_deaths"] += 1
self._location_stats["totals"]["merc_deaths"] += 1
def log_start_game(self):
if self._game_counter > 0:
self._save_stats_to_file()
if Config().general["discord_status_count"] and self._game_counter % Config().general["discord_status_count"] == 0:
            # every discord_status_count games, send a status update message
self._send_status_update()
self._game_counter += 1
self._timer = time.time()
Logger.info(f"Starting game #{self._game_counter}")
def log_end_game(self, failed: bool = False):
elapsed_time = 0
if self._timer is not None:
elapsed_time = time.time() - self._timer
self._timer = None
if failed:
self._runs_failed += 1
self._consecutive_runs_failed += 1
if self._location is not None:
self._location_stats[self._location]["failed_runs"] += 1
self._location_stats["totals"]["failed_runs"] += 1
self._failed_game_time += elapsed_time
Logger.warning(f"End failed game: Elapsed time: {elapsed_time:.2f}s Fails: {self._consecutive_runs_failed}")
else:
self._consecutive_runs_failed = 0
Logger.info(f"End game. Elapsed time: {elapsed_time:.2f}s")
def pause_timer(self):
if self._timer is None or self._paused:
return
self._timepaused = time.time()
self._paused = True
def resume_timer(self):
if self._timer is None or not self._paused:
return
pausetime = time.time() - self._timepaused
self._timer = self._timer + pausetime
self._paused = False
def get_current_game_length(self):
if self._timer is None:
return 0
if self._paused:
return self._timepaused - self._timer
else:
return time.time() - self._timer
def get_consecutive_runs_failed(self):
return self._consecutive_runs_failed
def _create_msg(self):
elapsed_time = time.time() - self._start_time
elapsed_time_str = hms(elapsed_time)
avg_length_str = "n/a"
good_games_count = self._game_counter - self._runs_failed
if good_games_count > 0:
good_games_time = elapsed_time - self._failed_game_time
avg_length = good_games_time / float(good_games_count)
avg_length_str = hms(avg_length)
msg = f'\nSession length: {elapsed_time_str}\nGames: {self._game_counter}\nAvg Game Length: {avg_length_str}'
table = BeautifulTable()
table.set_style(BeautifulTable.STYLE_BOX_ROUNDED)
for location in self._location_stats:
if location == "totals":
continue
stats = self._location_stats[location]
table.rows.append([location, len(stats["items"]), stats["chickens"], stats["deaths"], stats["merc_deaths"], stats["failed_runs"]])
table.rows.append([
"T" if Config().general['discord_status_condensed'] else "Total",
self._location_stats["totals"]["items"],
self._location_stats["totals"]["chickens"],
self._location_stats["totals"]["deaths"],
self._location_stats["totals"]["merc_deaths"],
self._location_stats["totals"]["failed_runs"]
])
if Config().general['discord_status_condensed']:
table.columns.header = ["Run", "I", "C", "D", "MD", "F"]
else:
table.columns.header = ["Run", "Items", "Chicken", "Death", "Merc Death", "Failed Runs"]
msg += f"\n{str(table)}\n"
return msg
def _send_status_update(self):
msg = f"Status Report\n{self._create_msg()}\nVersion: {__version__}"
self._messenger.send_message(msg)
def _save_stats_to_file(self):
msg = self._create_msg()
msg += "\nItems:"
for location in self._location_stats:
if location == "totals":
continue
stats = self._location_stats[location]
msg += f"\n {location}:"
for item_name in stats["items"]:
msg += f"\n {item_name}"
with open(file=f"stats/{self._stats_filename}", mode="w+", encoding="utf-8") as f:
f.write(msg)
if __name__ == "__main__":
game_stats = GameStats()
game_stats.log_item_keep("rune_12", True)
game_stats._save_stats_to_file()
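# Typical in-run usage sketch (added for illustration; the method names follow
# the class above, the surrounding run flow is an assumption):
#
#   stats = GameStats()
#   stats.update_location("pindle")
#   stats.log_start_game()
#   ...bot runs, picking things up...
#   stats.log_item_keep("rune_12", send_message=False, img=None)
#   stats.log_end_game(failed=False)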
| 38.540107
| 142
| 0.621202
|
be46f177f30580a08efedbb0e3b194ae52852d53
| 5,035
|
py
|
Python
|
docs/2notebook/1_Introduction_and_Transformations.py
|
xinzhel/TextAttack
|
635a76429743d7bd050aa26cc306e01748e1e4c7
|
[
"MIT"
] | null | null | null |
docs/2notebook/1_Introduction_and_Transformations.py
|
xinzhel/TextAttack
|
635a76429743d7bd050aa26cc306e01748e1e4c7
|
[
"MIT"
] | null | null | null |
docs/2notebook/1_Introduction_and_Transformations.py
|
xinzhel/TextAttack
|
635a76429743d7bd050aa26cc306e01748e1e4c7
|
[
"MIT"
] | null | null | null |
from textattack.transformations import WordSwap
class BananaWordSwap(WordSwap):
""" Transforms an input by replacing any word with 'banana'.
"""
# We don't need a constructor, since our class doesn't require any parameters.
def _get_replacement_words(self, word):
""" Returns 'banana', no matter what 'word' was originally.
Returns a list with one item, since `_get_replacement_words` is intended to
return a list of candidate replacement words.
"""
return ['banana']
# Import the model
import transformers
from textattack.models.wrappers import HuggingFaceModelWrapper
model = transformers.AutoModelForSequenceClassification.from_pretrained("textattack/bert-base-uncased-ag-news")
tokenizer = transformers.AutoTokenizer.from_pretrained("textattack/bert-base-uncased-ag-news")
model_wrapper = HuggingFaceModelWrapper(model, tokenizer)
# Create the goal function using the model
from textattack.goal_functions import UntargetedClassification
goal_function = UntargetedClassification(model_wrapper)
# Import the dataset
from textattack.datasets import HuggingFaceDataset
dataset = HuggingFaceDataset("ag_news", None, "test")
# %% [markdown]
# ### Creating the attack
# Let's keep it simple: let's use a greedy search method, and let's not use any constraints for now.
# %%
from textattack.search_methods import GreedySearch
from textattack.constraints.pre_transformation import RepeatModification, StopwordModification
from textattack import Attack
# We're going to use our Banana word swap class as the attack transformation.
transformation = BananaWordSwap()
# We'll constrain modification of already modified indices and stopwords
constraints = [RepeatModification(),
StopwordModification()]
# We'll use the Greedy search method
search_method = GreedySearch()
# Now, let's make the attack from the 4 components:
attack = Attack(goal_function, constraints, transformation, search_method)
from tqdm import tqdm # tqdm provides us a nice progress bar.
from textattack.loggers import CSVLogger # tracks a dataframe for us.
from textattack.attack_results import SuccessfulAttackResult
from textattack import Attacker
from textattack import AttackArgs
from textattack.datasets import Dataset
attack_args = AttackArgs(num_examples=10, checkpoint_interval=2)
attacker = Attacker(attack, dataset, attack_args)
attack_results = attacker.attack_dataset()
# The following legacy tutorial code shows how the Attack API works in detail.
#logger = CSVLogger(color_method='html')
#num_successes = 0
#i = 0
#while num_successes < 10:
#result = next(results_iterable)
# example, ground_truth_output = dataset[i]
# i += 1
# result = attack.attack(example, ground_truth_output)
# if isinstance(result, SuccessfulAttackResult):
# logger.log_attack_result(result)
# num_successes += 1
# print(f'{num_successes} of 10 successes complete.')
# %% [markdown]
# ### Visualizing attack results
#
# We are logging `AttackResult` objects using a `CSVLogger`. This logger stores all attack results in a dataframe, which we can easily access and display. Since we set `color_method` to `'html'`, the attack results will display their differences, in color, in HTML. Using `IPython` utilities and `pandas`, we can display them in an easy-to-read table.
# %%
import pandas as pd
pd.options.display.max_colwidth = 480 # increase column width so we can actually read the examples
logger = CSVLogger(color_method='html')
for result in attack_results:
logger.log_attack_result(result)
from IPython.core.display import display, HTML
display(HTML(logger.df[['original_text', 'perturbed_text']].to_html(escape=False)))
# %% [markdown]
# ### Conclusion
# We can examine these examples for a good idea of how many words had to be changed to "banana" to change the prediction score from the correct class to another class. The examples without perturbed words were originally misclassified, so they were skipped by the attack. Looks like some examples needed only a couple "banana"s, while others needed up to 17 "banana" substitutions to change the class score. Wow! 🍌
# %% [markdown]
# ### Bonus: Attacking Custom Samples
#
# We can also attack custom data samples, like these ones I just made up!
# %%
# For AG News, labels are 0: World, 1: Sports, 2: Business, 3: Sci/Tech
custom_dataset = [
('Malaria deaths in Africa fall by 5% from last year', 0),
('Washington Nationals defeat the Houston Astros to win the World Series', 1),
('Exxon Mobil hires a new CEO', 2),
('Microsoft invests $1 billion in OpenAI', 3),
]
attack_args = AttackArgs(num_examples=4)
dataset = Dataset(custom_dataset)
attacker = Attacker(attack, dataset, attack_args)
results_iterable = attacker.attack_dataset()
logger = CSVLogger(color_method='html')
for result in results_iterable:
logger.log_attack_result(result)
from IPython.core.display import display, HTML
display(HTML(logger.df[['original_text', 'perturbed_text']].to_html(escape=False)))
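# %% [markdown]
# ### Added sketch: swapping in a different transformation
# (Not part of the original tutorial.) Because the attack is assembled from
# interchangeable components, the goal function, constraints and search method
# defined earlier can be reused with any other `WordSwap` subclass. The class
# below mirrors `BananaWordSwap` and is purely illustrative.
# %%
class PineappleWordSwap(WordSwap):
    """Transforms an input by replacing any word with 'pineapple'."""
    def _get_replacement_words(self, word):
        return ['pineapple']
pineapple_attack = Attack(goal_function, constraints, PineappleWordSwap(), search_method)
print(pineapple_attack)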
| 36.751825
| 414
| 0.758491
|
c02a9e8b5fef7007bb2f6d70b53a274c59ebed70
| 2,220
|
py
|
Python
|
var/spack/repos/builtin/packages/benchmark/package.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 9
|
2018-04-18T07:51:40.000Z
|
2021-09-10T03:56:57.000Z
|
var/spack/repos/builtin/packages/benchmark/package.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 907
|
2018-04-18T11:17:57.000Z
|
2022-03-31T13:20:25.000Z
|
var/spack/repos/builtin/packages/benchmark/package.py
|
xiki-tempula/spack
|
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 29
|
2018-11-05T16:14:23.000Z
|
2022-02-03T16:07:09.000Z
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Benchmark(CMakePackage):
"""A microbenchmark support library"""
homepage = "https://github.com/google/benchmark"
url = "https://github.com/google/benchmark/archive/v1.1.0.tar.gz"
git = "https://github.com/google/benchmark.git"
# first properly installed CMake config packages in
# 1.2.0 release: https://github.com/google/benchmark/issues/363
version('develop', branch='master')
version('1.5.0', sha256='3c6a165b6ecc948967a1ead710d4a181d7b0fbcaa183ef7ea84604994966221a')
version('1.4.1', sha256='f8e525db3c42efc9c7f3bc5176a8fa893a9a9920bbd08cef30fb56a51854d60d')
version('1.4.0', sha256='616f252f37d61b15037e3c2ef956905baf9c9eecfeab400cb3ad25bae714e214')
version('1.3.0', sha256='f19559475a592cbd5ac48b61f6b9cedf87f0b6775d1443de54cfe8f53940b28d')
version('1.2.0', sha256='3dcc90c158838e2ac4a7ad06af9e28eb5877cf28252a81e55eb3c836757d3070')
version('1.1.0', sha256='e7334dd254434c6668e33a54c8f839194c7c61840d52f4b6258eee28e9f3b20e')
version('1.0.0', sha256='d2206c263fc1a7803d4b10e164e0c225f6bcf0d5e5f20b87929f137dee247b54')
variant('build_type', default='RelWithDebInfo',
description='The build type to build',
values=('Debug', 'Release', 'RelWithDebInfo',
'MinSizeRel', 'Coverage'))
depends_on("cmake@2.8.11:", type="build", when="@:1.1.0")
depends_on("cmake@2.8.12:", type="build", when="@1.2.0:1.4")
depends_on("cmake@3.5.1:", type="build", when="@1.5.0:")
def cmake_args(self):
# No need for testing for the install
args = ["-DBENCHMARK_ENABLE_TESTING=OFF"]
return args
def patch(self):
filter_file(
r'add_cxx_compiler_flag..fstrict.aliasing.',
r'##### add_cxx_compiler_flag(-fstrict-aliasing)',
'CMakeLists.txt'
)
filter_file(
r'add_cxx_compiler_flag..Werror',
r'##### add_cxx_compiler_flag(-Werror',
'CMakeLists.txt'
)
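    # Usage note (added; not part of the original recipe): a typical way to
    # build this package from the Spack CLI, selecting a version and the
    # build_type variant defined above, is
    #
    #   spack install benchmark@1.5.0 build_type=Release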
| 41.886792
| 95
| 0.686036
|
b5b13d7aa9c6b76f7b28ed8dddea689bf698509f
| 2,113
|
py
|
Python
|
compile_templates.py
|
jschmer/jMatic
|
de13a61a3d9c012c02ad98414f4fecd3a524e228
|
[
"MIT"
] | null | null | null |
compile_templates.py
|
jschmer/jMatic
|
de13a61a3d9c012c02ad98414f4fecd3a524e228
|
[
"MIT"
] | null | null | null |
compile_templates.py
|
jschmer/jMatic
|
de13a61a3d9c012c02ad98414f4fecd3a524e228
|
[
"MIT"
] | null | null | null |
import os, sys, re
def regex_walk(regex, top='.'):
matches = []
matcher = re.compile(regex);
#print(matcher.pattern)
for dirpath, dirnames, filenames in os.walk(top):
full_relative_filepaths = [os.path.join(dirpath, name) for name in filenames]
for filepath in full_relative_filepaths:
if matcher.match(filepath):
matches.append(filepath)
return matches
def compile_template(template_name, template_content, module_name='templates'):
template_name = template_name.replace("\\", "/")
print("Compiling {}".format(template_name))
template_content_lines = template_content.splitlines()
template_content_lines = [x.replace(r"'", r"\'") for x in template_content_lines]
javascript_template_content = [r"'{}\n'".format(x) for x in template_content_lines[:-1]]
javascript_template_content.append(r"'{}'".format(template_content_lines[-1]));
javascript_template_content_string = " +\n ".join(javascript_template_content)
return r"""
(function(module) {{
try {{
module = angular.module('{0}');
}} catch (e) {{
module = angular.module('{0}', []);
}}
module.run(['$templateCache', function($templateCache) {{
$templateCache.put('{1}',
{2});
}}]);
}})();
""".format(module_name, template_name, javascript_template_content_string)
def compile(templates_regex, stripPrefix, outputpath, moduleName):
templateFiles = regex_walk(templates_regex, 'app')
#print(templateFiles)
#print()
with open(outputpath, "w") as templateFile:
for template in templateFiles:
with open(template, "r") as templateInputFile:
templateContent = templateInputFile.read()
templateName = re.sub(stripPrefix, "", template)
# putting the templates into the templatecache of jMaticApp
compiledTemplate = compile_template(templateName, templateContent, moduleName)
#print(compiledTemplate)
templateFile.write(compiledTemplate);
def compile_jMatic():
compile(r'app\\views\\partials\\.+\.html', r'app\\', 'app\scripts\compiled_templates.js', 'jMaticApp')
if __name__=='__main__':
compile_jMatic()
| 34.639344
| 104
| 0.702319
|
11a2c25625d7526294d5d95f813d269b26287029
| 12,589
|
py
|
Python
|
test/functional/ncc_dgp_block_size_sync.py
|
nccproject/ncc
|
068ccc82a73d28136546095261ad8ccef7e541a3
|
[
"MIT"
] | null | null | null |
test/functional/ncc_dgp_block_size_sync.py
|
nccproject/ncc
|
068ccc82a73d28136546095261ad8ccef7e541a3
|
[
"MIT"
] | null | null | null |
test/functional/ncc_dgp_block_size_sync.py
|
nccproject/ncc
|
068ccc82a73d28136546095261ad8ccef7e541a3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
from test_framework.ncc import *
from test_framework.address import *
from test_framework.blocktools import *
import io
"""
Note: these tests do not test the functionality of the DGP template contract itself; for tests of the DGP template, see ncc-dgp.py
"""
class NCCDGPBlockSizeSyncTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 8
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def create_block_of_approx_max_size(self, size_in_bytes):
tip = self.node.getblock(self.node.getbestblockhash())
block = create_block(int(self.node.getbestblockhash(), 16), create_coinbase(self.node.getblockcount()+1), tip['time'])
block.hashUTXORoot = int(tip['hashUTXORoot'], 16)
block.hashStateRoot = int(tip['hashStateRoot'], 16)
unspents = self.node.listunspent()
while len(block.serialize()) < size_in_bytes:
unspent = unspents.pop(0)
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(int(unspent['txid'], 16), unspent['vout']), nSequence=0)]
for i in range(50):
tx.vout.append(CTxOut(int(unspent['amount']*COIN/100 - 11000), scriptPubKey=CScript([OP_TRUE]*10000)))
tx_hex = self.node.signrawtransactionwithwallet(bytes_to_hex_str(tx.serialize()))['hex']
f = io.BytesIO(hex_str_to_bytes(tx_hex))
block.vtx.append(CTransaction())
block.vtx[-1].deserialize(f)
while len(block.serialize()) > size_in_bytes:
block.vtx[-1].vout.pop(-1)
if not block.vtx[-1].vout:
block.vtx.pop(-1)
tx_hex = self.node.signrawtransactionwithwallet(bytes_to_hex_str(block.vtx[-1].serialize()))['hex']
f = io.BytesIO(hex_str_to_bytes(tx_hex))
block.vtx[-1] = CTransaction()
block.vtx[-1].deserialize(f)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
print("block size", len(block.serialize()))
return block
def create_proposal_contract(self, block_size=2000000):
"""
pragma solidity ^0.4.11;
contract blockSize {
uint32[1] _blockSize=[
8000000 //block size in bytes
];
function getBlockSize() constant returns(uint32[1] _size){
return _blockSize;
}
}
"""
# The contracts below only differ in the _blockSize variable
if block_size == 32000000:
contract_data = self.node.createcontract("60606040526020604051908101604052806301e8480063ffffffff16815250600090600161002e92919061003f565b50341561003a57600080fd5b610115565b8260016007016008900481019282156100d15791602002820160005b8382111561009f57835183826101000a81548163ffffffff021916908363ffffffff160217905550926020019260040160208160030104928301926001030261005b565b80156100cf5782816101000a81549063ffffffff021916905560040160208160030104928301926001030261009f565b505b5090506100de91906100e2565b5090565b61011291905b8082111561010e57600081816101000a81549063ffffffff0219169055506001016100e8565b5090565b90565b610162806101246000396000f30060606040526000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806392ac3c621461003e575b600080fd5b341561004957600080fd5b610051610090565b6040518082600160200280838360005b8381101561007d5780820151818401525b602081019050610061565b5050505090500191505060405180910390f35b610098610108565b60006001806020026040519081016040528092919082600180156100fd576020028201916000905b82829054906101000a900463ffffffff1663ffffffff16815260200190600401906020826003010492830192600103820291508084116100c05790505b505050505090505b90565b6020604051908101604052806001905b600063ffffffff1681526020019060019003908161011857905050905600a165627a7a72305820322c4456cb00ecc4c7f2878fe22cc7ff6addbf199842e68a4b23e98d51446b080029", 10000000)
elif block_size == 8000000:
contract_data = self.node.createcontract("6060604052602060405190810160405280627a120062ffffff16815250600090600161002c92919061003d565b50341561003857600080fd5b610112565b8260016007016008900481019282156100ce5791602002820160005b8382111561009c57835183826101000a81548163ffffffff021916908362ffffff1602179055509260200192600401602081600301049283019260010302610059565b80156100cc5782816101000a81549063ffffffff021916905560040160208160030104928301926001030261009c565b505b5090506100db91906100df565b5090565b61010f91905b8082111561010b57600081816101000a81549063ffffffff0219169055506001016100e5565b5090565b90565b610162806101216000396000f30060606040526000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806392ac3c621461003e575b600080fd5b341561004957600080fd5b610051610090565b6040518082600160200280838360005b8381101561007d5780820151818401525b602081019050610061565b5050505090500191505060405180910390f35b610098610108565b60006001806020026040519081016040528092919082600180156100fd576020028201916000905b82829054906101000a900463ffffffff1663ffffffff16815260200190600401906020826003010492830192600103820291508084116100c05790505b505050505090505b90565b6020604051908101604052806001905b600063ffffffff1681526020019060019003908161011857905050905600a165627a7a723058209bab110523b5fdedfb12512d3aedc1ba1add53dff85edb77aeec48ebdc01c35c0029", 10000000)
elif block_size == 4000000:
contract_data = self.node.createcontract("6060604052602060405190810160405280623d090062ffffff16815250600090600161002c92919061003d565b50341561003857600080fd5b610112565b8260016007016008900481019282156100ce5791602002820160005b8382111561009c57835183826101000a81548163ffffffff021916908362ffffff1602179055509260200192600401602081600301049283019260010302610059565b80156100cc5782816101000a81549063ffffffff021916905560040160208160030104928301926001030261009c565b505b5090506100db91906100df565b5090565b61010f91905b8082111561010b57600081816101000a81549063ffffffff0219169055506001016100e5565b5090565b90565b610162806101216000396000f30060606040526000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806392ac3c621461003e575b600080fd5b341561004957600080fd5b610051610090565b6040518082600160200280838360005b8381101561007d5780820151818401525b602081019050610061565b5050505090500191505060405180910390f35b610098610108565b60006001806020026040519081016040528092919082600180156100fd576020028201916000905b82829054906101000a900463ffffffff1663ffffffff16815260200190600401906020826003010492830192600103820291508084116100c05790505b505050505090505b90565b6020604051908101604052806001905b600063ffffffff1681526020019060019003908161011857905050905600a165627a7a72305820c5f02b85c3d9d7b93140775449355f53a7cb98dcafc56f07cdb09e9f2dc240550029", 10000000)
elif block_size == 2000000:
contract_data = self.node.createcontract("6060604052602060405190810160405280621e848062ffffff16815250600090600161002c92919061003d565b50341561003857600080fd5b610112565b8260016007016008900481019282156100ce5791602002820160005b8382111561009c57835183826101000a81548163ffffffff021916908362ffffff1602179055509260200192600401602081600301049283019260010302610059565b80156100cc5782816101000a81549063ffffffff021916905560040160208160030104928301926001030261009c565b505b5090506100db91906100df565b5090565b61010f91905b8082111561010b57600081816101000a81549063ffffffff0219169055506001016100e5565b5090565b90565b610162806101216000396000f30060606040526000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806392ac3c621461003e575b600080fd5b341561004957600080fd5b610051610090565b6040518082600160200280838360005b8381101561007d5780820151818401525b602081019050610061565b5050505090500191505060405180910390f35b610098610108565b60006001806020026040519081016040528092919082600180156100fd576020028201916000905b82829054906101000a900463ffffffff1663ffffffff16815260200190600401906020826003010492830192600103820291508084116100c05790505b505050505090505b90565b6020604051908101604052806001905b600063ffffffff1681526020019060019003908161011857905050905600a165627a7a723058201f747ceade404003185ab16248ecd30e8c1a63a811e55d7961ce3a47ddd01b160029", 10000000)
elif block_size == 1000000:
contract_data = self.node.createcontract("6060604052602060405190810160405280620f424062ffffff16815250600090600161002c92919061003d565b50341561003857600080fd5b610112565b8260016007016008900481019282156100ce5791602002820160005b8382111561009c57835183826101000a81548163ffffffff021916908362ffffff1602179055509260200192600401602081600301049283019260010302610059565b80156100cc5782816101000a81549063ffffffff021916905560040160208160030104928301926001030261009c565b505b5090506100db91906100df565b5090565b61010f91905b8082111561010b57600081816101000a81549063ffffffff0219169055506001016100e5565b5090565b90565b610162806101216000396000f30060606040526000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff16806392ac3c621461003e575b600080fd5b341561004957600080fd5b610051610090565b6040518082600160200280838360005b8381101561007d5780820151818401525b602081019050610061565b5050505090500191505060405180910390f35b610098610108565b60006001806020026040519081016040528092919082600180156100fd576020028201916000905b82829054906101000a900463ffffffff1663ffffffff16815260200190600401906020826003010492830192600103820291508084116100c05790505b505050505090505b90565b6020604051908101604052806001905b600063ffffffff1681526020019060019003908161011857905050905600a165627a7a7230582034c00d84f338629f594676d9bc32d5b9d7b92f3b438e9cc82a3efd92805f14730029", 10000000)
self.proposal_address = contract_data['address']
def assert_block_accepted(self, block, with_witness=True):
current_block_count = self.node.getblockcount()
assert_equal(self.node.submitblock(bytes_to_hex_str(block.serialize(with_witness))), None)
assert_equal(self.node.getblockcount(), current_block_count+1)
t = time.time()
while time.time() < t+5:
if self.nodes[0].getbestblockhash() == self.nodes[1].getbestblockhash():
break
else:
assert(False)
assert_equal(self.nodes[0].getbestblockhash(), self.nodes[1].getbestblockhash())
def assert_block_limits(self, max_accepted_block_size, possible_block_sizes):
accepted_block_sizes = possible_block_sizes[0:possible_block_sizes.index(max_accepted_block_size)+1]
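# e.g. with possible_block_sizes = [1000000, 2000000, 4000000, 8000000] and
# max_accepted_block_size = 4000000, blocks of roughly 1 MB, 2 MB and 4 MB are
# built below and must all be accepted and propagated to the peer node.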
for block_size in accepted_block_sizes:
block = self.create_block_of_approx_max_size(block_size)
self.assert_block_accepted(block)
t = time.time()
while time.time() < t+5:
if self.nodes[0].getbestblockhash() == self.nodes[1].getbestblockhash():
break
else:
assert(False)
# Make sure that both nodes now have the same tip
assert_equal(self.nodes[0].getbestblockhash(), self.nodes[1].getbestblockhash())
def run_test(self):
# stop 6 nodes that will be used later for IBD
for i in range(2, 8):
self.stop_node(i)
# Generate some blocks to make sure we have enough spendable outputs
self.node = self.nodes[0]
self.node.generate(1000 + COINBASE_MATURITY)
self.BLOCK_SIZE_DGP = DGPState(self.node, "0000000000000000000000000000000000000081")
self.is_network_split = False
connect_nodes_bi(self.nodes, 0, 1)
# Start off by setting ourself as admin
admin_address = self.node.getnewaddress()
# Set ourself up as admin
self.BLOCK_SIZE_DGP.send_set_initial_admin(admin_address)
self.node.generate(1)
possible_block_sizes = [1000000, 2000000, 4000000, 8000000]
ascending_block_sizes = sorted(possible_block_sizes)
for max_block_size in possible_block_sizes:
self.create_proposal_contract(max_block_size)
self.BLOCK_SIZE_DGP.send_add_address_proposal(self.proposal_address, 2, admin_address)
self.node.generate(2) # We need to generate 2 blocks now for it to activate
self.assert_block_limits(max_block_size, ascending_block_sizes)
# Bring the last nodes online and make sure that they sync with node 0 and 1 (A and B)
for i in range(2, 8):
self.start_node(i)
connect_nodes_bi(self.nodes, 0, i)
connect_nodes_bi(self.nodes, 1, i)
self.sync_all()
if __name__ == '__main__':
NCCDGPBlockSizeSyncTest().main()
| 83.926667
| 1,358
| 0.83001
|
2e5ee1f346a0c59765a91b796dadbe3c51d68fcf
| 1,180
|
py
|
Python
|
detection_3d/tools/statics.py
|
Dtananaev/lidar_dynamic_objects_detection
|
3b8f3d5fcce0fb914bb83e5d43a3ca652739139e
|
[
"MIT"
] | 21
|
2020-07-12T04:08:39.000Z
|
2022-03-04T09:06:29.000Z
|
detection_3d/tools/statics.py
|
Dtananaev/lidar_dynamic_objects_detection
|
3b8f3d5fcce0fb914bb83e5d43a3ca652739139e
|
[
"MIT"
] | 2
|
2020-12-19T01:32:21.000Z
|
2021-06-30T14:40:01.000Z
|
detection_3d/tools/statics.py
|
Dtananaev/lidar_dynamic_objects_detection
|
3b8f3d5fcce0fb914bb83e5d43a3ca652739139e
|
[
"MIT"
] | 6
|
2020-08-24T08:14:16.000Z
|
2022-03-04T09:06:22.000Z
|
#!/usr/bin/env python
__copyright__ = """
Copyright (c) 2020 Tananaev Denis
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions: The above copyright notice and this permission
notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
NO_SCHEDULER = "no_scheduler"
RESTARTS_SCHEDULER = "restarts"
ADAM = "adam"
| 49.166667
| 89
| 0.795763
|
a9c76b02b8207847c2ff2eb20c08bbe8c27d3840
| 2,055
|
py
|
Python
|
dotaservice/dotautil.py
|
Andrea-MariaDB-2/LastOrder-Dota2
|
d2fb53a7166218eda1e402efd422586f3adaadff
|
[
"MIT"
] | 332
|
2021-08-02T07:37:41.000Z
|
2022-03-26T17:44:53.000Z
|
dotaservice/dotautil.py
|
Andrea-MariaDB-2/LastOrder-Dota2
|
d2fb53a7166218eda1e402efd422586f3adaadff
|
[
"MIT"
] | 13
|
2021-08-04T12:04:24.000Z
|
2021-09-26T06:57:46.000Z
|
dotaservice/dotautil.py
|
Andrea-MariaDB-2/LastOrder-Dota2
|
d2fb53a7166218eda1e402efd422586f3adaadff
|
[
"MIT"
] | 34
|
2021-08-02T09:08:39.000Z
|
2022-03-27T18:32:31.000Z
|
import numpy as np
import math
from dotaservice.protos.dota_gcmessages_common_bot_script_pb2 import CMsgBotWorldState
# Armor damage reduction factor
def armor_filter(armor):
return 1 - ((0.06 * armor) / (1.0 + 0.06 * abs(armor)))
# Damage dealt by a normal (right-click) attack
def attack_damage(attack, armor):
return attack * armor_filter(armor)
# Number of normal attacks needed to kill
def attack_to_death_times(attack, armor, hp):
if hp <= 0:
return 0
damage = attack_damage(attack, armor)
times = int(hp / damage) + 1
return times
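# Worked example (illustrative values): with attack=60, armor=10, hp=300:
#   armor_filter(10)                   -> 1 - 0.6 / 1.6 = 0.625
#   attack_damage(60, 10)              -> 60 * 0.625 = 37.5
#   attack_to_death_times(60, 10, 300) -> int(300 / 37.5) + 1 = 9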
# Attack speed (attacks per second)
# https://dota2.gamepedia.com/Attack_speed
def attack_per_second(ias, bat=1.7):
if ias < 0.2:
ias = 0.2
if ias > 7:
ias = 7
return ias / bat
# Time per attack (seconds)
# https://dota2-zh.gamepedia.com/index.php?title=%E6%94%BB%E5%87%BB%E5%8A%A8%E4%BD%9C&variant=zh
def attack_time(ias, bat=1.7):
return 1 / attack_per_second(ias, bat=bat)
# Compute the planar (x/y) distance between two points
def cal_distance(p1, p2):
if p1 is None or p2 is None:
return -1
return np.sqrt(np.power(p1.x - p2.x, 2) + np.power(p1.y - p2.y, 2))
# Compute the planar distance and the vertical (z) offset
def cal_distance_with_z(p1, p2):
if p1 is None or p2 is None:
return -1, -1
else:
return cal_distance(p1, p2), p2.z - p1.z
# Compute the angle between two coordinates, consistent with how the action degree is applied
def location_to_degree(hero_location, target_location):
direct_x = target_location.x - hero_location.x
direct_y = target_location.y - hero_location.y
degree = np.degrees(np.arctan2(direct_y, direct_x))
if degree < 0:
return degree + 360
else:
return degree
# Check whether a unit is within the area in front of the hero
def in_facing_distance(hero, u, distance, r=250, normalization=False):
x = math.cos(math.radians(hero.facing)) * distance + \
hero.location.x
y = math.sin(math.radians(hero.facing)) * distance + \
hero.location.y
location = CMsgBotWorldState.Vector(x=x, y=y, z=512.0)
d = cal_distance(u.location, location) # distance from the ability's center point to the unit
if d < r and u.team_id != hero.team_id:
if normalization:
return (r - d) / r # 1 at the center point, approaching 0 towards the edge, 0 outside the radius
else:
return 1
else:
return 0
| 24.176471
| 96
| 0.645255
|
3c35f9eeec4e5acd78d2ba6e43c7178b337fc4a6
| 1,147
|
py
|
Python
|
UI/Wrappers/MapGenerator.py
|
mjbogusz/TSPGen
|
4916cf6276fda41b73ebdf24a7969167c63d0650
|
[
"MIT"
] | null | null | null |
UI/Wrappers/MapGenerator.py
|
mjbogusz/TSPGen
|
4916cf6276fda41b73ebdf24a7969167c63d0650
|
[
"MIT"
] | null | null | null |
UI/Wrappers/MapGenerator.py
|
mjbogusz/TSPGen
|
4916cf6276fda41b73ebdf24a7969167c63d0650
|
[
"MIT"
] | null | null | null |
from PyQt5.QtCore import QThread, pyqtSignal
from Map import Map
class MapGenerator(QThread):
mapGenerated = pyqtSignal(Map)
mapGenerationFailed = pyqtSignal(Exception)
def __init__(self, parent = None):
QThread.__init__(self, parent)
self.fileMode = False
self.fileName = ""
self.citiesCount = 0
self.connectionsCount = 0
def setGenerationMode(self, fileMode = False, fileName = "", citiesCount = 0, connectionsCount = 0):
self.fileMode = fileMode
self.fileName = fileName
self.citiesCount = citiesCount
self.connectionsCount = connectionsCount
def run(self):
newMap = None
if self.fileName:
try:
newMap = Map.readFromFile(self.fileName)
except Exception as e:
self.mapGenerationFailed.emit(e)
return
else:
try:
newMap = Map.generateCNN(self.citiesCount, self.connectionsCount)
except Exception as e:
self.mapGenerationFailed.emit(e)
return
self.mapGenerated.emit(newMap)
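# Minimal usage sketch (slot names are hypothetical):
#   generator = MapGenerator(parent=mainWindow)
#   generator.setGenerationMode(citiesCount=30, connectionsCount=60)
#   generator.mapGenerated.connect(onMapReady)          # receives the generated Map
#   generator.mapGenerationFailed.connect(onMapError)   # receives the Exception
#   generator.start()  # QThread.start() runs run() in the worker thread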
| 30.184211
| 104
| 0.605929
|
be1f2fde9e62f446c3c085cdcdd00491836ea573
| 1,709
|
py
|
Python
|
config/wsgi.py
|
tuvapp/tuvappcom
|
5ca2be19f4b0c86a1d4a9553711a4da9d3f32841
|
[
"MIT"
] | null | null | null |
config/wsgi.py
|
tuvapp/tuvappcom
|
5ca2be19f4b0c86a1d4a9553711a4da9d3f32841
|
[
"MIT"
] | null | null | null |
config/wsgi.py
|
tuvapp/tuvappcom
|
5ca2be19f4b0c86a1d4a9553711a4da9d3f32841
|
[
"MIT"
] | null | null | null |
"""
WSGI config for tuvappcom project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from django.core.wsgi import get_wsgi_application
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
from raven.contrib.django.raven_compat.middleware.wsgi import Sentry
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
application = Sentry(application)
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
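# A minimal sketch of the kind of WSGI middleware mentioned above (header name is
# purely illustrative; wrap after the Sentry wrapping if both are wanted):
# class HeaderMiddleware:
#     def __init__(self, app):
#         self.app = app
#     def __call__(self, environ, start_response):
#         def _start_response(status, headers, exc_info=None):
#             headers.append(("X-Example", "1"))
#             return start_response(status, headers, exc_info)
#         return self.app(environ, _start_response)
# application = HeaderMiddleware(application)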
| 46.189189
| 79
| 0.801638
|
c8a43b1b1e905492f6d317b6d34c48d4921f4f38
| 1,055
|
py
|
Python
|
clic2022/example_video_submission/decode.py
|
fab-jul/clic-devkit
|
4e0ab072b3fe5907c521b10cf3879c585d654c19
|
[
"MIT"
] | null | null | null |
clic2022/example_video_submission/decode.py
|
fab-jul/clic-devkit
|
4e0ab072b3fe5907c521b10cf3879c585d654c19
|
[
"MIT"
] | null | null | null |
clic2022/example_video_submission/decode.py
|
fab-jul/clic-devkit
|
4e0ab072b3fe5907c521b10cf3879c585d654c19
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from typing import Sequence
import decode_helper
import os
import glob
from zipfile import ZipFile
FILES_ZIP = 'outputs.zip'
def unpack_encoded() -> Sequence[str]:
assert os.path.isfile(FILES_ZIP), 'Expected {}. {}'.format(
FILES_ZIP, os.listdir("."))
print('Unzipping', FILES_ZIP, '...')
with ZipFile(FILES_ZIP) as zipfile:
zipfile.extractall()
encoded_files = sorted(glob.glob("output/*.mp4"))
if len(encoded_files) != 30:
files = os.listdir(".")
files_output = os.listdir("output")
raise ValueError(
f'Expected 30 .mp4 files, found {len(encoded_files)}. '
f'Files in cwd: {files} // '
f'Files in output: {files_output}')
return encoded_files
def main():
encoded = unpack_encoded()
print(f'Got {len(encoded)} files, mapping to pngs...')
for i, p in enumerate(encoded, 1):
decode_helper.convert_video_to_pngs(p)
print(f"Converted {i}/{len(encoded)}")
if __name__ == '__main__':
main()
| 23.444444
| 67
| 0.62654
|
dceb00c0ca848973c0f1d2c671219e0b15ca7aa8
| 1,019
|
py
|
Python
|
autumn/projects/tuberculosis/calibration_utils.py
|
jtrauer/AuTuMN
|
2e1defd0104bbecfe667b8ea5ecaf4bc6741905c
|
[
"BSD-2-Clause-FreeBSD"
] | 14
|
2020-03-11T06:15:30.000Z
|
2022-03-09T03:38:35.000Z
|
autumn/projects/tuberculosis/calibration_utils.py
|
jtrauer/AuTuMN
|
2e1defd0104bbecfe667b8ea5ecaf4bc6741905c
|
[
"BSD-2-Clause-FreeBSD"
] | 96
|
2020-01-29T05:10:29.000Z
|
2022-03-31T01:48:46.000Z
|
autumn/projects/tuberculosis/calibration_utils.py
|
monash-emu/AuTuMN
|
fa3b81ef54cf561e0e7364a48f4ff96585dc3310
|
[
"BSD-2-Clause-FreeBSD"
] | 10
|
2020-04-24T00:38:00.000Z
|
2021-08-19T16:19:03.000Z
|
from autumn.tools.calibration.priors import UniformPrior
# Not currently used anywhere.
LOGNORMAL_PARAMS = {
"early_activation_rate": {
"unstratified": [-6.78, 0.15],
"age_0": [-5.00, 0.19],
"age_5": [-5.91, 0.21],
"age_15": [-8.05, 0.28],
},
"stabilisation_rate": {
"unstratified": [-4.50, 0.13],
"age_0": [-4.38, 0.19],
"age_5": [-4.46, 0.18],
"age_15": [-5.00, 0.28],
},
"late_activation_rate": {
"unstratified": [-11.99, 0.34],
"age_0": [-12.36, 1.13],
"age_5": [-11.68, 0.67],
"age_15": [-12.11, 0.45],
},
}
CID_ESTIMATES = {
"infect_death_rate": {
"smear_positive": [0.335, 0.449],
"smear_negative": [0.017, 0.035],
},
"self_recovery_rate": {"smear_positive": [0.177, 0.288], "smear_negative": [0.073, 0.209]},
}
def get_natural_history_priors_from_cid(param_name, organ):
return UniformPrior(f"{param_name}_dict.{organ}", CID_ESTIMATES[param_name][organ])
| 28.305556
| 95
| 0.554465
|
b24749c69e343abb1a56fc5469dcc9786d6a5ce8
| 334
|
py
|
Python
|
variable_and_data_type/string_demo/strings_are_arrays.py
|
pysga1996/python-basic-programming
|
5fe817986fbef2649b4b03955f07b59d2a2035d8
|
[
"MIT"
] | null | null | null |
variable_and_data_type/string_demo/strings_are_arrays.py
|
pysga1996/python-basic-programming
|
5fe817986fbef2649b4b03955f07b59d2a2035d8
|
[
"MIT"
] | null | null | null |
variable_and_data_type/string_demo/strings_are_arrays.py
|
pysga1996/python-basic-programming
|
5fe817986fbef2649b4b03955f07b59d2a2035d8
|
[
"MIT"
] | null | null | null |
# Like many other popular programming languages, Python lets you index into strings; a string is a sequence of Unicode characters.
#
# However, Python does not have a separate character data type; a single character is simply a string of length 1.
#
# Square brackets can be used to access elements of the string.
a = "Hello, World!"
print(a[1])
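# The statement above prints 'e', the character at index 1 of "Hello, World!".
# Slicing uses the same bracket syntax:
print(a[0:5]) # prints "Hello"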
| 37.111111
| 119
| 0.763473
|
949235f0cf0696916b7d33d281bb4fb07fed94df
| 9,537
|
py
|
Python
|
chapter10_Computer_Vision/model/faster_rcnn_trainer.py
|
iroan/Practicing-Federated-Learning
|
5db163e1f07be4b47aac7902102e022da779b86e
|
[
"Apache-2.0"
] | 227
|
2020-12-28T09:20:42.000Z
|
2022-03-31T12:28:45.000Z
|
chapter10_Computer_Vision/model/faster_rcnn_trainer.py
|
iroan/Practicing-Federated-Learning
|
5db163e1f07be4b47aac7902102e022da779b86e
|
[
"Apache-2.0"
] | 7
|
2021-03-12T09:06:25.000Z
|
2022-02-07T10:31:04.000Z
|
chapter10_Computer_Vision/model/faster_rcnn_trainer.py
|
iroan/Practicing-Federated-Learning
|
5db163e1f07be4b47aac7902102e022da779b86e
|
[
"Apache-2.0"
] | 101
|
2021-04-26T02:25:54.000Z
|
2022-03-25T14:07:42.000Z
|
from __future__ import absolute_import
import os
import time
from collections import namedtuple
import torch as t
t.manual_seed(42)
from torch import nn
from torch.nn import functional as F
from model.utils.creator_tool import AnchorTargetCreator, ProposalTargetCreator
from torchnet.meter import AverageValueMeter, ConfusionMeter
from utils import array_tool as at
from utils.config import opt
from utils.vis_tool import Visualizer
LossTuple = namedtuple('LossTuple',
['rpn_loc_loss',
'rpn_cls_loss',
'roi_loc_loss',
'roi_cls_loss',
'total_loss'
])
class FasterRCNNTrainer(nn.Module):
"""wrapper for conveniently training. return losses
The losses include:
* :obj:`rpn_loc_loss`: The localization loss for \
Region Proposal Network (RPN).
* :obj:`rpn_cls_loss`: The classification loss for RPN.
* :obj:`roi_loc_loss`: The localization loss for the head module.
* :obj:`roi_cls_loss`: The classification loss for the head module.
* :obj:`total_loss`: The sum of 4 loss above.
Args:
faster_rcnn (model.FasterRCNN):
A Faster R-CNN model that is going to be trained.
"""
def __init__(self, faster_rcnn, log_filename=opt.log_filename):
super(FasterRCNNTrainer, self).__init__()
self.faster_rcnn = faster_rcnn
self.rpn_sigma = opt.rpn_sigma
self.roi_sigma = opt.roi_sigma
# target creator create gt_bbox gt_label etc as training targets.
self.anchor_target_creator = AnchorTargetCreator()
self.proposal_target_creator = ProposalTargetCreator()
self.loc_normalize_mean = faster_rcnn.loc_normalize_mean
self.loc_normalize_std = faster_rcnn.loc_normalize_std
self.optimizer = self.faster_rcnn.get_optimizer()
# visdom wrapper
self.vis = Visualizer(env=opt.env, log_to_filename=log_filename)
# indicators for training status
self.rpn_cm = ConfusionMeter(2)
self.roi_cm = ConfusionMeter(21)
self.meters = {k: AverageValueMeter() for k in LossTuple._fields} # average loss
def forward(self, imgs, bboxes, labels, scale):
"""Forward Faster R-CNN and calculate losses.
Here are notations used.
* :math:`N` is the batch size.
* :math:`R` is the number of bounding boxes per image.
Currently, only :math:`N=1` is supported.
Args:
imgs (~torch.autograd.Variable): A variable with a batch of images.
bboxes (~torch.autograd.Variable): A batch of bounding boxes.
Its shape is :math:`(N, R, 4)`.
labels (~torch.autograd..Variable): A batch of labels.
Its shape is :math:`(N, R)`. The background is excluded from
the definition, which means that the range of the value
is :math:`[0, L - 1]`. :math:`L` is the number of foreground
classes.
scale (float): Amount of scaling applied to
the raw image during preprocessing.
Returns:
namedtuple of 5 losses
"""
n = bboxes.shape[0]
if n != 1:
raise ValueError('Currently only batch size 1 is supported.')
_, _, H, W = imgs.shape
img_size = (H, W)
features = self.faster_rcnn.extractor(imgs)
rpn_locs, rpn_scores, rois, roi_indices, anchor = \
self.faster_rcnn.rpn(features, img_size, scale)
# Since batch size is one, convert variables to singular form
bbox = bboxes[0]
label = labels[0]
rpn_score = rpn_scores[0]
rpn_loc = rpn_locs[0]
roi = rois
# Sample RoIs and forward
# it's fine to break the computation graph of rois,
# consider them as constant input
sample_roi, gt_roi_loc, gt_roi_label = self.proposal_target_creator(
roi,
at.tonumpy(bbox),
at.tonumpy(label),
self.loc_normalize_mean,
self.loc_normalize_std)
# NOTE: all zeros because only batch size 1 is supported for now
sample_roi_index = t.zeros(len(sample_roi))
roi_cls_loc, roi_score = self.faster_rcnn.head(
features,
sample_roi,
sample_roi_index)
# ------------------ RPN losses -------------------#
gt_rpn_loc, gt_rpn_label = self.anchor_target_creator(
at.tonumpy(bbox),
anchor,
img_size)
gt_rpn_label = at.totensor(gt_rpn_label).long()
gt_rpn_loc = at.totensor(gt_rpn_loc)
rpn_loc_loss = _fast_rcnn_loc_loss(
rpn_loc,
gt_rpn_loc,
gt_rpn_label.data,
self.rpn_sigma)
# NOTE: default value of ignore_index is -100 ...
rpn_cls_loss = F.cross_entropy(rpn_score, gt_rpn_label.cuda(), ignore_index=-1)
_gt_rpn_label = gt_rpn_label[gt_rpn_label > -1]
_rpn_score = at.tonumpy(rpn_score)[at.tonumpy(gt_rpn_label) > -1]
self.rpn_cm.add(at.totensor(_rpn_score, False), _gt_rpn_label.data.long())
# ------------------ ROI losses (fast rcnn loss) -------------------#
n_sample = roi_cls_loc.shape[0]
roi_cls_loc = roi_cls_loc.view(n_sample, -1, 4)
roi_loc = roi_cls_loc[t.arange(0, n_sample).long().cuda(), \
at.totensor(gt_roi_label).long()]
gt_roi_label = at.totensor(gt_roi_label).long()
gt_roi_loc = at.totensor(gt_roi_loc)
roi_loc_loss = _fast_rcnn_loc_loss(
roi_loc.contiguous(),
gt_roi_loc,
gt_roi_label.data,
self.roi_sigma)
roi_cls_loss = nn.CrossEntropyLoss()(roi_score, gt_roi_label.cuda())
self.roi_cm.add(at.totensor(roi_score, False), gt_roi_label.data.long())
losses = [rpn_loc_loss, rpn_cls_loss, roi_loc_loss, roi_cls_loss]
losses = losses + [sum(losses)]
return LossTuple(*losses)
def train_step(self, imgs, bboxes, labels, scale):
self.optimizer.zero_grad()
losses = self.forward(imgs, bboxes, labels, scale)
losses.total_loss.backward()
self.optimizer.step()
self.update_meters(losses)
return losses
def save(self, save_optimizer=False, save_path=None, **kwargs):
"""serialize models include optimizer and other info
return path where the model-file is stored.
Args:
save_optimizer (bool): whether save optimizer.state_dict().
save_path (string): where to save model, if it's None, save_path
is generate using time str and info from kwargs.
Returns:
save_path(str): the path to save models.
"""
save_dict = dict()
save_dict['model'] = self.faster_rcnn.state_dict()
save_dict['config'] = opt._state_dict()
save_dict['other_info'] = kwargs
save_dict['vis_info'] = self.vis.state_dict()
if save_optimizer:
save_dict['optimizer'] = self.optimizer.state_dict()
if save_path is None:
timestr = time.strftime('%m%d%H%M')
save_path = 'checkpoints/fasterrcnn_%s' % timestr
for k_, v_ in kwargs.items():
save_path += '_%s' % v_
save_dir = os.path.dirname(save_path)
if not os.path.exists(save_dir):
os.makedirs(save_dir)
t.save(save_dict, save_path)
self.vis.save([self.vis.env])
return save_path
def load(self, path, load_optimizer=True, parse_opt=False, ):
state_dict = t.load(path)
if 'model' in state_dict:
self.faster_rcnn.load_state_dict(state_dict['model'])
else: # legacy way, for backward compatibility
self.faster_rcnn.load_state_dict(state_dict)
return self
if parse_opt:
opt._parse(state_dict['config'])
if 'optimizer' in state_dict and load_optimizer:
self.optimizer.load_state_dict(state_dict['optimizer'])
return self
def update_meters(self, losses):
loss_d = {k: at.scalar(v) for k, v in losses._asdict().items()}
for key, meter in self.meters.items():
meter.add(loss_d[key])
def reset_meters(self):
for key, meter in self.meters.items():
meter.reset()
self.roi_cm.reset()
self.rpn_cm.reset()
def get_meter_data(self):
return {k: v.value()[0] for k, v in self.meters.items()}
def _smooth_l1_loss(x, t, in_weight, sigma):
sigma2 = sigma ** 2
diff = in_weight * (x - t)
abs_diff = diff.abs()
flag = (abs_diff.data < (1. / sigma2)).float()
y = (flag * (sigma2 / 2.) * (diff ** 2) +
(1 - flag) * (abs_diff - 0.5 / sigma2))
return y.sum()
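# The function above implements the piecewise smooth-L1 penalty (with threshold 1 / sigma^2):
#   0.5 * sigma^2 * diff^2    if |diff| < 1 / sigma^2
#   |diff| - 0.5 / sigma^2    otherwise
# summed over all coordinates (masked-out entries contribute 0 because diff is zeroed).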
def _fast_rcnn_loc_loss(pred_loc, gt_loc, gt_label, sigma):
in_weight = t.zeros(gt_loc.shape).cuda()
# Localization loss is calculated only for positive rois.
# NOTE: unlike the original implementation, we don't need inside_weight and
# outside_weight; they can be derived from gt_label
in_weight[(gt_label > 0).view(-1, 1).expand_as(in_weight).cuda()] = 1
loc_loss = _smooth_l1_loss(pred_loc, gt_loc, in_weight.detach(), sigma)
# Normalize by the total number of negative and positive rois.
loc_loss /= ((gt_label >= 0).sum().float()) # ignore gt_label==-1 for rpn_loss
return loc_loss
| 36.54023
| 89
| 0.614554
|
de98258f6cb5f63469ac2ca4ca9eba3f7fce1695
| 765
|
py
|
Python
|
sorting/test_sorting.py
|
springmaple/sorting
|
10c1efd0af9dbe78834977b75aef504e6a0d96a8
|
[
"Apache-2.0"
] | null | null | null |
sorting/test_sorting.py
|
springmaple/sorting
|
10c1efd0af9dbe78834977b75aef504e6a0d96a8
|
[
"Apache-2.0"
] | null | null | null |
sorting/test_sorting.py
|
springmaple/sorting
|
10c1efd0af9dbe78834977b75aef504e6a0d96a8
|
[
"Apache-2.0"
] | null | null | null |
import sorting
from random import randint
class TestClass:
def setup_method(self, _):
self._ls = []
for ln in range(10, 1000, 100):
self._ls.append([randint(0, x) for x in range(ln)])
def teardown_method(self, _):
del self._ls
def test_builtin_sorted(self):
for ls in self._ls:
assert sorted(list(ls)) == sorted(ls)
def test_merge_sort(self):
for ls in self._ls:
assert sorting.merge_sort(list(ls)) == sorted(ls)
def test_insertion_sort(self):
for ls in self._ls:
assert sorting.insertion_sort(list(ls)) == sorted(ls)
def test_bubble_sort(self):
for ls in self._ls:
assert sorting.bubble_sort(list(ls)) == sorted(ls)
| 25.5
| 65
| 0.602614
|
2bb87645a15025c60c630800f9da37f65fc2b288
| 1,116
|
py
|
Python
|
tests/autokeras/tuners/greedy_test.py
|
thwang1231/autokeras
|
2b8a1c5506896102ed49a3e82db811ada44a1d6c
|
[
"MIT"
] | 3
|
2020-10-20T05:32:34.000Z
|
2020-12-22T08:37:02.000Z
|
tests/autokeras/tuners/greedy_test.py
|
thwang1231/autokeras
|
2b8a1c5506896102ed49a3e82db811ada44a1d6c
|
[
"MIT"
] | null | null | null |
tests/autokeras/tuners/greedy_test.py
|
thwang1231/autokeras
|
2b8a1c5506896102ed49a3e82db811ada44a1d6c
|
[
"MIT"
] | 3
|
2020-03-31T07:53:54.000Z
|
2022-01-28T11:29:07.000Z
|
from unittest import mock
import kerastuner
from autokeras.tuners import greedy
from tests import utils
def test_random_oracle_state():
graph = utils.build_graph()
oracle = greedy.GreedyOracle(
hypermodel=graph,
objective='val_loss',
)
oracle.hypermodel = graph
oracle.set_state(oracle.get_state())
assert oracle.hypermodel is graph
@mock.patch('autokeras.tuners.greedy.GreedyOracle.get_best_trials')
def test_random_oracle(fn):
graph = utils.build_graph()
oracle = greedy.GreedyOracle(
hypermodel=graph,
objective='val_loss',
)
oracle.hypermodel = graph
trial = mock.Mock()
hp = kerastuner.HyperParameters()
trial.hyperparameters = hp
fn.return_value = [trial]
oracle.update_space(hp)
for i in range(2000):
oracle._populate_space(str(i))
assert 'optimizer' in oracle._hp_names[greedy.GreedyOracle.OPT]
assert 'classification_head_1/dropout_rate' in oracle._hp_names[
greedy.GreedyOracle.ARCH]
assert 'image_block_1/block_type' in oracle._hp_names[
greedy.GreedyOracle.HYPER]
| 26.571429
| 68
| 0.709677
|
9862551044283fe522fa8045578ac8b5d482023e
| 3,297
|
py
|
Python
|
licenses/views.py
|
snehal199/cc-licenses
|
d64c7293eb7be15ff3cd74cc5ff1536eb16794de
|
[
"MIT"
] | null | null | null |
licenses/views.py
|
snehal199/cc-licenses
|
d64c7293eb7be15ff3cd74cc5ff1536eb16794de
|
[
"MIT"
] | null | null | null |
licenses/views.py
|
snehal199/cc-licenses
|
d64c7293eb7be15ff3cd74cc5ff1536eb16794de
|
[
"MIT"
] | null | null | null |
import re
from django.shortcuts import get_object_or_404, render
from i18n import DEFAULT_LANGUAGE_CODE
from i18n.utils import active_translation, get_language_for_jurisdiction
from licenses.models import LegalCode, License
DEED_TEMPLATE_MAPPING = { # CURRENTLY UNUSED
# license_code : template name
"sampling": "licenses/sampling_deed.html",
"sampling+": "licenses/sampling_deed.html",
"nc-sampling+": "licenses/sampling_deed.html",
"devnations": "licenses/devnations_deed.html",
"CC0": "licenses/zero_deed.html",
"mark": "licenses/pdmark_deed.html",
"publicdomain": "licenses/publicdomain_deed.html",
# others use "licenses/standard_deed.html"
}
# For removing the deed.foo section of a deed url
REMOVE_DEED_URL_RE = re.compile(r"^(.*?/)(?:deed)?(?:\..*)?$")
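# e.g. REMOVE_DEED_URL_RE.match("/licenses/by/4.0/deed.es").group(1) yields
# "/licenses/by/4.0/" (illustrative URL)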
def home(request):
# Get the list of license codes and languages that occur among the 4.0 licenses
# to let the template iterate over them as it likes.
codes_for_40 = (
License.objects.filter(version="4.0")
.order_by("license_code")
.distinct("license_code")
.values_list("license_code", flat=True)
)
languages_for_40 = (
LegalCode.objects.filter(license__version="4.0")
.order_by("language_code")
.distinct("language_code")
.values_list("language_code", flat=True)
)
licenses_by_version = [
("4.0", codes_for_40, languages_for_40),
]
context = {
"licenses_by_version": licenses_by_version,
# "licenses_by_code": licenses_by_code,
"legalcodes": LegalCode.objects.filter(
license__version="4.0", language_code__in=["en", "es", "ar", "de"]
).order_by("license__license_code", "language_code"),
}
return render(request, "home.html", context)
def view_license(request, license_code, version, jurisdiction=None, language_code=None):
if language_code is None and jurisdiction:
language_code = get_language_for_jurisdiction(jurisdiction)
language_code = language_code or DEFAULT_LANGUAGE_CODE
legalcode = get_object_or_404(
LegalCode,
license__license_code=license_code,
license__version=version,
license__jurisdiction_code=jurisdiction or "",
language_code=language_code,
)
translation = legalcode.get_translation_object()
with active_translation(translation):
return render(
request,
"legalcode_40_page.html",
{"legalcode": legalcode, "license": legalcode.license,},
)
def view_deed(request, license_code, version, jurisdiction=None, language_code=None):
if language_code is None and jurisdiction:
language_code = get_language_for_jurisdiction(jurisdiction)
language_code = language_code or DEFAULT_LANGUAGE_CODE
legalcode = get_object_or_404(
LegalCode,
license__license_code=license_code,
license__version=version,
license__jurisdiction_code=jurisdiction or "",
language_code=language_code,
)
translation = legalcode.get_translation_object()
with active_translation(translation):
return render(
request,
"deed_40.html",
{"legalcode": legalcode, "license": legalcode.license,},
)
| 34.34375
| 88
| 0.687291
|
d2948441c59ca3b045b7f44d72c97db902c0e7ad
| 5,807
|
py
|
Python
|
mirheo/__init__.py
|
LMNS3d/Mirheo
|
e710291502eb3d1b4001e3811f7b7d105af82c86
|
[
"MIT"
] | null | null | null |
mirheo/__init__.py
|
LMNS3d/Mirheo
|
e710291502eb3d1b4001e3811f7b7d105af82c86
|
[
"MIT"
] | null | null | null |
mirheo/__init__.py
|
LMNS3d/Mirheo
|
e710291502eb3d1b4001e3811f7b7d105af82c86
|
[
"MIT"
] | 1
|
2021-07-14T13:24:05.000Z
|
2021-07-14T13:24:05.000Z
|
#!/usr/bin/env python
import inspect
import functools
import sys
import weakref
import re
from libmirheo import *
from libmirheo import __file__ as _libmirheo_file # For `make make_and_copy`.
__all__ = ["version", "tools", "_libmirheo_file"]
# Global variable for the mirheo coordination class
# Used in decorators to access compute task status
# This variable holds a weak reference so that it does not
# prevent cleanup of the simulation
__coordinator = None
# Wrap the __init__ or __new__ method of all the simulation handlers and particle vectors
# If we are not a compute task, just return None
# pass the state if needState is True
def decorate_object(f, needState = True):
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
global __coordinator
if __coordinator is None:
raise Exception('No coordinator created yet!')
if __coordinator().isComputeTask():
if needState:
return f(self, __coordinator().getState(), *args, **kwargs)
else:
return f(self, *args, **kwargs)
else:
return None
return wrapper
# Wrap the creation of the coordinator
def decorate_coordinator(f):
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
global __coordinator
f(self, *args, **kwargs)
if __coordinator is not None and __coordinator() is not None:
raise Exception('There can only be one coordinator at a time!')
__coordinator = weakref.ref(self)
return wrapper
# Wrap the registration of the plugins
def decorate_register_plugins(f):
@functools.wraps(f)
def wrapper(self, plugins_tuple):
return f(self, plugins_tuple[0], plugins_tuple[1])
return wrapper
# Wrap the creation of plugins
# Pass the compute task status into the creation function
# Pass the common global state associated to the coordinator
def decorate_plugins(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
global __coordinator
if __coordinator is None:
raise Exception('No coordinator created yet!')
return f(__coordinator().isComputeTask(),
__coordinator().getState(),
*args, **kwargs)
return wrapper
# Make MPI abort the program if an exception occurs
# https://groups.google.com/forum/#!topic/mpi4py/RovYzJ8qkbc
def handle_exception(exc_type, exc_value, exc_traceback):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
sys.stdout.flush()
sys.stderr.flush()
if __coordinator is not None and __coordinator() is not None:
abort()
def __init__():
# Setup exception handling
sys.excepthook = handle_exception
# Wrap everything except for plugins and non-GPU stuff
# Make the __init__ functions return None if we are not a compute task
nonGPU_names = [['Interactions', 'MembraneParameters'],
['Interactions', 'KantorBendingParameters'],
['Interactions', 'JuelicherBendingParameters']]
needing_state = ['Plugins', 'Integrators', 'ParticleVectors',
'Interactions', 'BelongingCheckers', 'Bouncers', 'Walls']
not_needing_state = [['ParticleVectors', 'MembraneMesh'],
['ParticleVectors', 'Mesh']]
classes = {}
submodules = inspect.getmembers(sys.modules[__name__],
lambda member: inspect.ismodule(member)
and 'mirheo' in member.__name__ )
for m in submodules:
classes[m[0]] = inspect.getmembers(sys.modules[m[1].__name__],
lambda member: inspect.isclass(member)
and 'mirheo' in member.__module__ )
for module in classes.keys():
if module != 'Plugins':
for cls in classes[module]:
if [module, cls[0]] not in nonGPU_names:
need_state = module in needing_state
if [module, cls[0]] in not_needing_state:
need_state = False
setattr(cls[1], '__init__', decorate_object(cls[1].__init__, need_state))
setattr(cls[1], '__new__', decorate_object(cls[1].__new__ , need_state))
getattr(cls[1], '__init__').__doc__ = re.sub('state: libmirheo.MirState, ',
'',
getattr(cls[1], '__init__')
.__doc__)
# Now wrap plugins creation
# Also change the names of the function
# by removing the double underscore
for m in submodules:
if m[0] == 'Plugins':
funcs = inspect.getmembers(sys.modules[m[1].__name__],
lambda member: inspect.isbuiltin(member)
and 'mirheo' in member.__module__)
for f in funcs:
if '__create' in f[0]:
newname = f[0][2:]
setattr(m[1], newname, decorate_plugins(f[1]))
getattr(m[1], newname).__doc__ = re.sub('__' + newname, newname, getattr(m[1], newname).__doc__)
getattr(m[1], newname).__doc__ = re.sub('compute_task: bool, ', '', getattr(m[1], newname).__doc__)
# Wrap initialization of the mirheo coordinator
Mirheo.__init__ = decorate_coordinator(Mirheo.__init__)
# Wrap registration of the plugins
Mirheo.registerPlugins = decorate_register_plugins(Mirheo.registerPlugins)
__init__()
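# Net effect of the wrapping above (hypothetical plugin name): a pybind11 factory
# exposed as Plugins.__createFoo(compute_task, state, ...) becomes callable as
# Plugins.createFoo(...); the decorator fills in the compute-task flag and the
# coordinator state automatically, and those arguments are stripped from the docstring.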
| 37.224359
| 119
| 0.58705
|
a44a4ba4fad39204df1a3345c47eddc7bc5f04eb
| 1,385
|
py
|
Python
|
mlp/migrations/0009_auto_20190404_0425.py
|
paleocore/paleocore110
|
754f3248ab22a2996b43bd224bd4ba15462edf7d
|
[
"MIT"
] | null | null | null |
mlp/migrations/0009_auto_20190404_0425.py
|
paleocore/paleocore110
|
754f3248ab22a2996b43bd224bd4ba15462edf7d
|
[
"MIT"
] | 7
|
2020-02-05T20:54:24.000Z
|
2021-12-13T20:13:20.000Z
|
mlp/migrations/0009_auto_20190404_0425.py
|
paleocore/paleocore110
|
754f3248ab22a2996b43bd224bd4ba15462edf7d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-04-04 04:25
from __future__ import unicode_literals
import ckeditor.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('mlp', '0008_auto_20190404_0407'),
]
operations = [
migrations.AlterField(
model_name='identificationqualifier',
name='remarks',
field=ckeditor.fields.RichTextField(blank=True, help_text='General remarks about this database record.', null=True, verbose_name='Record Remarks'),
),
migrations.AlterField(
model_name='occurrence',
name='remarks',
field=ckeditor.fields.RichTextField(blank=True, help_text='General remarks about this database record.', null=True, verbose_name='Record Remarks'),
),
migrations.AlterField(
model_name='taxon',
name='remarks',
field=ckeditor.fields.RichTextField(blank=True, help_text='General remarks about this database record.', null=True, verbose_name='Record Remarks'),
),
migrations.AlterField(
model_name='taxonrank',
name='remarks',
field=ckeditor.fields.RichTextField(blank=True, help_text='General remarks about this database record.', null=True, verbose_name='Record Remarks'),
),
]
| 37.432432
| 159
| 0.651264
|
803bda5cd24065fe0c24894144faba1c1f747f0c
| 21,143
|
py
|
Python
|
tests/test_feature.py
|
audeering/audinterface
|
2acb81e210efb9d3e5b085064dada5665ed27b6f
|
[
"MIT"
] | null | null | null |
tests/test_feature.py
|
audeering/audinterface
|
2acb81e210efb9d3e5b085064dada5665ed27b6f
|
[
"MIT"
] | 28
|
2021-04-28T10:02:57.000Z
|
2022-02-07T14:05:02.000Z
|
tests/test_feature.py
|
audeering/audinterface
|
2acb81e210efb9d3e5b085064dada5665ed27b6f
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
import pandas as pd
import pytest
import audformat
import audinterface
import audiofile as af
SAMPLING_RATE = 8000
NUM_CHANNELS = 2
NUM_FEATURES = 3
NUM_FRAMES = 5
SIGNAL_1D = np.ones((1, SAMPLING_RATE))
SIGNAL_2D = np.ones((NUM_CHANNELS, SAMPLING_RATE))
SEGMENT = audinterface.Segment(
process_func=lambda x, sr:
audinterface.utils.signal_index(
pd.to_timedelta(0),
pd.to_timedelta(x.shape[1] / sr, unit='s') / 2,
)
)
def feature_extractor(signal, _):
return np.ones((NUM_CHANNELS, NUM_FEATURES))
def features_extractor_sliding_window(signal, _, hop_size):
num_time_steps = int(np.ceil(signal.shape[1] / hop_size))
return np.ones((NUM_CHANNELS, NUM_FEATURES, num_time_steps))
def test_feature():
# You have to specify sampling rate with unit == 'samples' and win_dur
with pytest.raises(ValueError):
audinterface.Feature(
feature_names=('o1', 'o2', 'o3'),
sampling_rate=None,
unit='samples',
win_dur=2048,
)
# If no win_dur is given, no error should occur
audinterface.Feature(
feature_names=('o1', 'o2', 'o3'),
unit='samples',
sampling_rate=None,
)
# Only hop_dur is given
with pytest.raises(ValueError):
audinterface.Feature(
feature_names=('o1', 'o2', 'o3'),
hop_dur=0.1,
)
audinterface.Feature(
feature_names=('o1', 'o2', 'o3'),
win_dur=2048,
unit='samples',
sampling_rate=8000,
)
@pytest.mark.parametrize(
'signal, feature, expected',
[
(
SIGNAL_1D,
audinterface.Feature(
feature_names=['f1', 'f2', 'f3'],
process_func=lambda x, sr: np.ones(3),
),
np.ones((1, 3, 1)),
),
(
SIGNAL_1D,
audinterface.Feature(
feature_names=['f1', 'f2', 'f3'],
process_func=lambda x, sr: np.ones((1, 3)),
),
np.ones((1, 3, 1)),
),
(
SIGNAL_1D,
audinterface.Feature(
feature_names=['f1', 'f2', 'f3'],
process_func=lambda x, sr: np.ones((3, 1)),
),
np.ones((1, 3, 1)),
),
(
SIGNAL_1D,
audinterface.Feature(
feature_names=['f1', 'f2', 'f3'],
process_func=lambda x, sr: np.ones((1, 3, 5)),
),
np.ones((1, 3, 5)),
),
(
SIGNAL_2D,
audinterface.Feature(
feature_names=['f1', 'f2', 'f3'],
process_func=lambda x, sr: np.ones((1, 3, 5)),
channels=1,
),
np.ones((1, 3, 5)),
),
(
SIGNAL_2D,
audinterface.Feature(
feature_names=['f1', 'f2', 'f3'],
process_func=lambda x, sr: np.ones((2, 3)),
channels=range(2),
),
np.ones((2, 3, 1)),
),
(
SIGNAL_2D,
audinterface.Feature(
feature_names=['f1', 'f2', 'f3'],
process_func=lambda x, sr: np.ones((2, 3, 5)),
channels=range(2),
),
np.ones((2, 3, 5)),
),
(
SIGNAL_2D,
audinterface.Feature(
feature_names=['f1', 'f2', 'f3'],
process_func=lambda x, sr: np.ones(3),
channels=range(2),
process_func_is_mono=True,
),
np.ones((2, 3, 1)),
),
(
SIGNAL_2D,
audinterface.Feature(
feature_names=['f1', 'f2', 'f3'],
process_func=lambda x, sr: np.ones((1, 3)),
channels=range(2),
process_func_is_mono=True,
),
np.ones((2, 3, 1)),
),
(
SIGNAL_2D,
audinterface.Feature(
feature_names=['f1', 'f2', 'f3'],
process_func=lambda x, sr: np.ones((1, 3, 1)),
channels=range(2),
process_func_is_mono=True,
),
np.ones((2, 3, 1)),
),
(
SIGNAL_2D,
audinterface.Feature(
feature_names=['f1', 'f2', 'f3'],
process_func=lambda x, sr: np.ones((3, 5)),
channels=range(2),
process_func_is_mono=True,
),
np.ones((2, 3, 5)),
),
(
SIGNAL_2D,
audinterface.Feature(
feature_names=['f1', 'f2', 'f3'],
process_func=lambda x, sr: np.ones((1, 3, 5)),
channels=range(2),
process_func_is_mono=True,
),
np.ones((2, 3, 5)),
),
]
)
def test_process_call(signal, feature, expected):
np.testing.assert_array_equal(
feature(signal, SAMPLING_RATE),
expected,
)
@pytest.mark.parametrize(
'start, end, segment',
[
(None, None, None),
(None, None, SEGMENT),
(pd.NaT, pd.NaT, None),
(pd.to_timedelta('0.25s'), None, None),
(pd.to_timedelta('0.25s'), pd.NaT, None),
(None, pd.to_timedelta('0.75s'), None),
(pd.NaT, pd.to_timedelta('0.75s'), None),
(pd.to_timedelta('0.25s'), pd.to_timedelta('0.75s'), None),
]
)
def test_process_file(tmpdir, start, end, segment):
start_org = start
end_org = end
feature = audinterface.Feature(
feature_names=('o1', 'o2', 'o3'),
process_func=feature_extractor,
sampling_rate=None,
channels=range(NUM_CHANNELS),
resample=False,
segment=segment,
verbose=False,
)
y_expected = np.ones((1, NUM_CHANNELS * NUM_FEATURES))
# create test file
root = str(tmpdir.mkdir('wav'))
file = 'file.wav'
path = os.path.join(root, file)
af.write(path, SIGNAL_2D, SAMPLING_RATE)
# test absolute path
start = start_org
end = end_org
y = feature.process_file(path, start=start, end=end)
if start is None or pd.isna(start):
start = pd.to_timedelta(0)
if end is None or pd.isna(end):
end = pd.to_timedelta(af.duration(path), unit='s')
if segment is not None:
index = segment.process_file(path)
start = index[0][1]
end = index[0][2]
assert y.index.levels[0][0] == path
assert y.index.levels[1][0] == start
assert y.index.levels[2][0] == end
np.testing.assert_array_equal(y, y_expected)
# test relative path
start = start_org
end = end_org
y = feature.process_file(file, start=start, end=end, root=root)
if start is None or pd.isna(start):
start = pd.to_timedelta(0)
if end is None or pd.isna(end):
end = pd.to_timedelta(af.duration(path), unit='s')
if segment is not None:
index = segment.process_file(file, root=root)
start = index[0][1]
end = index[0][2]
assert y.index.levels[0][0] == file
assert y.index.levels[1][0] == start
assert y.index.levels[2][0] == end
np.testing.assert_array_equal(y, y_expected)
def test_process_folder(tmpdir):
index = audinterface.utils.signal_index(0, 1)
feature = audinterface.Feature(
feature_names=('o1', 'o2', 'o3'),
process_func=feature_extractor,
sampling_rate=None,
channels=range(NUM_CHANNELS),
resample=False,
verbose=False,
)
path = str(tmpdir.mkdir('wav'))
files = [
os.path.join(path, f'file{n}.wav') for n in range(3)
]
for file in files:
af.write(file, SIGNAL_2D, SAMPLING_RATE)
y = feature.process_folder(path)
y_expected = np.ones((3, NUM_CHANNELS * NUM_FEATURES))
assert all(y.index.levels[0] == files)
assert all(y.index.levels[1] == index.levels[0])
assert all(y.index.levels[2] == index.levels[1])
np.testing.assert_array_equal(y.values, y_expected)
def test_process_func_args():
def process_func(s, sr, arg1, arg2):
assert arg1 == 'foo'
assert arg2 == 'bar'
audinterface.Feature(
feature_names=('o1', 'o2', 'o3'),
process_func=process_func,
process_func_args={
'arg1': 'foo',
'arg2': 'bar',
}
)
with pytest.warns(UserWarning):
audinterface.Feature(
feature_names=('o1', 'o2', 'o3'),
process_func=process_func,
arg1='foo',
arg2='bar',
)
@pytest.mark.parametrize(
'process_func, num_feat, signal, start, end, expand, expected',
[
# no process function
(
None,
3,
SIGNAL_2D,
None,
None,
False,
np.zeros((1, 2 * 3)),
),
# 1 channel, 1 feature
(
lambda s, sr: 1,
1,
SIGNAL_1D,
None,
None,
False,
np.ones((1, 1)),
),
(
lambda s, sr: np.ones(1),
1,
SIGNAL_1D,
None,
None,
False,
np.ones((1, 1)),
),
# 1 channel, 1 feature
(
lambda s, sr: [1],
1,
SIGNAL_1D,
None,
None,
False,
np.ones((1, 1)),
),
(
lambda s, sr: np.ones((1, 1)),
1,
SIGNAL_1D,
None,
None,
False,
np.ones((1, 1)),
),
# 1 channel, 3 features
(
lambda s, sr: [1, 1, 1],
3,
SIGNAL_1D,
None,
None,
False,
np.ones((1, 3)),
),
(
lambda s, sr: np.ones(3),
3,
SIGNAL_1D,
None,
None,
False,
np.ones((1, 3)),
),
# 1 channel, 3 features
(
lambda s, sr: [[1, 1, 1]],
3,
SIGNAL_1D,
None,
None,
False,
np.ones((1, 3)),
),
(
lambda s, sr: np.ones((1, 3)),
3,
SIGNAL_1D,
None,
None,
False,
np.ones((1, 3)),
),
# 2 channels, 1 feature
(
lambda s, sr: [[1], [1]],
1,
SIGNAL_2D,
None,
None,
False,
np.ones((1, 2)),
),
(
lambda s, sr: np.ones((2, 1)),
1,
SIGNAL_2D,
None,
None,
False,
np.ones((1, 2)),
),
# 2 channels, 3 features
(
lambda s, sr: [[1, 1, 1], [1, 1, 1]],
3,
SIGNAL_2D,
None,
None,
False,
np.ones((1, 2 * 3)),
),
(
lambda s, sr: np.ones((2, 3)),
3,
SIGNAL_2D,
None,
None,
False,
np.ones((1, 2 * 3)),
),
# 2 channels, 3 features + start, end
(
lambda s, sr: np.ones((2, 3)),
3,
SIGNAL_2D,
pd.to_timedelta('1s'),
pd.to_timedelta('10s'),
False,
np.ones((1, 2 * 3)),
),
# 2 channels, 3 features, 5 frames
(
lambda s, sr: [[[1] * 5] * 3] * 2,
3,
SIGNAL_2D,
None,
None,
False,
np.ones((5, 2 * 3)),
),
(
lambda s, sr: np.ones((2, 3, 5)),
3,
SIGNAL_2D,
None,
None,
False,
np.ones((5, 2 * 3)),
),
# 1 channel, 1 feature + mono processing
(
lambda s, sr: 1,
1,
SIGNAL_1D,
None,
None,
True,
np.ones((1, 1)),
),
(
lambda s, sr: np.ones(1),
1,
SIGNAL_1D,
None,
None,
True,
np.ones((1, 1)),
),
(
lambda s, sr: np.ones((1, 1)),
1,
SIGNAL_1D,
None,
None,
True,
np.ones((1, 1)),
),
# 2 channels, 1 feature + mono processing
(
lambda s, sr: [1],
1,
SIGNAL_2D,
None,
None,
True,
np.ones((1, 2)),
),
(
lambda s, sr: np.ones(1),
1,
SIGNAL_2D,
None,
None,
True,
np.ones((1, 2)),
),
(
lambda s, sr: np.ones((1, 1)),
1,
SIGNAL_2D,
None,
None,
True,
np.ones((1, 2)),
),
# 2 channels, 3 features + mono processing
(
lambda s, sr: [1, 1, 1],
3,
SIGNAL_2D,
None,
None,
True,
np.ones((1, 2 * 3)),
),
(
lambda s, sr: np.ones(3),
3,
SIGNAL_2D,
None,
None,
True,
np.ones((1, 2 * 3)),
),
(
lambda s, sr: np.ones((1, 3, 1)),
3,
SIGNAL_2D,
None,
None,
True,
np.ones((1, 2 * 3)),
),
# 2 channels, 3 features, 5 frames + mono processing
(
lambda s, sr: [
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
],
3,
SIGNAL_2D,
None,
None,
True,
np.ones((5, 2 * 3)),
),
(
lambda s, sr: np.ones((3, 5)),
3,
SIGNAL_2D,
None,
None,
True,
np.ones((5, 2 * 3)),
),
(
lambda s, sr: np.ones((1, 3, 5)),
3,
SIGNAL_2D,
None,
None,
True,
np.ones((5, 2 * 3)),
),
# Feature extractor function returns too few dimensions
pytest.param(
lambda s, sr: np.ones(1),
3,
SIGNAL_2D,
None,
None,
None,
False,
marks=pytest.mark.xfail(raises=RuntimeError),
),
# Feature extractor function returns too many dimensions
pytest.param(
lambda s, sr: np.ones((1, 1, 1, 1)),
3,
SIGNAL_2D,
None,
None,
None,
False,
marks=pytest.mark.xfail(raises=RuntimeError),
),
# Feature extractor function returns wrong number of channels
pytest.param(
lambda s, sr: np.ones((1, 3)),
3,
SIGNAL_2D,
None,
None,
None,
False,
marks=pytest.mark.xfail(raises=RuntimeError),
),
# Feature extractor function returns wrong number of channels
pytest.param(
lambda s, sr: np.ones((2 + 1, 3)),
3,
SIGNAL_2D,
None,
None,
None,
False,
marks=pytest.mark.xfail(raises=RuntimeError),
),
# Feature extractor function returns wrong number of features
pytest.param(
lambda s, sr: np.ones((2, 3 + 1)),
3,
SIGNAL_2D,
None,
None,
None,
False,
marks=pytest.mark.xfail(raises=RuntimeError),
),
# Feature extractor function returns wrong number of features
pytest.param(
lambda s, sr: np.ones((2, 3 + 1, 1)),
3,
SIGNAL_2D,
None,
None,
None,
False,
marks=pytest.mark.xfail(raises=RuntimeError),
),
]
)
def test_process_signal(
process_func, num_feat, signal, start, end, expand, expected,
):
feature = audinterface.Feature(
feature_names=[f'f{i}' for i in range(num_feat)],
process_func=process_func,
channels=range(signal.shape[0]),
process_func_is_mono=expand,
win_dur=1,
)
y = feature.process_signal(
signal,
SAMPLING_RATE,
start=start,
end=end,
)
np.testing.assert_array_equal(y.values, expected)
@pytest.mark.parametrize(
'index,expected_features',
[
(
audinterface.utils.signal_index(
[pd.to_timedelta('0s'), pd.to_timedelta('1s')],
[pd.to_timedelta('2s'), pd.to_timedelta('3s')],
),
np.ones((2, NUM_CHANNELS * NUM_FEATURES)),
),
],
)
def test_process_signal_from_index(index, expected_features):
extractor = audinterface.Feature(
feature_names=('o1', 'o2', 'o3'),
process_func=feature_extractor,
channels=range(NUM_CHANNELS),
)
features = extractor.process_signal_from_index(
SIGNAL_2D,
SAMPLING_RATE,
index,
)
np.testing.assert_array_equal(features.values, expected_features)
def test_process_index(tmpdir):
feature = audinterface.Feature(
feature_names=('o1', 'o2', 'o3'),
process_func=feature_extractor,
channels=range(NUM_CHANNELS),
)
# empty
index = audformat.segmented_index()
y = feature.process_index(index)
assert y.empty
assert y.columns.tolist() == feature.column_names
# non-empty
# create file
root = str(tmpdir.mkdir('wav'))
file = 'file.wav'
path = os.path.join(root, file)
af.write(path, SIGNAL_2D, SAMPLING_RATE)
y_expected = np.ones((2, NUM_CHANNELS * NUM_FEATURES))
# absolute paths
index = audformat.segmented_index([path] * 2, [0, 1], [2, 3])
y = feature.process_index(index)
assert y.index.get_level_values('file')[0] == path
np.testing.assert_array_equal(y.values, y_expected)
assert y.columns.tolist() == feature.column_names
# relative paths
index = audformat.segmented_index([file] * 2, [0, 1], [2, 3])
y = feature.process_index(index, root=root)
assert y.index.get_level_values('file')[0] == file
np.testing.assert_array_equal(y.values, y_expected)
assert y.columns.tolist() == feature.column_names
@pytest.mark.parametrize(
'win_dur, hop_dur, unit',
[
(1, 0.5, 'seconds'),
(1, None, 'seconds'),
(16000, None, 'samples'),
(1000, 500, 'milliseconds'),
(SAMPLING_RATE, SAMPLING_RATE // 2, 'samples'),
pytest.param( # multiple frames, but win_dur is None
None, None, 'seconds',
marks=pytest.mark.xfail(raises=RuntimeError),
),
],
)
def test_signal_sliding_window(win_dur, hop_dur, unit):
# Test sliding window with two time steps
expected_features = np.ones((NUM_CHANNELS, 2 * NUM_FEATURES))
extractor = audinterface.Feature(
feature_names=('o1', 'o2', 'o3'),
process_func=features_extractor_sliding_window,
process_func_args={
'hop_size': SAMPLING_RATE // 2, # argument to process_func
},
channels=range(NUM_CHANNELS),
win_dur=win_dur,
hop_dur=hop_dur,
sampling_rate=SAMPLING_RATE,
unit=unit,
)
features = extractor.process_signal(
SIGNAL_2D,
SAMPLING_RATE,
)
n_time_steps = len(features)
if unit == 'samples':
win_dur = win_dur / SAMPLING_RATE
if hop_dur is not None:
hop_dur /= SAMPLING_RATE
unit = 'seconds'
if hop_dur is None:
hop_dur = win_dur / 2
starts = pd.timedelta_range(
pd.to_timedelta(0),
freq=pd.to_timedelta(hop_dur, unit=unit),
periods=n_time_steps,
)
ends = starts + pd.to_timedelta(win_dur, unit=unit)
index = audinterface.utils.signal_index(starts, ends)
pd.testing.assert_frame_equal(
features,
pd.DataFrame(
expected_features,
index=index,
columns=extractor.column_names,
),
)
def test_to_numpy():
expected_features = np.ones((NUM_CHANNELS, NUM_FEATURES, 1))
extractor = audinterface.Feature(
feature_names=('o1', 'o2', 'o3'),
process_func=feature_extractor,
channels=range(NUM_CHANNELS),
)
features = extractor.process_signal(
SIGNAL_2D,
SAMPLING_RATE,
)
features = extractor.to_numpy(features)
np.testing.assert_array_equal(features, expected_features)
| 26.134734
| 74
| 0.471078
|
a120e9928945797ca6044b948b5281d9b1da6c62
| 768
|
py
|
Python
|
alipay/aop/api/response/KoubeiMarketingDataBizadviserMemberprofileQueryResponse.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/response/KoubeiMarketingDataBizadviserMemberprofileQueryResponse.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
alipay/aop/api/response/KoubeiMarketingDataBizadviserMemberprofileQueryResponse.py
|
articuly/alipay-sdk-python-all
|
0259cd28eca0f219b97dac7f41c2458441d5e7a6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class KoubeiMarketingDataBizadviserMemberprofileQueryResponse(AlipayResponse):
def __init__(self):
super(KoubeiMarketingDataBizadviserMemberprofileQueryResponse, self).__init__()
self._result = None
@property
def result(self):
return self._result
@result.setter
def result(self, value):
self._result = value
def parse_response_content(self, response_content):
response = super(KoubeiMarketingDataBizadviserMemberprofileQueryResponse, self).parse_response_content(response_content)
if 'result' in response:
self.result = response['result']
| 29.538462
| 128
| 0.731771
|
bc26e255c1805c2983f54df474992f5ad7765823
| 8,859
|
py
|
Python
|
tools/c7n_azure/tests/test_app_service_plan.py
|
gileshinchcliff/cloud-custodian
|
54e548af02f3afe0919dcce21f0796212f5d1a4f
|
[
"Apache-2.0"
] | null | null | null |
tools/c7n_azure/tests/test_app_service_plan.py
|
gileshinchcliff/cloud-custodian
|
54e548af02f3afe0919dcce21f0796212f5d1a4f
|
[
"Apache-2.0"
] | 79
|
2019-03-20T12:27:06.000Z
|
2019-08-14T14:07:04.000Z
|
tools/c7n_azure/tests/test_app_service_plan.py
|
gileshinchcliff/cloud-custodian
|
54e548af02f3afe0919dcce21f0796212f5d1a4f
|
[
"Apache-2.0"
] | 2
|
2019-04-22T15:20:23.000Z
|
2019-08-27T12:37:51.000Z
|
# Copyright 2015-2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from azure.mgmt.web import WebSiteManagementClient
from azure_common import BaseTest, arm_template, cassette_name
from c7n_azure.session import Session
from jsonschema import ValidationError
from mock import patch
from c7n.utils import local_session
class AppServicePlanTest(BaseTest):
def setUp(self):
super(AppServicePlanTest, self).setUp()
self.session = local_session(Session)
self.client = local_session(Session).client(
'azure.mgmt.web.WebSiteManagementClient') # type: WebSiteManagementClient
def test_app_service_plan_schema_validate(self):
with self.sign_out_patch():
p = self.load_policy({
'name': 'test-azure-appserviceplan-win',
'resource': 'azure.appserviceplan',
'filters': [
{'type': 'offhour',
'default_tz': "pt",
'offhour': 18,
'tag': 'schedule'},
{'type': 'onhour',
'default_tz': "pt",
'onhour': 18,
'tag': 'schedule'}],
'actions': [
{'type': 'resize-plan',
'size': 'F1'}],
}, validate=True)
self.assertTrue(p)
# size and count are missing
with self.assertRaises(ValidationError):
self.load_policy({
'name': 'test-azure-appserviceplan',
'resource': 'azure.appserviceplan',
'actions': [
{'type': 'resize-plan'}
]
}, validate=True)
@patch('azure.mgmt.web.operations.app_service_plans_operations.'
'AppServicePlansOperations.update')
@arm_template('appserviceplan.json')
@cassette_name('window_plans')
def test_resize_plan_win(self, update_mock):
p = self.load_policy({
'name': 'test-azure-appserviceplan-win',
'resource': 'azure.appserviceplan',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'cctest-appserviceplan-win'},
{'type': 'value',
'key': 'sku.name',
'op': 'eq',
'value': 'S1'}
],
'actions': [
{'type': 'resize-plan',
'size': 'B1',
'count': 2}]
}, validate=True)
resources = p.run()
self.assertEqual(1, len(resources))
name, args, kwargs = update_mock.mock_calls[0]
self.assertEqual('cctest-appserviceplan-win', args[1])
self.assertEqual('B1', args[2].sku.name)
self.assertEqual('BASIC', args[2].sku.tier)
self.assertEqual(2, args[2].sku.capacity)
@patch('azure.mgmt.web.operations.app_service_plans_operations.'
'AppServicePlansOperations.update')
@arm_template('appserviceplan-linux.json')
@cassette_name('linux_plans')
def test_resize_plan_linux(self, update_mock):
p = self.load_policy({
'name': 'test-azure-appserviceplan-linux',
'resource': 'azure.appserviceplan',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'cctest-appserviceplan-linux'},
{'type': 'value',
'key': 'sku.name',
'op': 'eq',
'value': 'S1'}
],
'actions': [
{'type': 'resize-plan',
'size': 'B1',
'count': 3}]
}, validate=True)
resources = p.run()
self.assertEqual(1, len(resources))
name, args, kwargs = update_mock.mock_calls[0]
self.assertEqual('cctest-appserviceplan-linux', args[1])
self.assertEqual('B1', args[2].sku.name)
self.assertEqual('BASIC', args[2].sku.tier)
self.assertEqual(3, args[2].sku.capacity)
@patch('azure.mgmt.web.operations.app_service_plans_operations.'
'AppServicePlansOperations.update')
@arm_template('appserviceplan.json')
@cassette_name('window_plans')
def test_resize_plan_from_resource_tag(self, update_mock):
p = self.load_policy({
'name': 'test-azure-appserviceplan',
'resource': 'azure.appserviceplan',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'cctest-appserviceplan-win'}],
'actions': [
{'type': 'resize-plan',
'size': {
'type': 'resource',
'key': 'tags.sku'
}}],
})
resources = p.run()
self.assertEqual(1, len(resources))
name, args, kwargs = update_mock.mock_calls[0]
self.assertEqual('cctest-appserviceplan-win', args[1])
self.assertEqual('B1', args[2].sku.name)
self.assertEqual('BASIC', args[2].sku.tier)
@arm_template('appserviceplan.json')
@patch('c7n_azure.resources.appserviceplan.ResizePlan.log.info')
@cassette_name('window_plans')
def test_resize_consumption_win(self, logger):
p = self.load_policy({
'name': 'test-azure-consumption-win',
'resource': 'azure.appserviceplan',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'cctest-consumption-win'}
],
'actions': [
{'type': 'resize-plan',
'size': 'F1'}]
}, validate=True)
p.run()
logger.assert_called_once_with(
'Skipping cctest-consumption-win, '
'because this App Service Plan is for Consumption Azure Functions.')
@arm_template('appserviceplan-linux.json')
@patch('c7n_azure.resources.appserviceplan.ResizePlan.log.info')
@cassette_name('linux_plans')
def test_resize_consumption_linux(self, logger):
p = self.load_policy({
'name': 'test-azure-appserviceplan-linux',
'resource': 'azure.appserviceplan',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'cctest-consumption-linux'}
],
'actions': [
{'type': 'resize-plan',
'size': 'F1'}]
}, validate=True)
p.run()
logger.assert_called_once_with(
'Skipping cctest-consumption-linux, '
'because this App Service Plan is for Consumption Azure Functions.')
@patch('azure.mgmt.web.operations.app_service_plans_operations.'
'AppServicePlansOperations.update')
@arm_template('appserviceplan.json')
@cassette_name('window_plans')
def test_resize_plan_win_only_count(self, update_mock):
p = self.load_policy({
'name': 'test-azure-appserviceplan-win',
'resource': 'azure.appserviceplan',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'eq',
'value_type': 'normalize',
'value': 'cctest-appserviceplan-win'},
{'type': 'value',
'key': 'sku.name',
'op': 'eq',
'value': 'S1'}
],
'actions': [
{'type': 'resize-plan',
'count': 3}]
}, validate=True)
resources = p.run()
self.assertEqual(1, len(resources))
name, args, kwargs = update_mock.mock_calls[0]
self.assertEqual('cctest-appserviceplan-win', args[1])
self.assertEqual('S1', args[2].sku.name)
self.assertEqual('Standard', args[2].sku.tier)
self.assertEqual(3, args[2].sku.capacity)
| 37.538136
| 86
| 0.533243
|
8c6f1b75e357096cec3af31b761bd4482fcf25b4
| 431
|
py
|
Python
|
bioresources/tables.py
|
ezequieljsosa/sndg-web
|
7763c8fbc83dc92abb9c53326e2fe227bcabf607
|
[
"MIT"
] | null | null | null |
bioresources/tables.py
|
ezequieljsosa/sndg-web
|
7763c8fbc83dc92abb9c53326e2fe227bcabf607
|
[
"MIT"
] | null | null | null |
bioresources/tables.py
|
ezequieljsosa/sndg-web
|
7763c8fbc83dc92abb9c53326e2fe227bcabf607
|
[
"MIT"
] | null | null | null |
import django_tables2 as tables
from .models import Publication
class PublicationTable(tables.Table):
# counter = tables.TemplateColumn("{{ row_counter }}")
class Meta:
model = Publication
template_name = 'django_tables2/bootstrap.html'
exclude = ["type", "pubmed_id", "electronic_id", "scopus_id", "issn",
"ncbi_tax", "deprecated", "created_at", "updated_at","resource_ptr"]
| 30.785714
| 87
| 0.663573
|
b3c8c2f8ea3fd36a063ed33c7eaa0b32d20f804d
| 10,589
|
py
|
Python
|
tests/test_api.py
|
tirkarthi/importlib_metadata
|
81bccbce14dcddffa128b155260e9bd4e3dcc4c7
|
[
"Apache-2.0"
] | null | null | null |
tests/test_api.py
|
tirkarthi/importlib_metadata
|
81bccbce14dcddffa128b155260e9bd4e3dcc4c7
|
[
"Apache-2.0"
] | null | null | null |
tests/test_api.py
|
tirkarthi/importlib_metadata
|
81bccbce14dcddffa128b155260e9bd4e3dcc4c7
|
[
"Apache-2.0"
] | 1
|
2021-01-09T20:02:06.000Z
|
2021-01-09T20:02:06.000Z
|
import re
import textwrap
import unittest
import warnings
import importlib
from . import fixtures
from importlib_metadata import (
Distribution,
PackageNotFoundError,
distribution,
entry_points,
files,
metadata,
requires,
version,
)
class APITests(
fixtures.EggInfoPkg,
fixtures.DistInfoPkg,
fixtures.DistInfoPkgWithDot,
fixtures.EggInfoFile,
unittest.TestCase,
):
version_pattern = r'\d+\.\d+(\.\d)?'
def test_retrieves_version_of_self(self):
pkg_version = version('egginfo-pkg')
assert isinstance(pkg_version, str)
assert re.match(self.version_pattern, pkg_version)
def test_retrieves_version_of_distinfo_pkg(self):
pkg_version = version('distinfo-pkg')
assert isinstance(pkg_version, str)
assert re.match(self.version_pattern, pkg_version)
def test_for_name_does_not_exist(self):
with self.assertRaises(PackageNotFoundError):
distribution('does-not-exist')
def test_name_normalization(self):
names = 'pkg.dot', 'pkg_dot', 'pkg-dot', 'pkg..dot', 'Pkg.Dot'
for name in names:
with self.subTest(name):
assert distribution(name).metadata['Name'] == 'pkg.dot'
def test_prefix_not_matched(self):
prefixes = 'p', 'pkg', 'pkg.'
for prefix in prefixes:
with self.subTest(prefix):
with self.assertRaises(PackageNotFoundError):
distribution(prefix)
def test_for_top_level(self):
self.assertEqual(
distribution('egginfo-pkg').read_text('top_level.txt').strip(), 'mod'
)
def test_read_text(self):
top_level = [
path for path in files('egginfo-pkg') if path.name == 'top_level.txt'
][0]
self.assertEqual(top_level.read_text(), 'mod\n')
def test_entry_points(self):
eps = entry_points()
assert 'entries' in eps.groups
entries = eps.select(group='entries')
assert 'main' in entries.names
ep = entries['main']
self.assertEqual(ep.value, 'mod:main')
self.assertEqual(ep.extras, [])
def test_entry_points_distribution(self):
entries = entry_points(group='entries')
for entry in ("main", "ns:sub"):
ep = entries[entry]
self.assertIn(ep.dist.name, ('distinfo-pkg', 'egginfo-pkg'))
self.assertEqual(ep.dist.version, "1.0.0")
def test_entry_points_unique_packages(self):
"""
Entry points should only be exposed for the first package
on sys.path with a given name.
"""
alt_site_dir = self.fixtures.enter_context(fixtures.tempdir())
self.fixtures.enter_context(self.add_sys_path(alt_site_dir))
alt_pkg = {
"distinfo_pkg-1.1.0.dist-info": {
"METADATA": """
Name: distinfo-pkg
Version: 1.1.0
""",
"entry_points.txt": """
[entries]
main = mod:altmain
""",
},
}
fixtures.build_files(alt_pkg, alt_site_dir)
entries = entry_points(group='entries')
assert not any(
ep.dist.name == 'distinfo-pkg' and ep.dist.version == '1.0.0'
for ep in entries
)
# ns:sub doesn't exist in alt_pkg
assert 'ns:sub' not in entries
def test_entry_points_missing_name(self):
with self.assertRaises(KeyError):
entry_points(group='entries')['missing']
def test_entry_points_missing_group(self):
assert entry_points(group='missing') == ()
def test_entry_points_dict_construction(self):
"""
Prior versions of entry_points() returned simple lists and
allowed casting those lists into maps by name using ``dict()``.
Capture this now deprecated use-case.
"""
with warnings.catch_warnings(record=True) as caught:
eps = dict(entry_points(group='entries'))
assert 'main' in eps
assert eps['main'] == entry_points(group='entries')['main']
# check warning
expected = next(iter(caught))
assert expected.category is DeprecationWarning
assert "Construction of dict of EntryPoints is deprecated" in str(expected)
def test_entry_points_groups_getitem(self):
"""
Prior versions of entry_points() returned a dict. Ensure
that callers using '.__getitem__()' are supported but warned to
migrate.
"""
with warnings.catch_warnings(record=True):
entry_points()['entries'] == entry_points(group='entries')
with self.assertRaises(KeyError):
entry_points()['missing']
def test_entry_points_groups_get(self):
"""
Prior versions of entry_points() returned a dict. Ensure
that callers using '.get()' are supported but warned to
migrate.
"""
with warnings.catch_warnings(record=True):
entry_points().get('missing', 'default') == 'default'
entry_points().get('entries', 'default') == entry_points()['entries']
entry_points().get('missing', ()) == ()
def test_metadata_for_this_package(self):
md = metadata('egginfo-pkg')
assert md['author'] == 'Steven Ma'
assert md['LICENSE'] == 'Unknown'
assert md['Name'] == 'egginfo-pkg'
classifiers = md.get_all('Classifier')
assert 'Topic :: Software Development :: Libraries' in classifiers
def test_importlib_metadata_version(self):
resolved = version('importlib-metadata')
assert re.match(self.version_pattern, resolved)
@staticmethod
def _test_files(files):
root = files[0].root
for file in files:
assert file.root == root
assert not file.hash or file.hash.value
assert not file.hash or file.hash.mode == 'sha256'
assert not file.size or file.size >= 0
assert file.locate().exists()
assert isinstance(file.read_binary(), bytes)
if file.name.endswith('.py'):
file.read_text()
def test_file_hash_repr(self):
try:
assertRegex = self.assertRegex
except AttributeError:
# Python 2
assertRegex = self.assertRegexpMatches
util = [p for p in files('distinfo-pkg') if p.name == 'mod.py'][0]
assertRegex(repr(util.hash), '<FileHash mode: sha256 value: .*>')
def test_files_dist_info(self):
self._test_files(files('distinfo-pkg'))
def test_files_egg_info(self):
self._test_files(files('egginfo-pkg'))
def test_version_egg_info_file(self):
self.assertEqual(version('egginfo-file'), '0.1')
def test_requires_egg_info_file(self):
requirements = requires('egginfo-file')
self.assertIsNone(requirements)
def test_requires_egg_info(self):
deps = requires('egginfo-pkg')
assert len(deps) == 2
assert any(dep == 'wheel >= 1.0; python_version >= "2.7"' for dep in deps)
def test_requires_dist_info(self):
deps = requires('distinfo-pkg')
assert len(deps) == 2
assert all(deps)
assert 'wheel >= 1.0' in deps
assert "pytest; extra == 'test'" in deps
def test_more_complex_deps_requires_text(self):
requires = textwrap.dedent(
"""
dep1
dep2
[:python_version < "3"]
dep3
[extra1]
dep4
[extra2:python_version < "3"]
dep5
"""
)
deps = sorted(Distribution._deps_from_requires_text(requires))
expected = [
'dep1',
'dep2',
'dep3; python_version < "3"',
'dep4; extra == "extra1"',
'dep5; (python_version < "3") and extra == "extra2"',
]
# It's important that the environment marker expression be
# wrapped in parentheses to avoid the following 'and' binding more
# tightly than some other part of the environment expression.
assert deps == expected
def test_as_json(self):
md = metadata('distinfo-pkg').json
assert 'name' in md
assert md['keywords'] == ['sample', 'package']
desc = md['description']
assert desc.startswith('Once upon a time\nThere was')
assert len(md['requires_dist']) == 2
def test_as_json_egg_info(self):
md = metadata('egginfo-pkg').json
assert 'name' in md
assert md['keywords'] == ['sample', 'package']
desc = md['description']
assert desc.startswith('Once upon a time\nThere was')
assert len(md['classifier']) == 2
def test_as_json_odd_case(self):
self.make_uppercase()
md = metadata('distinfo-pkg').json
assert 'name' in md
assert len(md['requires_dist']) == 2
assert md['keywords'] == ['SAMPLE', 'PACKAGE']
class LegacyDots(fixtures.DistInfoPkgWithDotLegacy, unittest.TestCase):
def test_name_normalization(self):
names = 'pkg.dot', 'pkg_dot', 'pkg-dot', 'pkg..dot', 'Pkg.Dot'
for name in names:
with self.subTest(name):
assert distribution(name).metadata['Name'] == 'pkg.dot'
def test_name_normalization_versionless_egg_info(self):
names = 'pkg.lot', 'pkg_lot', 'pkg-lot', 'pkg..lot', 'Pkg.Lot'
for name in names:
with self.subTest(name):
assert distribution(name).metadata['Name'] == 'pkg.lot'
class OffSysPathTests(fixtures.DistInfoPkgOffPath, unittest.TestCase):
def test_find_distributions_specified_path(self):
dists = Distribution.discover(path=[str(self.site_dir)])
assert any(dist.metadata['Name'] == 'distinfo-pkg' for dist in dists)
def test_distribution_at_pathlib(self):
"""Demonstrate how to load metadata direct from a directory."""
dist_info_path = self.site_dir / 'distinfo_pkg-1.0.0.dist-info'
dist = Distribution.at(dist_info_path)
assert dist.version == '1.0.0'
def test_distribution_at_str(self):
dist_info_path = self.site_dir / 'distinfo_pkg-1.0.0.dist-info'
dist = Distribution.at(str(dist_info_path))
assert dist.version == '1.0.0'
class InvalidateCache(unittest.TestCase):
def test_invalidate_cache(self):
# No externally observable behavior, but ensures test coverage...
importlib.invalidate_caches()
| 34.37987
| 83
| 0.607517
|
99a003bd9b9bbbaee9e690765f38c43c1cdd39f1
| 1,643
|
py
|
Python
|
ietf/nomcom/management/commands/feedback_email.py
|
ekr/ietfdb
|
8d936836b0b9ff31cda415b0a423e3f5b33ab695
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2
|
2021-11-20T03:40:56.000Z
|
2021-11-20T03:40:59.000Z
|
ietf/nomcom/management/commands/feedback_email.py
|
ekr/ietfdb
|
8d936836b0b9ff31cda415b0a423e3f5b33ab695
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
ietf/nomcom/management/commands/feedback_email.py
|
ekr/ietfdb
|
8d936836b0b9ff31cda415b0a423e3f5b33ab695
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null |
import sys
from django.core.management.base import BaseCommand, CommandError
from ietf.utils.log import log
from ietf.nomcom.models import NomCom
from ietf.nomcom.utils import create_feedback_email
from ietf.nomcom.fields import EncryptedException
import debug # pyflakes:ignore
class Command(BaseCommand):
help = (u"Receive nomcom email, encrypt and save it.")
def add_arguments(self, parser):
parser.add_argument('--nomcom-year', dest='year', help='NomCom year')
parser.add_argument('--email-file', dest='email', help='File containing email (default: stdin)')
def handle(self, *args, **options):
email = options.get('email', None)
year = options.get('year', None)
msg = None
nomcom = None
        help_message = 'Usage: feedback_email --nomcom-year <nomcom-year> --email-file <email-file>'
if not year:
log("Error: missing nomcom-year")
raise CommandError("Missing nomcom-year\n\n"+help_message)
if not email:
msg = sys.stdin.read()
else:
msg = open(email, "r").read()
try:
nomcom = NomCom.objects.get(group__acronym__icontains=year,
group__state__slug='active')
except NomCom.DoesNotExist:
raise CommandError("NomCom %s does not exist or it isn't active" % year)
try:
feedback = create_feedback_email(nomcom, msg)
log(u"Received nomcom email from %s" % feedback.author)
except (EncryptedException, ValueError) as e:
raise CommandError(e)
| 35.717391
| 105
| 0.62325
|
9de3ce7b99d1a287bcccde2084c4033fa5a9e7c0
| 505
|
py
|
Python
|
setup.py
|
sarusso/Timeseria
|
fdf9990ab68e20f75a64f090a2c43979266dcba9
|
[
"Apache-2.0"
] | 8
|
2021-01-02T17:43:13.000Z
|
2022-02-22T09:07:22.000Z
|
setup.py
|
sarusso/Timeseria
|
fdf9990ab68e20f75a64f090a2c43979266dcba9
|
[
"Apache-2.0"
] | 20
|
2020-07-15T11:29:41.000Z
|
2022-03-29T22:51:52.000Z
|
setup.py
|
sarusso/Timeseria
|
fdf9990ab68e20f75a64f090a2c43979266dcba9
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
from setuptools import setup, find_packages
setup(name='timeseria',
version='0.1.0',
description='A time series processing library',
author='Stefano Alberto Russo',
author_email='stefano.russo@gmail.com',
packages=['timeseria','timeseria.tests', 'timeseria.models'],
package_data={
'timeseria': ['static/css/*.css', 'static/js/*.js'],
'timeseria.tests': ['test_data/csv/*.csv']
},
license='LICENSE',
)
| 29.705882
| 67
| 0.611881
|
c83e2944c8c7061c8e3c967e3092691093b0ee9d
| 1,731
|
py
|
Python
|
fHDHR/api/hub/pages/index_html.py
|
crackers8199/fHDHR_Locast
|
cad9cc0bf64f70bbcd2e702a938794d4eacad6cf
|
[
"WTFPL"
] | null | null | null |
fHDHR/api/hub/pages/index_html.py
|
crackers8199/fHDHR_Locast
|
cad9cc0bf64f70bbcd2e702a938794d4eacad6cf
|
[
"WTFPL"
] | null | null | null |
fHDHR/api/hub/pages/index_html.py
|
crackers8199/fHDHR_Locast
|
cad9cc0bf64f70bbcd2e702a938794d4eacad6cf
|
[
"WTFPL"
] | null | null | null |
from io import StringIO
class Index_HTML():
def __init__(self, settings, device, page_elements):
self.config = settings
self.device = device
self.page_elements = page_elements
def get_index_html(self, base_url, force_update=False):
fakefile = StringIO()
page_elements = self.page_elements.get()
for line in page_elements["top"]:
fakefile.write(line + "\n")
fakefile.write("<h4 style=\"text-align: center;\">fHDHR Status</h4>")
fakefile.write("\n")
fakefile.write("<table class=\"center\" style=\"width:50%\">\n")
fakefile.write(" <tr>\n")
fakefile.write(" <th></th>\n")
fakefile.write(" <th></th>\n")
fakefile.write(" </tr>\n")
total_channels = self.device.channels.get_station_total()
tuners_in_use = self.device.tuners.inuse_tuner_count()
max_tuners = self.device.tuners.max_tuners
tableguts = [
["Script Directory", str(self.config.dict["filedir"]["script_dir"])],
["Config File", str(self.config.config_file)],
["Cache Path", str(self.config.dict["filedir"]["cache_dir"])],
["Total Channels", str(total_channels)],
["Tuner Usage", "%s/%s" % (str(tuners_in_use), str(max_tuners))]
]
for guts in tableguts:
fakefile.write(" <tr>\n")
fakefile.write(" <td>%s</td>\n" % (guts[0]))
fakefile.write(" <td>%s</td>\n" % (guts[1]))
fakefile.write(" </tr>\n")
for line in page_elements["end"]:
fakefile.write(line + "\n")
return fakefile.getvalue()
| 33.941176
| 89
| 0.548816
|
0b781cd041d9259d086935e7dca0c59a3dafc18d
| 2,218
|
py
|
Python
|
tests/run_ligpargen_regression_tests.py
|
mc-robinson/LigParGen
|
15eaf88e0aff5136904691cd14f6b04b4820b9f7
|
[
"MIT"
] | 4
|
2020-01-07T11:57:35.000Z
|
2021-11-04T18:26:44.000Z
|
tests/run_ligpargen_regression_tests.py
|
mc-robinson/LigParGen
|
15eaf88e0aff5136904691cd14f6b04b4820b9f7
|
[
"MIT"
] | null | null | null |
tests/run_ligpargen_regression_tests.py
|
mc-robinson/LigParGen
|
15eaf88e0aff5136904691cd14f6b04b4820b9f7
|
[
"MIT"
] | 4
|
2017-12-21T05:02:08.000Z
|
2020-07-03T05:53:23.000Z
|
# !/usr/bin/env python
# File name: run_ligpargen_regression_tests.py
# Author: Matt Robinson, Jorgensen Lab @ Yale
# Email: matthew.robinson@yale.edu
# Date created: 07/18/2017
# Python Version: 3.6
# to get the module imports to work, need to add .. to python path
import sys, os
testdir = os.path.dirname(__file__)
srcdir = '../LigParGen'
sys.path.insert(0, os.path.abspath(os.path.join(testdir, srcdir)))
# import the package
#import LigParGen
# import the make_single_topology module
#from LigParGen import Converter
# now import all functions
#from LigParGen.Converter import *
import Converter
molecules_list = [['N#Cc1ccccc1', 'BCN', 0],
['COc1ccccc1OC(=O)c1ccccc1', 'ZINC00000349', 0],
['O=C([O-])CCCNC(=O)NC1CCCCC1', 'ZINC08754389', -1],
['CCC(CO)[NH2+]CC[NH2+]C(CC)CO', 'ZINC19364219', 2],
['O=C([O-])c1cc(Br)ccc1[N-]S(=O)(=O)c1ccc(Cl)cc1', 'ZINC35930493', -2]]
#['CC(C)CC[NH2+]CC1COc2ccccc2O1', 'ZINC04214115', 1],
def run_tests():
FILE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)))
# bool that will go to false if one of the tests failed
passed_all_tests = True
for mol in molecules_list:
smiles_code = mol[0]
ZINC_id = mol[1]
charge = mol[2]
zmat_path = os.path.join(FILE_DIR, 'data', ZINC_id + '.z')
mol_path = os.path.join(FILE_DIR, 'data', ZINC_id + '.mol')
pdb_path = os.path.join(FILE_DIR, 'data', ZINC_id + '.pdb')
try:
Converter.convert(smiles=smiles_code, charge=charge, resname='UNK')
except:
print("SMILES CODE FAILED ON " + ZINC_id)
passed_all_tests = False
try:
Converter.convert(pdb=pdb_path, charge=charge, resname='BCN')
except:
print("PDB CODE FAILED ON " + ZINC_id)
passed_all_tests = False
try:
Converter.convert(mol=mol_path, charge=charge, resname='UNK')
except:
print("MOL CODE FAILED ON " + ZINC_id)
passed_all_tests = False
if passed_all_tests:
print('PASSED ALL TESTS')
if __name__ == '__main__':
run_tests()
| 29.573333
| 91
| 0.608656
|
a1e99d58438f875f2552b192338c58abd55a630b
| 83
|
py
|
Python
|
07_Java_Experiment/PyTest/shell/__init__.py
|
Robert-Stackflow/HUST-Courses
|
300752552e7af035b0e5c7663953850c81871242
|
[
"MIT"
] | 4
|
2021-11-01T09:27:32.000Z
|
2022-03-07T14:24:10.000Z
|
07_Java_Experiment/PyTest/shell/__init__.py
|
Robert-Stackflow/HUST-Courses
|
300752552e7af035b0e5c7663953850c81871242
|
[
"MIT"
] | null | null | null |
07_Java_Experiment/PyTest/shell/__init__.py
|
Robert-Stackflow/HUST-Courses
|
300752552e7af035b0e5c7663953850c81871242
|
[
"MIT"
] | null | null | null |
from shell.executor import ShellExecutionResult
from shell.executor import execute
| 27.666667
| 47
| 0.879518
|
ef71665ba08b8d4fd5a4014b85f8462616c2530a
| 17,714
|
py
|
Python
|
test/python/qobj/test_qobj.py
|
meamy/qiskit-terra
|
353918ba2c92b9d1fdda71d9a1d0262be6389c1f
|
[
"Apache-2.0"
] | 1
|
2019-06-04T12:23:36.000Z
|
2019-06-04T12:23:36.000Z
|
test/python/qobj/test_qobj.py
|
meamy/qiskit-terra
|
353918ba2c92b9d1fdda71d9a1d0262be6389c1f
|
[
"Apache-2.0"
] | 35
|
2019-03-07T02:09:22.000Z
|
2022-03-22T19:55:15.000Z
|
test/python/qobj/test_qobj.py
|
meamy/qiskit-terra
|
353918ba2c92b9d1fdda71d9a1d0262be6389c1f
|
[
"Apache-2.0"
] | null | null | null |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Qobj tests."""
import copy
import uuid
import jsonschema
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit.compiler import assemble
from qiskit.providers.basicaer import basicaerjob
from qiskit.qobj import (QasmQobj, PulseQobj, QobjHeader,
PulseQobjInstruction, PulseQobjExperiment,
PulseQobjConfig, QobjMeasurementOption,
PulseLibraryItem, QasmQobjInstruction,
QasmQobjExperiment, QasmQobjConfig,
QasmExperimentCalibrations, GateCalibration)
from qiskit.qobj import validate_qobj_against_schema
from qiskit.validation.jsonschema.exceptions import SchemaValidationError
from qiskit.test import QiskitTestCase
from qiskit.test.mock import FakeRueschlikon
class TestQASMQobj(QiskitTestCase):
"""Tests for QasmQobj."""
def setUp(self):
self.valid_qobj = QasmQobj(
qobj_id='12345',
header=QobjHeader(),
config=QasmQobjConfig(shots=1024, memory_slots=2, max_credits=10),
experiments=[
QasmQobjExperiment(instructions=[
QasmQobjInstruction(name='u1', qubits=[1], params=[0.4]),
QasmQobjInstruction(name='u2', qubits=[1], params=[0.4, 0.2])
])
]
)
self.valid_dict = {
'qobj_id': '12345',
'type': 'QASM',
'schema_version': '1.2.0',
'header': {},
'config': {'max_credits': 10, 'memory_slots': 2, 'shots': 1024},
'experiments': [
{'instructions': [
{'name': 'u1', 'params': [0.4], 'qubits': [1]},
{'name': 'u2', 'params': [0.4, 0.2], 'qubits': [1]}
]}
],
}
self.bad_qobj = copy.deepcopy(self.valid_qobj)
self.bad_qobj.experiments = []
def test_to_dict_against_schema(self):
"""Test dictionary representation of Qobj against its schema."""
try:
validate_qobj_against_schema(self.valid_qobj)
except jsonschema.ValidationError as validation_error:
self.fail(str(validation_error))
def test_from_dict_per_class(self):
"""Test Qobj and its subclass representations given a dictionary."""
test_parameters = {
QasmQobj: (
self.valid_qobj,
self.valid_dict
),
QasmQobjConfig: (
QasmQobjConfig(shots=1, memory_slots=2),
{'shots': 1, 'memory_slots': 2}
),
QasmQobjExperiment: (
QasmQobjExperiment(
instructions=[QasmQobjInstruction(name='u1', qubits=[1], params=[0.4])]),
{'instructions': [{'name': 'u1', 'qubits': [1], 'params': [0.4]}]}
),
QasmQobjInstruction: (
QasmQobjInstruction(name='u1', qubits=[1], params=[0.4]),
{'name': 'u1', 'qubits': [1], 'params': [0.4]}
)
}
for qobj_class, (qobj_item, expected_dict) in test_parameters.items():
with self.subTest(msg=str(qobj_class)):
self.assertEqual(qobj_item, qobj_class.from_dict(expected_dict))
def test_snapshot_instruction_to_dict(self):
"""Test snapshot instruction to dict."""
valid_qobj = QasmQobj(
qobj_id='12345',
header=QobjHeader(),
config=QasmQobjConfig(shots=1024, memory_slots=2, max_credits=10),
experiments=[
QasmQobjExperiment(instructions=[
QasmQobjInstruction(name='u1', qubits=[1], params=[0.4]),
QasmQobjInstruction(name='u2', qubits=[1], params=[0.4, 0.2]),
QasmQobjInstruction(name='snapshot', qubits=[1],
snapshot_type='statevector',
label='my_snap')
])
]
)
res = valid_qobj.to_dict(validate=True)
expected_dict = {
'qobj_id': '12345',
'type': 'QASM',
'schema_version': '1.3.0',
'header': {},
'config': {'max_credits': 10, 'memory_slots': 2, 'shots': 1024},
'experiments': [
{'instructions': [
{'name': 'u1', 'params': [0.4], 'qubits': [1]},
{'name': 'u2', 'params': [0.4, 0.2], 'qubits': [1]},
{'name': 'snapshot', 'qubits': [1],
'snapshot_type': 'statevector', 'label': 'my_snap'}
],
'config': {},
'header': {}}
],
}
self.assertEqual(expected_dict, res)
def test_snapshot_instruction_from_dict(self):
"""Test snapshot instruction from dict."""
expected_qobj = QasmQobj(
qobj_id='12345',
header=QobjHeader(),
config=QasmQobjConfig(shots=1024, memory_slots=2, max_credits=10),
experiments=[
QasmQobjExperiment(instructions=[
QasmQobjInstruction(name='u1', qubits=[1], params=[0.4]),
QasmQobjInstruction(name='u2', qubits=[1], params=[0.4, 0.2]),
QasmQobjInstruction(name='snapshot', qubits=[1],
snapshot_type='statevector',
label='my_snap')
])
]
)
qobj_dict = {
'qobj_id': '12345',
'type': 'QASM',
'schema_version': '1.2.0',
'header': {},
'config': {'max_credits': 10, 'memory_slots': 2, 'shots': 1024},
'experiments': [
{'instructions': [
{'name': 'u1', 'params': [0.4], 'qubits': [1]},
{'name': 'u2', 'params': [0.4, 0.2], 'qubits': [1]},
{'name': 'snapshot', 'qubits': [1],
'snapshot_type': 'statevector', 'label': 'my_snap'}
]}
],
}
self.assertEqual(expected_qobj, QasmQobj.from_dict(qobj_dict))
def test_simjob_raises_error_when_sending_bad_qobj(self):
"""Test SimulatorJob is denied resource request access when given an invalid Qobj instance.
"""
job_id = str(uuid.uuid4())
backend = FakeRueschlikon()
self.bad_qobj.header = QobjHeader(backend_name=backend.name())
with self.assertRaises(SchemaValidationError):
job = basicaerjob.BasicAerJob(backend, job_id, _nop, self.bad_qobj)
job.submit()
def test_change_qobj_after_compile(self):
"""Test modifying Qobj parameters after compile."""
qr = QuantumRegister(3)
cr = ClassicalRegister(3)
qc1 = QuantumCircuit(qr, cr)
qc2 = QuantumCircuit(qr, cr)
qc1.h(qr[0])
qc1.cx(qr[0], qr[1])
qc1.cx(qr[0], qr[2])
qc2.h(qr)
qc1.measure(qr, cr)
qc2.measure(qr, cr)
circuits = [qc1, qc2]
qobj1 = assemble(circuits, shots=1024, seed=88)
qobj1.experiments[0].config.shots = 50
qobj1.experiments[1].config.shots = 1
self.assertTrue(qobj1.experiments[0].config.shots == 50)
self.assertTrue(qobj1.experiments[1].config.shots == 1)
self.assertTrue(qobj1.config.shots == 1024)
def test_gate_calibrations_to_dict(self):
"""Test gate calibrations to dict."""
pulse_library = [PulseLibraryItem(name='test', samples=[1j, 1j])]
valid_qobj = QasmQobj(
qobj_id='12345',
header=QobjHeader(),
config=QasmQobjConfig(shots=1024, memory_slots=2, max_credits=10,
pulse_library=pulse_library),
experiments=[
QasmQobjExperiment(
instructions=[
QasmQobjInstruction(name='u1', qubits=[1], params=[0.4])
],
config=QasmQobjConfig(
calibrations=QasmExperimentCalibrations(
gates=[
GateCalibration(name='u1', qubits=[1],
params=[0.4], instructions=[])
]
)
)
)
]
)
res = valid_qobj.to_dict(validate=True)
expected_dict = {
'qobj_id': '12345',
'type': 'QASM',
'schema_version': '1.3.0',
'header': {},
'config': {'max_credits': 10, 'memory_slots': 2, 'shots': 1024,
'pulse_library': [{'name': 'test', 'samples': [1j, 1j]}]},
'experiments': [
{'instructions': [
{'name': 'u1', 'params': [0.4], 'qubits': [1]}
],
'config': {
'calibrations': {
'gates': [{'name': 'u1', 'qubits': [1],
'params': [0.4], 'instructions': []}]}},
'header': {}}
],
}
self.assertEqual(expected_dict, res)
class TestPulseQobj(QiskitTestCase):
"""Tests for PulseQobj."""
def setUp(self):
self.valid_qobj = PulseQobj(
qobj_id='12345',
header=QobjHeader(),
config=PulseQobjConfig(shots=1024, memory_slots=2, max_credits=10,
meas_level=1,
memory_slot_size=8192,
meas_return='avg',
pulse_library=[
PulseLibraryItem(name='pulse0',
samples=[0.0 + 0.0j,
0.5 + 0.0j,
0.0 + 0.0j])
],
qubit_lo_freq=[4.9],
meas_lo_freq=[6.9],
rep_time=1000),
experiments=[
PulseQobjExperiment(instructions=[
PulseQobjInstruction(name='pulse0', t0=0, ch='d0'),
PulseQobjInstruction(name='fc', t0=5, ch='d0', phase=1.57),
PulseQobjInstruction(name='fc', t0=5, ch='d0', phase=0.),
PulseQobjInstruction(name='fc', t0=5, ch='d0', phase='P1'),
PulseQobjInstruction(name='setp', t0=10, ch='d0', phase=3.14),
PulseQobjInstruction(name='setf', t0=10, ch='d0', frequency=8.0),
PulseQobjInstruction(name='shiftf', t0=10, ch='d0', frequency=4.0),
PulseQobjInstruction(name='acquire', t0=15, duration=5,
qubits=[0], memory_slot=[0],
kernels=[
QobjMeasurementOption(name='boxcar',
params={"start_window": 0,
"stop_window": 5})
])
])
]
)
self.valid_dict = {
'qobj_id': '12345',
'type': 'PULSE',
'schema_version': '1.2.0',
'header': {},
'config': {'max_credits': 10, 'memory_slots': 2, 'shots': 1024,
'meas_level': 1,
'memory_slot_size': 8192,
'meas_return': 'avg',
'pulse_library': [{'name': 'pulse0',
'samples': [0, 0.5, 0]}
],
'qubit_lo_freq': [4.9],
'meas_lo_freq': [6.9],
'rep_time': 1000},
'experiments': [
{'instructions': [
{'name': 'pulse0', 't0': 0, 'ch': 'd0'},
{'name': 'fc', 't0': 5, 'ch': 'd0', 'phase': 1.57},
{'name': 'fc', 't0': 5, 'ch': 'd0', 'phase': 0},
{'name': 'fc', 't0': 5, 'ch': 'd0', 'phase': 'P1'},
{'name': 'setp', 't0': 10, 'ch': 'd0', 'phase': 3.14},
{'name': 'setf', 't0': 10, 'ch': 'd0', 'frequency': 8.0},
{'name': 'shiftf', 't0': 10, 'ch': 'd0', 'frequency': 4.0},
{'name': 'acquire', 't0': 15, 'duration': 5,
'qubits': [0], 'memory_slot': [0],
'kernels': [{'name': 'boxcar',
'params': {'start_window': 0,
'stop_window': 5}}
]
}
]}
]
}
def test_to_dict_against_schema(self):
"""Test dictionary representation of Qobj against its schema."""
try:
validate_qobj_against_schema(self.valid_qobj)
except jsonschema.ValidationError as validation_error:
self.fail(str(validation_error))
def test_from_dict_per_class(self):
"""Test converting to Qobj and its subclass representations given a dictionary."""
test_parameters = {
PulseQobj: (
self.valid_qobj,
self.valid_dict
),
PulseQobjConfig: (
PulseQobjConfig(meas_level=1,
memory_slot_size=8192,
meas_return='avg',
pulse_library=[
PulseLibraryItem(name='pulse0', samples=[0.1 + 0.0j])
],
qubit_lo_freq=[4.9], meas_lo_freq=[6.9],
rep_time=1000),
{'meas_level': 1,
'memory_slot_size': 8192,
'meas_return': 'avg',
'pulse_library': [{'name': 'pulse0', 'samples': [0.1 + 0j]}],
'qubit_lo_freq': [4.9],
'meas_lo_freq': [6.9],
'rep_time': 1000},
),
PulseLibraryItem: (
PulseLibraryItem(name='pulse0', samples=[0.1 + 0.0j]),
{'name': 'pulse0', 'samples': [0.1+0j]}
),
PulseQobjExperiment: (
PulseQobjExperiment(
instructions=[PulseQobjInstruction(name='pulse0', t0=0, ch='d0')]),
{'instructions': [{'name': 'pulse0', 't0': 0, 'ch': 'd0'}]}
),
PulseQobjInstruction: (
PulseQobjInstruction(name='pulse0', t0=0, ch='d0'),
{'name': 'pulse0', 't0': 0, 'ch': 'd0'}
)
}
for qobj_class, (qobj_item, expected_dict) in test_parameters.items():
with self.subTest(msg=str(qobj_class)):
self.assertEqual(qobj_item, qobj_class.from_dict(expected_dict))
def test_to_dict_per_class(self):
"""Test converting from Qobj and its subclass representations given a dictionary."""
test_parameters = {
PulseQobj: (
self.valid_qobj,
self.valid_dict
),
PulseQobjConfig: (
PulseQobjConfig(meas_level=1,
memory_slot_size=8192,
meas_return='avg',
pulse_library=[
PulseLibraryItem(name='pulse0', samples=[0.1 + 0.0j])
],
qubit_lo_freq=[4.9], meas_lo_freq=[6.9],
rep_time=1000),
{'meas_level': 1,
'memory_slot_size': 8192,
'meas_return': 'avg',
'pulse_library': [{'name': 'pulse0', 'samples': [0.1+0j]}],
'qubit_lo_freq': [4.9],
'meas_lo_freq': [6.9],
'rep_time': 1000},
),
PulseLibraryItem: (
PulseLibraryItem(name='pulse0', samples=[0.1 + 0.0j]),
{'name': 'pulse0', 'samples': [0.1+0j]}
),
PulseQobjExperiment: (
PulseQobjExperiment(
instructions=[PulseQobjInstruction(name='pulse0', t0=0, ch='d0')]),
{'instructions': [{'name': 'pulse0', 't0': 0, 'ch': 'd0'}]}
),
PulseQobjInstruction: (
PulseQobjInstruction(name='pulse0', t0=0, ch='d0'),
{'name': 'pulse0', 't0': 0, 'ch': 'd0'}
)
}
for qobj_class, (qobj_item, expected_dict) in test_parameters.items():
with self.subTest(msg=str(qobj_class)):
self.assertEqual(qobj_item.to_dict(), expected_dict)
def _nop():
pass
| 42.07601
| 99
| 0.46308
|
f4e6af7e60683534984dd926d36a717614fb2598
| 570
|
py
|
Python
|
Aulas2-31/Aula8/aula8.2.conta.matematica.py
|
matheusschuetz/TrabalhoPython
|
953957898de633f8f2776681a45a1a15b68e80b9
|
[
"MIT"
] | 1
|
2020-01-21T11:43:12.000Z
|
2020-01-21T11:43:12.000Z
|
Aulas2-31/Aula8/aula8.2.conta.matematica.py
|
matheusschuetz/TrabalhoPython
|
953957898de633f8f2776681a45a1a15b68e80b9
|
[
"MIT"
] | null | null | null |
Aulas2-31/Aula8/aula8.2.conta.matematica.py
|
matheusschuetz/TrabalhoPython
|
953957898de633f8f2776681a45a1a15b68e80b9
|
[
"MIT"
] | null | null | null |
#Mathematical operations
#Using two slashes (//) performs floor division, discarding the fractional part, so 10 // 3 = 3
#Using 10 % 3 keeps only the remainder of the division
#Exponentiation is done with ** (yes, two asterisks)
#Python has no dedicated square-root operator, so the formula with ** 0.5 is used
numero1 = 10
numero2 = 3
resultado = numero1 // numero2
resultado2 = numero1 % numero2
resultado3 = 3**3
resultado4 = 9**0.5
inss_maykon = 1000*0.08
IRFF_abioluz = 10000 * 0.275
print(inss_maykon)
print(IRFF_abioluz)
print(resultado4)
print(resultado3)
print(resultado)
print(resultado2)
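# Added for illustration (not part of the original exercise): the standard
# library provides an equivalent square root.
import math
print(math.sqrt(9))  # 3.0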
| 22.8
| 77
| 0.745614
|
500e88ed14dd871e176109ea25b44da3d33c13ea
| 36,738
|
py
|
Python
|
diofant/integrals/prde.py
|
rajkk1/diofant
|
6b361334569e4ec2e8c7d30dc324387a4ad417c2
|
[
"BSD-3-Clause"
] | null | null | null |
diofant/integrals/prde.py
|
rajkk1/diofant
|
6b361334569e4ec2e8c7d30dc324387a4ad417c2
|
[
"BSD-3-Clause"
] | null | null | null |
diofant/integrals/prde.py
|
rajkk1/diofant
|
6b361334569e4ec2e8c7d30dc324387a4ad417c2
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Algorithms for solving Parametric Risch Differential Equations.
The methods used for solving Parametric Risch Differential Equations parallel
those for solving Risch Differential Equations. See the outline in the
docstring of rde.py for more information.
The Parametric Risch Differential Equation problem is, given f, g1, ..., gm in
K(t), to determine if there exist y in K(t) and c1, ..., cm in Const(K) such
that Dy + f*y == Sum(ci*gi, (i, 1, m)), and to find such y and ci if they exist.
For the algorithms here G is a list of tuples of fractions of the terms on the
right hand side of the equation (i.e., gi in k(t)), and Q is a list of terms on
the right hand side of the equation (i.e., qi in k[t]). See the docstring of
each function for more information.
"""
import functools
import math
from ..core import Add, Dummy, Integer, Mul, Pow
from ..matrices import Matrix, eye, zeros
from ..polys import Poly, cancel, lcm, sqf_list
from ..solvers import solve
from .rde import order_at, order_at_oo, solve_poly_rde, spde
from .risch import (DecrementLevel, NonElementaryIntegralException, derivation,
frac_in, gcdex_diophantine, recognize_log_derivative,
residue_reduce, residue_reduce_derivation, splitfactor)
def prde_normal_denom(fa, fd, G, DE):
"""
Parametric Risch Differential Equation - Normal part of the denominator.
Given a derivation D on k[t] and f, g1, ..., gm in k(t) with f weakly
normalized with respect to t, return the tuple (a, b, G, h) such that
a, h in k[t], b in k<t>, G = [g1, ..., gm] in k(t)^m, and for any solution
c1, ..., cm in Const(k) and y in k(t) of Dy + f*y == Sum(ci*gi, (i, 1, m)),
q == y*h in k<t> satisfies a*Dq + b*q == Sum(ci*Gi, (i, 1, m)).
"""
dn, ds = splitfactor(fd, DE)
Gas, Gds = list(zip(*G))
gd = functools.reduce(lambda i, j: i.lcm(j), Gds, Poly(1, DE.t))
en, es = splitfactor(gd, DE)
p = dn.gcd(en)
h = en.gcd(en.diff(DE.t)).quo(p.gcd(p.diff(DE.t)))
a = dn*h
c = a*h
ba = a*fa - dn*derivation(h, DE)*fd
ba, bd = ba.cancel(fd, include=True)
G = [(c*A).cancel(D, include=True) for A, D in G]
return a, (ba, bd), G, h
def real_imag(ba, bd, gen):
"""
    Helper function to get the real and imaginary parts of a rational function
    evaluated at sqrt(-1) without actually evaluating it at sqrt(-1).
    Separates the even- and odd-power terms by checking the degree of each term
    modulo 4. Returns a tuple (ba[0], ba[1], bd) where ba[0] is the real part
    of the numerator, ba[1] is the imaginary part, and bd is the denominator
    of the rational function.
"""
bd = bd.as_poly(gen).as_dict()
ba = ba.as_poly(gen).as_dict()
denom_real = [value if key[0] % 4 == 0 else -value if key[0] % 4 == 2 else 0 for key, value in bd.items()]
denom_imag = [value if key[0] % 4 == 1 else -value if key[0] % 4 == 3 else 0 for key, value in bd.items()]
bd_real = sum(r for r in denom_real)
bd_imag = sum(r for r in denom_imag)
num_real = [value if key[0] % 4 == 0 else -value if key[0] % 4 == 2 else 0 for key, value in ba.items()]
num_imag = [value if key[0] % 4 == 1 else -value if key[0] % 4 == 3 else 0 for key, value in ba.items()]
ba_real = sum(r for r in num_real)
ba_imag = sum(r for r in num_imag)
ba = ((ba_real*bd_real + ba_imag*bd_imag).as_poly(gen), (ba_imag*bd_real - ba_real*bd_imag).as_poly(gen))
bd = (bd_real*bd_real + bd_imag*bd_imag).as_poly(gen)
return ba[0], ba[1], bd
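# Usage sketch (added for illustration; not part of the original module,
# assuming symbols() is imported from diofant). With x = symbols('x'), the
# polynomial p = x**3 + 2*x**2 + 3*x + 4 evaluated at x = sqrt(-1) equals
# (-I) + (-2) + 3*I + 4 = 2 + 2*I, and accordingly
#     real_imag(Poly(p, x), Poly(1, x), x)
# returns (Poly(2, x), Poly(2, x), Poly(1, x)), i.e. real part 2,
# imaginary part 2, denominator 1.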
def prde_special_denom(a, ba, bd, G, DE, case='auto'):
"""
Parametric Risch Differential Equation - Special part of the denominator.
    case is one of {'exp', 'tan', 'primitive'} for the hyperexponential,
hypertangent, and primitive cases, respectively. For the hyperexponential
(resp. hypertangent) case, given a derivation D on k[t] and a in k[t],
b in k<t>, and g1, ..., gm in k(t) with Dt/t in k (resp. Dt/(t**2 + 1) in
k, sqrt(-1) not in k), a != 0, and gcd(a, t) == 1 (resp.
gcd(a, t**2 + 1) == 1), return the tuple (A, B, GG, h) such that A, B, h in
k[t], GG = [gg1, ..., ggm] in k(t)^m, and for any solution c1, ..., cm in
Const(k) and q in k<t> of a*Dq + b*q == Sum(ci*gi, (i, 1, m)), r == q*h in
k[t] satisfies A*Dr + B*r == Sum(ci*ggi, (i, 1, m)).
For case == 'primitive', k<t> == k[t], so it returns (a, b, G, 1) in this
case.
"""
# TODO: Merge this with the very similar special_denom() in rde.py
if case == 'auto':
case = DE.case
if case == 'exp':
p = Poly(DE.t, DE.t)
elif case == 'tan':
p = Poly(DE.t**2 + 1, DE.t)
elif case in ['primitive', 'base']:
B = ba.quo(bd)
return a, B, G, Poly(1, DE.t)
else:
raise ValueError("case must be one of {'exp', 'tan', 'primitive', "
f"'base'}}, not {case}.")
nb = order_at(ba, p, DE.t) - order_at(bd, p, DE.t)
nc = min(order_at(Ga, p, DE.t) - order_at(Gd, p, DE.t) for Ga, Gd in G)
n = min(0, nc - min(0, nb))
if not nb:
# Possible cancellation.
if case == 'exp':
dcoeff = DE.d.quo(Poly(DE.t, DE.t))
# We are guaranteed to not have problems,
# because case != 'base'.
with DecrementLevel(DE):
alphaa, alphad = frac_in(-ba.eval(0)/bd.eval(0)/a.eval(0), DE.t)
etaa, etad = frac_in(dcoeff, DE.t)
A = parametric_log_deriv(alphaa, alphad, etaa, etad, DE)
if A is not None:
a, m, z = A
if a == 1:
n = min(n, m)
elif case == 'tan':
dcoeff = DE.d.quo(Poly(DE.t**2 + 1, DE.t))
# We are guaranteed to not have problems,
# because case != 'base'.
with DecrementLevel(DE):
betaa, alphaa, alphad = real_imag(ba, bd*a, DE.t)
betad = alphad
etaa, etad = frac_in(dcoeff, DE.t)
if recognize_log_derivative(2*betaa, betad, DE):
A = parametric_log_deriv(alphaa, alphad, etaa, etad, DE)
B = parametric_log_deriv(betaa, betad, etaa, etad, DE)
if A is not None and B is not None:
a, s, z = A
if a == 1:
n = min(n, s/2)
N = max(0, -nb)
pN = p**N
pn = p**-n # This is 1/h
A = a*pN
B = ba*pN.quo(bd) + Poly(n, DE.t)*a*derivation(p, DE).quo(p)*pN
G = [(Ga*pN*pn).cancel(Gd, include=True) for Ga, Gd in G]
h = pn
# (a*p**N, (b + n*a*Dp/p)*p**N, g1*p**(N - n), ..., gm*p**(N - n), p**-n)
return A, B, G, h
def prde_linear_constraints(a, b, G, DE):
"""
Parametric Risch Differential Equation - Generate linear constraints on the constants.
Given a derivation D on k[t], a, b, in k[t] with gcd(a, b) == 1, and
G = [g1, ..., gm] in k(t)^m, return Q = [q1, ..., qm] in k[t]^m and a
matrix M with entries in k(t) such that for any solution c1, ..., cm in
Const(k) and p in k[t] of a*Dp + b*p == Sum(ci*gi, (i, 1, m)),
(c1, ..., cm) is a solution of Mx == 0, and p and the ci satisfy
a*Dp + b*p == Sum(ci*qi, (i, 1, m)).
Because M has entries in k(t), and because Matrix doesn't play well with
Poly, M will be a Matrix of Basic expressions.
"""
m = len(G)
Gns, Gds = list(zip(*G))
d = functools.reduce(lambda i, j: i.lcm(j), Gds)
d = Poly(d, field=True)
Q = [(ga*d.quo(gd)).div(d) for ga, gd in G]
if not all(ri.is_zero for _, ri in Q):
N = max(ri.degree(DE.t) for _, ri in Q)
M = Matrix(N + 1, m, lambda i, j: Q[j][1].coeff_monomial((i,)))
else:
M = Matrix() # No constraints, return the empty matrix.
qs, _ = list(zip(*Q))
return qs, M
def constant_system(A, u, DE):
"""
Generate a system for the constant solutions.
Given a differential field (K, D) with constant field C = Const(K), a Matrix
A, and a vector (Matrix) u with coefficients in K, returns the tuple
(B, v, s), where B is a Matrix with coefficients in C and v is a vector
(Matrix) such that either v has coefficients in C, in which case s is True
and the solutions in C of Ax == u are exactly all the solutions of Bx == v,
    or v has a non-constant coefficient, in which case s is False and Ax == u has no
constant solution.
This algorithm is used both in solving parametric problems and in
determining if an element a of K is a derivative of an element of K or the
logarithmic derivative of a K-radical using the structure theorem approach.
Because Poly does not play well with Matrix yet, this algorithm assumes that
all matrix entries are Basic expressions.
"""
if not A:
return A, u
Au = A.row_join(u)
Au = Au.rref(simplify=cancel)[0]
# Warning: This will NOT return correct results if cancel() cannot reduce
# an identically zero expression to 0. The danger is that we might
# incorrectly prove that an integral is nonelementary (such as
# risch_integrate(exp((sin(x)**2 + cos(x)**2 - 1)*x**2), x).
# But this is a limitation in computer algebra in general, and implicit
# in the correctness of the Risch Algorithm is the computability of the
# constant field (actually, this same correctness problem exists in any
# algorithm that uses rref()).
#
# We therefore limit ourselves to constant fields that are computable
# via the cancel() function, in order to prevent a speed bottleneck from
# calling some more complex simplification function (rational function
# coefficients will fall into this class). Furthermore, (I believe) this
# problem will only crop up if the integral explicitly contains an
# expression in the constant field that is identically zero, but cannot
# be reduced to such by cancel(). Therefore, a careful user can avoid this
# problem entirely by being careful with the sorts of expressions that
# appear in his integrand in the variables other than the integration
# variable (the structure theorems should be able to completely decide these
# problems in the integration variable).
Au = Au.applyfunc(cancel)
A, u = Au[:, :-1], Au[:, -1]
for j in range(A.cols):
for i in range(A.rows):
if A[i, j].has(*DE.T):
# This assumes that const(F(t0, ..., tn) == const(K) == F
Ri = A[i, :]
# Rm+1; m = A.rows
Rm1 = Ri.applyfunc(lambda x: derivation(x, DE, basic=True) /
derivation(A[i, j], DE, basic=True))
Rm1 = Rm1.applyfunc(cancel)
um1 = cancel(derivation(u[i], DE, basic=True) /
derivation(A[i, j], DE, basic=True))
for s in range(A.rows):
# A[s, :] = A[s, :] - A[s, i]*A[:, m+1]
Asj = A[s, j]
A.row_op(s, lambda r, jj: cancel(r - Asj*Rm1[jj]))
                    # u[s] = u[s] - A[s, j]*u[m+1]
u.row_op(s, lambda r, jj: cancel(r - Asj*um1))
A = A.col_join(Rm1)
u = u.col_join(Matrix([um1]))
return A, u
def prde_spde(a, b, Q, n, DE):
"""
Special Polynomial Differential Equation algorithm: Parametric Version.
Given a derivation D on k[t], an integer n, and a, b, q1, ..., qm in k[t]
with deg(a) > 0 and gcd(a, b) == 1, return (A, B, Q, R, n1), with
Qq = [q1, ..., qm] and R = [r1, ..., rm], such that for any solution
c1, ..., cm in Const(k) and q in k[t] of degree at most n of
a*Dq + b*q == Sum(ci*gi, (i, 1, m)), p = (q - Sum(ci*ri, (i, 1, m)))/a has
degree at most n1 and satisfies A*Dp + B*p == Sum(ci*qi, (i, 1, m))
"""
R, Z = list(zip(*[gcdex_diophantine(b, a, qi) for qi in Q]))
A = a
B = b + derivation(a, DE)
Qq = [zi - derivation(ri, DE) for ri, zi in zip(R, Z)]
R = list(R)
n1 = n - a.degree(DE.t)
return A, B, Qq, R, n1
def prde_no_cancel_b_large(b, Q, n, DE):
"""
Parametric Poly Risch Differential Equation - No cancellation: deg(b) large enough.
Given a derivation D on k[t], n in ZZ, and b, q1, ..., qm in k[t] with
b != 0 and either D == d/dt or deg(b) > max(0, deg(D) - 1), returns
h1, ..., hr in k[t] and a matrix A with coefficients in Const(k) such that
if c1, ..., cm in Const(k) and q in k[t] satisfy deg(q) <= n and
Dq + b*q == Sum(ci*qi, (i, 1, m)), then q = Sum(dj*hj, (j, 1, r)), where
d1, ..., dr in Const(k) and A*Matrix([[c1, ..., cm, d1, ..., dr]]).T == 0.
"""
db = b.degree(DE.t)
m = len(Q)
H = [Poly(0, DE.t)]*m
for N in range(n, -1, -1): # [n, ..., 0]
for i in range(m):
si = Q[i].coeff_monomial((N + db,))/b.LC()
sitn = Poly(si*DE.t**N, DE.t)
H[i] = H[i] + sitn
Q[i] = Q[i] - derivation(sitn, DE) - b*sitn
if all(qi.is_zero for qi in Q):
dc = -1
M = zeros(0, 2)
else:
dc = max(qi.degree(DE.t) for qi in Q)
M = Matrix(dc + 1, m, lambda i, j: Q[j].coeff_monomial((i,)))
A, u = constant_system(M, zeros(dc + 1, 1), DE)
c = eye(m)
A = A.row_join(zeros(A.rows, m)).col_join(c.row_join(-c))
return H, A
def prde_no_cancel_b_small(b, Q, n, DE):
"""
Parametric Poly Risch Differential Equation - No cancellation: deg(b) small enough.
Given a derivation D on k[t], n in ZZ, and b, q1, ..., qm in k[t] with
deg(b) < deg(D) - 1 and either D == d/dt or deg(D) >= 2, returns
h1, ..., hr in k[t] and a matrix A with coefficients in Const(k) such that
if c1, ..., cm in Const(k) and q in k[t] satisfy deg(q) <= n and
Dq + b*q == Sum(ci*qi, (i, 1, m)) then q = Sum(dj*hj, (j, 1, r)) where
d1, ..., dr in Const(k) and A*Matrix([[c1, ..., cm, d1, ..., dr]]).T == 0.
"""
m = len(Q)
H = [Poly(0, DE.t)]*m
for N in range(n, 0, -1): # [n, ..., 1]
for i in range(m):
si = Q[i].coeff_monomial((N + DE.d.degree(DE.t) - 1,))/(N*DE.d.LC())
sitn = Poly(si*DE.t**N, DE.t)
H[i] = H[i] + sitn
Q[i] = Q[i] - derivation(sitn, DE) - b*sitn
if b.degree(DE.t) > 0:
for i in range(m):
si = Poly(Q[i].coeff_monomial((b.degree(DE.t),))/b.LC(), DE.t)
H[i] = H[i] + si
Q[i] = Q[i] - derivation(si, DE) - b*si
if all(qi.is_zero for qi in Q):
dc = -1
M = Matrix()
else:
dc = max(qi.degree(DE.t) for qi in Q)
M = Matrix(dc + 1, m, lambda i, j: Q[j].coeff_monomial((i,)))
A, u = constant_system(M, zeros(dc + 1, 1), DE)
c = eye(m)
A = A.row_join(zeros(A.rows, m)).col_join(c.row_join(-c))
return H, A
else:
# TODO: implement this (requires recursive param_rischDE() call)
raise NotImplementedError
def limited_integrate_reduce(fa, fd, G, DE):
"""
Simpler version of step 1 & 2 for the limited integration problem.
Given a derivation D on k(t) and f, g1, ..., gn in k(t), return
(a, b, h, N, g, V) such that a, b, h in k[t], N is a non-negative integer,
g in k(t), V == [v1, ..., vm] in k(t)^m, and for any solution v in k(t),
c1, ..., cm in C of f == Dv + Sum(ci*wi, (i, 1, m)), p = v*h is in k<t>, and
p and the ci satisfy a*Dp + b*p == g + Sum(ci*vi, (i, 1, m)). Furthermore,
if S1irr == Sirr, then p is in k[t], and if t is nonlinear or Liouvillian
over k, then deg(p) <= N.
So that the special part is always computed, this function calls the more
general prde_special_denom() automatically if it cannot determine that
S1irr == Sirr. Furthermore, it will automatically call bound_degree() when
t is linear and non-Liouvillian, which for the transcendental case, implies
    that Dt == a*t + b for some a, b in k*.
"""
dn, ds = splitfactor(fd, DE)
E = [splitfactor(gd, DE) for _, gd in G]
En, Es = list(zip(*E))
c = functools.reduce(lambda i, j: i.lcm(j), (dn,) + En) # lcm(dn, en1, ..., enm)
hn = c.gcd(c.diff(DE.t))
a = hn
b = -derivation(hn, DE)
N = 0
# These are the cases where we know that S1irr = Sirr, but there could be
# others, and this algorithm will need to be extended to handle them.
if DE.case in ['base', 'primitive', 'exp', 'tan']:
hs = functools.reduce(lambda i, j: i.lcm(j), (ds,) + Es) # lcm(ds, es1, ..., esm)
a = hn*hs
b = -derivation(hn, DE) - (hn*derivation(hs, DE)).quo(hs)
mu = min(order_at_oo(fa, fd, DE.t), min(order_at_oo(ga, gd, DE.t)
for ga, gd in G))
# So far, all the above are also nonlinear or Liouvillian, but if this
# changes, then this will need to be updated to call bound_degree()
# as per the docstring of this function (DE.case == 'other_linear').
N = hn.degree(DE.t) + hs.degree(DE.t) + max(0, 1 - DE.d.degree(DE.t) - mu)
else:
# TODO: implement this
raise NotImplementedError
V = [(-a*hn*ga).cancel(gd, include=True) for ga, gd in G]
return a, b, a, N, (a*hn*fa).cancel(fd, include=True), V
def limited_integrate(fa, fd, G, DE):
"""
Solves the limited integration problem: f = Dv + Sum(ci*wi, (i, 1, n))
"""
fa, fd = fa*Poly(1/fd.LC(), DE.t), fd.monic()
A, B, h, N, g, V = limited_integrate_reduce(fa, fd, G, DE)
V = [g] + V
g = A.gcd(B)
A, B, V = A.quo(g), B.quo(g), [via.cancel(vid*g, include=True) for
via, vid in V]
Q, M = prde_linear_constraints(A, B, V, DE)
M, _ = constant_system(M, zeros(M.rows, 1), DE)
l = M.nullspace()
if M == Matrix() or len(l) > 1:
# Continue with param_rischDE()
raise NotImplementedError('param_rischDE() is required to solve this '
'integral.')
elif len(l) == 0:
raise NonElementaryIntegralException
elif len(l) == 1:
# The c1 == 1. In this case, we can assume a normal Risch DE
if l[0][0].is_zero:
raise NonElementaryIntegralException
else:
l[0] *= 1/l[0][0]
C = sum(Poly(i, DE.t)*q for (i, q) in zip(l[0], Q))
# Custom version of rischDE() that uses the already computed
# denominator and degree bound from above.
B, C, m, alpha, beta = spde(A, B, C, N, DE)
y = solve_poly_rde(B, C, m, DE)
return (alpha*y + beta, h), list(l[0][1:])
else:
raise NotImplementedError
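# Hedged calling-convention sketch for limited_integrate() (the names below are
# placeholders, not a verified doctest): fa/fd is the integrand as a Poly
# fraction in DE.t and G is a list of (numerator, denominator) Poly pairs for
# w1, ..., wm:
#
#     result, cs = limited_integrate(fa, fd, [(w1a, w1d), (w2a, w2d)], DE)
#
# On success, ``result`` is the pair returned above (roughly v as a
# numerator/denominator pair) and ``cs`` the constants ci with
# f == Dv + Sum(ci*wi); otherwise NonElementaryIntegralException or
# NotImplementedError is raised, as in the branches above.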
def parametric_log_deriv_heu(fa, fd, wa, wd, DE, c1=None):
"""
Parametric logarithmic derivative heuristic.
    Given a derivation D on k[t], f in k(t), and a hyperexponential monomial
    theta over k(t), this heuristic either raises NotImplementedError (the
    heuristic failed), returns None (it has proven that no solution exists), or
    returns a solution (n, m, v) of the equation
    n*f == Dv/v + m*Dtheta/theta, with v in k(t)* and n, m in ZZ, n != 0.
If this heuristic fails, the structure theorem approach will need to be
used.
The argument w == Dtheta/theta
"""
# TODO: finish writing this and write tests
c1 = c1 or Dummy('c1')
p, a = fa.div(fd)
q, b = wa.div(wd)
B = max(0, derivation(DE.t, DE).degree(DE.t) - 1)
C = max(p.degree(DE.t), q.degree(DE.t))
if q.degree(DE.t) > B:
eqs = [p.coeff_monomial((i,)) - c1*q.coeff_monomial((i,)) for i in range(B + 1, C + 1)]
s = solve(eqs, c1)
if not s or not s[0][c1].is_Rational:
# deg(q) > B, no solution for c.
return
N, M = s[0][c1].as_numer_denom() # N and M are integers
N, M = Poly(N, DE.t), Poly(M, DE.t)
nfmwa = N*fa*wd - M*wa*fd
nfmwd = fd*wd
Qv = is_log_deriv_k_t_radical_in_field(N*fa*wd - M*wa*fd, fd*wd, DE,
'auto')
if Qv is None:
# (N*f - M*w) is not the logarithmic derivative of a k(t)-radical.
return
        Q, v = Qv
if Q.is_zero or v.is_zero:
return
return Q*N, Q*M, v
if p.degree(DE.t) > B:
return
c = lcm(fd.as_poly(DE.t).LC(), wd.as_poly(DE.t).LC())
l = fd.monic().lcm(wd.monic())*Poly(c, DE.t)
ln, ls = splitfactor(l, DE)
z = ls*ln.gcd(ln.diff(DE.t))
if not z.has(DE.t):
raise NotImplementedError('parametric_log_deriv_heu() '
'heuristic failed: z in k.')
u1, r1 = (fa*l.quo(fd)).div(z) # (l*f).div(z)
u2, r2 = (wa*l.quo(wd)).div(z) # (l*w).div(z)
eqs = [r1.coeff_monomial((i,)) - c1*r2.coeff_monomial((i,)) for i in range(z.degree(DE.t))]
s = solve(eqs, c1)
if not s or not s[0][c1].is_Rational:
# deg(q) <= B, no solution for c.
return
M, N = s[0][c1].as_numer_denom()
nfmwa = N.as_poly(DE.t)*fa*wd - M.as_poly(DE.t)*wa*fd
nfmwd = fd*wd
Qv = is_log_deriv_k_t_radical_in_field(nfmwa, nfmwd, DE)
if Qv is None:
# (N*f - M*w) is not the logarithmic derivative of a k(t)-radical.
return
Q, v = Qv
if Q.is_zero or v.is_zero:
return
return Q*N, Q*M, v
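# Hedged worked example of the target equation (hand-checked arithmetic only):
# with theta = exp(x**2), so Dtheta/theta == 2*x, and f = x, the identity
#     n*f == Dv/v + m*Dtheta/theta
# is satisfied by (n, m, v) == (2, 1, 1), since 2*x == 0 + 1*(2*x).  The
# heuristic above looks for such integer pairs (n, m) through the rational
# candidate c1.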
def parametric_log_deriv(fa, fd, wa, wd, DE):
# TODO: Write the full algorithm using the structure theorems.
# try:
A = parametric_log_deriv_heu(fa, fd, wa, wd, DE)
# except NotImplementedError:
# Heuristic failed, we have to use the full method.
# TODO: This could be implemented more efficiently. It isn't too
# worrisome, because the heuristic handles most difficult cases.
return A
def is_deriv_k(fa, fd, DE):
r"""
Checks if Df/f is the derivative of an element of k(t).
a in k(t) is the derivative of an element of k(t) if there exists b in k(t)
such that a = Db. Either returns (ans, u), such that Df/f == Du, or None,
which means that Df/f is not the derivative of an element of k(t). ans is
a list of tuples such that Add(*[i*j for i, j in ans]) == u. This is useful
for seeing exactly which elements of k(t) produce u.
This function uses the structure theorem approach, which says that for any
    f in K, Df/f is the derivative of an element of K if and only if there are ri
in QQ such that::
        Sum(r_i*Dt_i, i in L_K/C(x)) + Sum(r_i*Dt_i/t_i, i in E_K/C(x)) == Df/f.
Where C = Const(K), L_K/C(x) = { i in {1, ..., n} such that t_i is
transcendental over C(x)(t_1, ..., t_i-1) and Dt_i = Da_i/a_i, for some a_i
in C(x)(t_1, ..., t_i-1)* } (i.e., the set of all indices of logarithmic
monomials of K over C(x)), and E_K/C(x) = { i in {1, ..., n} such that t_i
is transcendental over C(x)(t_1, ..., t_i-1) and Dt_i/t_i = Da_i, for some
a_i in C(x)(t_1, ..., t_i-1) } (i.e., the set of all indices of
hyperexponential monomials of K over C(x)). If K is an elementary extension
over C(x), then the cardinality of L_K/C(x) U E_K/C(x) is exactly the
transcendence degree of K over C(x). Furthermore, because Const_D(K) ==
Const_D(C(x)) == C, deg(Dt_i) == 1 when t_i is in E_K/C(x) and
deg(Dt_i) == 0 when t_i is in L_K/C(x), implying in particular that E_K/C(x)
and L_K/C(x) are disjoint.
The sets L_K/C(x) and E_K/C(x) must, by their nature, be computed
recursively using this same function. Therefore, it is required to pass
them as indices to D (or T). E_args are the arguments of the
hyperexponentials indexed by E_K (i.e., if i is in E_K, then T[i] ==
exp(E_args[i])). This is needed to compute the final answer u such that
Df/f == Du.
    log(f) will be the same as u up to an additive constant. This is because
they will both behave the same as monomials. For example, both log(x) and
log(2*x) == log(x) + log(2) satisfy Dt == 1/x, because log(2) is constant.
Therefore, the term const is returned. const is such that
log(const) + f == u. This is calculated by dividing the arguments of one
logarithm from the other. Therefore, it is necessary to pass the arguments
of the logarithmic terms in L_args.
To handle the case where we are given Df/f, not f, use is_deriv_k_in_field().
"""
# Compute Df/f
dfa, dfd = fd*(fd*derivation(fa, DE) - fa*derivation(fd, DE)), fd**2*fa
dfa, dfd = dfa.cancel(dfd, include=True)
# Our assumption here is that each monomial is recursively transcendental
if len(DE.L_K) + len(DE.E_K) != len(DE.D) - 1:
if [i for i in DE.cases if i == 'tan'] or \
{i for i in DE.cases if i == 'primitive'} - set(DE.L_K):
raise NotImplementedError('Real version of the structure '
'theorems with hypertangent support is not yet implemented.')
# TODO: What should really be done in this case?
raise NotImplementedError('Nonelementary extensions not supported '
'in the structure theorems.')
E_part = [DE.D[i].quo(Poly(DE.T[i], DE.T[i])).as_expr() for i in DE.E_K]
L_part = [DE.D[i].as_expr() for i in DE.L_K]
lhs = Matrix([E_part + L_part])
rhs = Matrix([dfa.as_expr()/dfd.as_expr()])
A, u = constant_system(lhs, rhs, DE)
if all(derivation(i, DE, basic=True).is_zero for i in u) and A:
# If the elements of u are all constant
# Note: See comment in constant_system
# Also note: derivation(basic=True) calls cancel()
if not all(i.is_Rational for i in u):
raise NotImplementedError('Cannot work with non-rational '
'coefficients in this case.')
else:
terms = DE.E_args + [DE.T[i] for i in DE.L_K]
ans = list(zip(terms, u))
result = Add(*[Mul(i, j) for i, j in ans])
argterms = [DE.T[i] for i in DE.E_K] + DE.L_args
l, ld = [], []
for i, j in zip(argterms, u):
# We need to get around things like sqrt(x**2) != x
# and also sqrt(x**2 + 2*x + 1) != x + 1
i, d = i.as_numer_denom()
icoeff, iterms = sqf_list(i)
l.append(Mul(*([Pow(icoeff, j)] + [Pow(b, e*j) for b, e in iterms])))
dcoeff, dterms = sqf_list(d)
ld.append(Mul(*([Pow(dcoeff, j)] + [Pow(b, e*j) for b, e in dterms])))
const = cancel(fa.as_expr()/fd.as_expr()/Mul(*l)*Mul(*ld))
return ans, result, const
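# Hedged interface illustration (not a verified doctest): for f = exp(x**2) in
# an extension whose hyperexponential monomial has E_args entry x**2,
#     Df/f == 2*x == D(x**2),
# so Df/f is the derivative of the element u = x**2 of k(t), and ``ans`` would
# pair the term x**2 with the rational coefficient 1.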
def is_log_deriv_k_t_radical(fa, fd, DE, Df=True):
r"""
Checks if Df is the logarithmic derivative of a k(t)-radical.
b in k(t) can be written as the logarithmic derivative of a k(t) radical if
there exist n in ZZ and u in k(t) with n, u != 0 such that n*b == Du/u.
Either returns (ans, u, n, const) or None, which means that Df cannot be
written as the logarithmic derivative of a k(t)-radical. ans is a list of
tuples such that Mul(*[i**j for i, j in ans]) == u. This is useful for
seeing exactly what elements of k(t) produce u.
This function uses the structure theorem approach, which says that for any
f in K, Df is the logarithmic derivative of a K-radical if and only if there
are ri in QQ such that::
        Sum(r_i*Dt_i, i in L_K/C(x)) + Sum(r_i*Dt_i/t_i, i in E_K/C(x)) == Df.
Where C = Const(K), L_K/C(x) = { i in {1, ..., n} such that t_i is
transcendental over C(x)(t_1, ..., t_i-1) and Dt_i = Da_i/a_i, for some a_i
in C(x)(t_1, ..., t_i-1)* } (i.e., the set of all indices of logarithmic
monomials of K over C(x)), and E_K/C(x) = { i in {1, ..., n} such that t_i
is transcendental over C(x)(t_1, ..., t_i-1) and Dt_i/t_i = Da_i, for some
a_i in C(x)(t_1, ..., t_i-1) } (i.e., the set of all indices of
hyperexponential monomials of K over C(x)). If K is an elementary extension
over C(x), then the cardinality of L_K/C(x) U E_K/C(x) is exactly the
transcendence degree of K over C(x). Furthermore, because Const_D(K) ==
Const_D(C(x)) == C, deg(Dt_i) == 1 when t_i is in E_K/C(x) and
deg(Dt_i) == 0 when t_i is in L_K/C(x), implying in particular that E_K/C(x)
and L_K/C(x) are disjoint.
The sets L_K/C(x) and E_K/C(x) must, by their nature, be computed
recursively using this same function. Therefore, it is required to pass
them as indices to D (or T). L_args are the arguments of the logarithms
indexed by L_K (i.e., if i is in L_K, then T[i] == log(L_args[i])). This is
needed to compute the final answer u such that n*f == Du/u.
exp(f) will be the same as u up to a multiplicative constant. This is
because they will both behave the same as monomials. For example, both
exp(x) and exp(x + 1) == E*exp(x) satisfy Dt == t. Therefore, the term const
is returned. const is such that exp(const)*f == u. This is calculated by
subtracting the arguments of one exponential from the other. Therefore, it
is necessary to pass the arguments of the exponential terms in E_args.
To handle the case where we are given Df, not f, use
is_log_deriv_k_t_radical_in_field().
"""
if Df:
dfa, dfd = (fd*derivation(fa, DE) - fa*derivation(fd, DE)).cancel(fd**2,
include=True)
else:
dfa, dfd = fa, fd
# Our assumption here is that each monomial is recursively transcendental
if len(DE.L_K) + len(DE.E_K) != len(DE.D) - 1:
if [i for i in DE.cases if i == 'tan'] or \
{i for i in DE.cases if i == 'primitive'} - set(DE.L_K):
raise NotImplementedError('Real version of the structure '
'theorems with hypertangent support is not yet implemented.')
# TODO: What should really be done in this case?
raise NotImplementedError('Nonelementary extensions not supported '
'in the structure theorems.')
E_part = [DE.D[i].quo(Poly(DE.T[i], DE.T[i])).as_expr() for i in DE.E_K]
L_part = [DE.D[i].as_expr() for i in DE.L_K]
lhs = Matrix([E_part + L_part])
rhs = Matrix([dfa.as_expr()/dfd.as_expr()])
A, u = constant_system(lhs, rhs, DE)
if all(derivation(i, DE, basic=True).is_zero for i in u) and A:
# If the elements of u are all constant
# Note: See comment in constant_system
# Also note: derivation(basic=True) calls cancel()
if not all(i.is_Rational for i in u):
# TODO: But maybe we can tell if they're not rational, like
# log(2)/log(3). Also, there should be an option to continue
# anyway, even if the result might potentially be wrong.
raise NotImplementedError('Cannot work with non-rational '
'coefficients in this case.')
else:
n = functools.reduce(math.lcm, [i.as_numer_denom()[1] for i in u])
u *= Integer(n)
terms = [DE.T[i] for i in DE.E_K] + DE.L_args
ans = list(zip(terms, u))
result = Mul(*[Pow(i, j) for i, j in ans])
# exp(f) will be the same as result up to a multiplicative
# constant. We now find the log of that constant.
argterms = DE.E_args + [DE.T[i] for i in DE.L_K]
const = cancel(fa.as_expr()/fd.as_expr() -
Add(*[Mul(i, j/n) for i, j in zip(argterms, u)]))
return ans, result, n, const
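# Hedged worked example of the property tested above (hand-checked): for
# f = log(x)/2 in an extension containing the logarithmic monomial log(x),
#     Df == 1/(2*x)  and  2*Df == Dx/x,
# so Df is the logarithmic derivative of a k(t)-radical with n == 2 and
# u == x (exp(f) == sqrt(x), a radical of x).  Interface illustration only,
# not a verified doctest.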
def is_log_deriv_k_t_radical_in_field(fa, fd, DE, case='auto', z=None):
"""
Checks if f can be written as the logarithmic derivative of a k(t)-radical.
f in k(t) can be written as the logarithmic derivative of a k(t) radical if
there exist n in ZZ and u in k(t) with n, u != 0 such that n*f == Du/u.
Either returns (n, u) or None, which means that f cannot be written as the
logarithmic derivative of a k(t)-radical.
case is one of {'primitive', 'exp', 'tan', 'auto'} for the primitive,
hyperexponential, and hypertangent cases, respectively. If case is 'auto',
it will attempt to determine the type of the derivation automatically.
"""
fa, fd = fa.cancel(fd, include=True)
# f must be simple
n, s = splitfactor(fd, DE)
if not s.is_one:
pass
z = z or Dummy('z')
H, b = residue_reduce(fa, fd, DE, z=z)
if not b:
# I will have to verify, but I believe that the answer should be
# None in this case. This should never happen for the
# functions given when solving the parametric logarithmic
        # derivative problem when integrating elementary functions (see
# Bronstein's book, page 255), so most likely this indicates a bug.
return
roots = [(i, i.real_roots()) for i, _ in H]
if not all(len(j) == i.degree() and all(k.is_Rational for k in j) for
i, j in roots):
# If f is the logarithmic derivative of a k(t)-radical, then all the
# roots of the resultant must be rational numbers.
return
# [(a, i), ...], where i*log(a) is a term in the log-part of the integral
# of f
respolys, residues = list(zip(*roots)) or [[], []]
    # Note: this might be empty, but everything below should work fine in that
# case (it should be the same as if it were [[1, 1]])
residueterms = [(H[j][1].subs({z: i}), i) for j in range(len(H)) for
i in residues[j]]
# TODO: finish writing this and write tests
p = cancel(fa.as_expr()/fd.as_expr() - residue_reduce_derivation(H, DE, z))
p = p.as_poly(DE.t)
if p is None:
# f - Dg will be in k[t] if f is the logarithmic derivative of a k(t)-radical
return
if p.degree(DE.t) >= max(1, DE.d.degree(DE.t)):
return
if case == 'auto':
case = DE.case
if case == 'exp':
wa, wd = derivation(DE.t, DE).cancel(Poly(DE.t, DE.t), include=True)
with DecrementLevel(DE):
pa, pd = frac_in(p, DE.t, cancel=True)
wa, wd = frac_in((wa, wd), DE.t)
A = parametric_log_deriv(pa, pd, wa, wd, DE)
if A is None:
return
n, e, u = A
u *= DE.t**e
elif case == 'primitive':
with DecrementLevel(DE):
pa, pd = frac_in(p, DE.t)
A = is_log_deriv_k_t_radical_in_field(pa, pd, DE, case='auto')
if A is None:
return
n, u = A
elif case == 'base':
# TODO: we can use more efficient residue reduction from ratint()
if not fd.is_squarefree or fa.degree() >= fd.degree():
# f is the logarithmic derivative in the base case if and only if
# f = fa/fd, fd is square-free, deg(fa) < deg(fd), and
# gcd(fa, fd) == 1. The last condition is handled by cancel() above.
return
# Note: if residueterms = [], returns (1, 1)
# f had better be 0 in that case.
n = functools.reduce(math.lcm, [i.as_numer_denom()[1] for _, i in residueterms], Integer(1))
u = Mul(*[Pow(i, j*n) for i, j in residueterms])
return Integer(n), u
elif case == 'tan':
raise NotImplementedError('The hypertangent case is '
'not yet implemented for is_log_deriv_k_t_radical_in_field()')
elif case in ['other_linear', 'other_nonlinear']:
# XXX: If these are supported by the structure theorems, change to NotImplementedError.
raise ValueError(f'The {case} case is not supported in this function.')
else:
raise ValueError("case must be one of {'primitive', 'exp', 'tan', "
f"'base', 'auto'}}, not {case}")
common_denom = functools.reduce(math.lcm, [i.as_numer_denom()[1]
for i in [j for _, j in residueterms]] + [n], Integer(1))
residueterms = [(i, j*common_denom) for i, j in residueterms]
m = common_denom//n
if common_denom != n*m: # Verify exact division
raise ValueError('Inexact division')
u = cancel(u**m*Mul(*[Pow(i, j) for i, j in residueterms]))
return Integer(common_denom), u
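# Hedged worked example for the base case above (hand-checked): for
# f = 1/(2*x) in k(x),
#     2*f == 1/x == Dx/x,
# so f is the logarithmic derivative of the k(x)-radical sqrt(x), and a return
# value of the form (n, u) == (2, x) is expected.  Not a verified doctest.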
| 42.422633
| 110
| 0.569764
|
4129958a9fb9009cac7822443db500fd1e90a5f2
| 5,238
|
py
|
Python
|
sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_2/models/__init__.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | 8
|
2021-01-13T23:44:08.000Z
|
2021-03-17T10:13:36.000Z
|
sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_2/models/__init__.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | 2
|
2020-03-03T23:11:13.000Z
|
2020-03-30T18:50:55.000Z
|
sdk/textanalytics/azure-ai-textanalytics/azure/ai/textanalytics/_generated/v3_1_preview_2/models/__init__.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | 2
|
2020-05-21T22:51:22.000Z
|
2020-05-26T20:53:01.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
try:
from ._models_py3 import AspectConfidenceScoreLabel
from ._models_py3 import AspectRelation
from ._models_py3 import DetectedLanguage
from ._models_py3 import DocumentEntities
from ._models_py3 import DocumentError
from ._models_py3 import DocumentKeyPhrases
from ._models_py3 import DocumentLanguage
from ._models_py3 import DocumentLinkedEntities
from ._models_py3 import DocumentSentiment
from ._models_py3 import DocumentStatistics
from ._models_py3 import EntitiesResult
from ._models_py3 import Entity
from ._models_py3 import EntityLinkingResult
from ._models_py3 import ErrorResponse
from ._models_py3 import InnerError
from ._models_py3 import KeyPhraseResult
from ._models_py3 import LanguageBatchInput
from ._models_py3 import LanguageInput
from ._models_py3 import LanguageResult
from ._models_py3 import LinkedEntity
from ._models_py3 import Match
from ._models_py3 import MultiLanguageBatchInput
from ._models_py3 import MultiLanguageInput
from ._models_py3 import PiiDocumentEntities
from ._models_py3 import PiiEntitiesResult
from ._models_py3 import RequestStatistics
from ._models_py3 import SentenceAspect
from ._models_py3 import SentenceOpinion
from ._models_py3 import SentenceSentiment
from ._models_py3 import SentimentConfidenceScorePerLabel
from ._models_py3 import SentimentResponse
from ._models_py3 import TextAnalyticsError
from ._models_py3 import TextAnalyticsWarning
except (SyntaxError, ImportError):
from ._models import AspectConfidenceScoreLabel # type: ignore
from ._models import AspectRelation # type: ignore
from ._models import DetectedLanguage # type: ignore
from ._models import DocumentEntities # type: ignore
from ._models import DocumentError # type: ignore
from ._models import DocumentKeyPhrases # type: ignore
from ._models import DocumentLanguage # type: ignore
from ._models import DocumentLinkedEntities # type: ignore
from ._models import DocumentSentiment # type: ignore
from ._models import DocumentStatistics # type: ignore
from ._models import EntitiesResult # type: ignore
from ._models import Entity # type: ignore
from ._models import EntityLinkingResult # type: ignore
from ._models import ErrorResponse # type: ignore
from ._models import InnerError # type: ignore
from ._models import KeyPhraseResult # type: ignore
from ._models import LanguageBatchInput # type: ignore
from ._models import LanguageInput # type: ignore
from ._models import LanguageResult # type: ignore
from ._models import LinkedEntity # type: ignore
from ._models import Match # type: ignore
from ._models import MultiLanguageBatchInput # type: ignore
from ._models import MultiLanguageInput # type: ignore
from ._models import PiiDocumentEntities # type: ignore
from ._models import PiiEntitiesResult # type: ignore
from ._models import RequestStatistics # type: ignore
from ._models import SentenceAspect # type: ignore
from ._models import SentenceOpinion # type: ignore
from ._models import SentenceSentiment # type: ignore
from ._models import SentimentConfidenceScorePerLabel # type: ignore
from ._models import SentimentResponse # type: ignore
from ._models import TextAnalyticsError # type: ignore
from ._models import TextAnalyticsWarning # type: ignore
from ._text_analytics_client_enums import (
AspectRelationType,
DocumentSentimentValue,
ErrorCodeValue,
InnerErrorCodeValue,
SentenceSentimentValue,
StringIndexType,
TokenSentimentValue,
WarningCodeValue,
)
__all__ = [
'AspectConfidenceScoreLabel',
'AspectRelation',
'DetectedLanguage',
'DocumentEntities',
'DocumentError',
'DocumentKeyPhrases',
'DocumentLanguage',
'DocumentLinkedEntities',
'DocumentSentiment',
'DocumentStatistics',
'EntitiesResult',
'Entity',
'EntityLinkingResult',
'ErrorResponse',
'InnerError',
'KeyPhraseResult',
'LanguageBatchInput',
'LanguageInput',
'LanguageResult',
'LinkedEntity',
'Match',
'MultiLanguageBatchInput',
'MultiLanguageInput',
'PiiDocumentEntities',
'PiiEntitiesResult',
'RequestStatistics',
'SentenceAspect',
'SentenceOpinion',
'SentenceSentiment',
'SentimentConfidenceScorePerLabel',
'SentimentResponse',
'TextAnalyticsError',
'TextAnalyticsWarning',
'AspectRelationType',
'DocumentSentimentValue',
'ErrorCodeValue',
'InnerErrorCodeValue',
'SentenceSentimentValue',
'StringIndexType',
'TokenSentimentValue',
'WarningCodeValue',
]
| 39.681818
| 94
| 0.729668
|
56772a80ef3de5d3d8d6c2dd5b49b8b68ad04275
| 1,379
|
py
|
Python
|
st2client/st2client/utils/strutil.py
|
saucetray/st2
|
8f507d6c8d9483c8371e386fe2b7998596856fd7
|
[
"Apache-2.0"
] | 2
|
2021-08-04T01:04:06.000Z
|
2021-08-04T01:04:08.000Z
|
st2client/st2client/utils/strutil.py
|
saucetray/st2
|
8f507d6c8d9483c8371e386fe2b7998596856fd7
|
[
"Apache-2.0"
] | 1
|
2022-03-31T03:53:22.000Z
|
2022-03-31T03:53:22.000Z
|
st2client/st2client/utils/strutil.py
|
saucetray/st2
|
8f507d6c8d9483c8371e386fe2b7998596856fd7
|
[
"Apache-2.0"
] | 1
|
2019-10-11T14:42:28.000Z
|
2019-10-11T14:42:28.000Z
|
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import six
def unescape(s):
"""
Action execution escapes escaped chars in result (i.e. \n is stored as \\n).
This function unescapes those chars.
"""
if isinstance(s, six.string_types):
s = s.replace('\\n', '\n')
s = s.replace('\\r', '\r')
s = s.replace('\\"', '\"')
return s
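# Usage sketch (hedged): given s == 'a\\nb', i.e. a literal backslash followed
# by "n", unescape(s) returns 'a\nb' containing a real newline; non-string
# inputs are returned unchanged.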
def dedupe_newlines(s):
"""yaml.safe_dump converts single newlines to double.
Since we're printing this output and not loading it, we should
deduplicate them.
"""
if isinstance(s, six.string_types):
s = s.replace('\n\n', '\n')
return s
def strip_carriage_returns(s):
if isinstance(s, six.string_types):
s = s.replace('\\r', '')
s = s.replace('\r', '')
return s
| 26.519231
| 80
| 0.660624
|
07127c33c61e87e893c577dec08289698c881527
| 2,575
|
py
|
Python
|
exasol_data_science_utils_python/preprocessing/sql_to_scikit_learn/normalization/standard_scaler_factory.py
|
exasol/data-science-utils-python
|
44f4019b30f4945e14a8ee19c1a4f6bed68692d9
|
[
"MIT"
] | null | null | null |
exasol_data_science_utils_python/preprocessing/sql_to_scikit_learn/normalization/standard_scaler_factory.py
|
exasol/data-science-utils-python
|
44f4019b30f4945e14a8ee19c1a4f6bed68692d9
|
[
"MIT"
] | 27
|
2021-06-04T15:45:21.000Z
|
2022-02-14T12:16:34.000Z
|
exasol_data_science_utils_python/preprocessing/sql_to_scikit_learn/normalization/standard_scaler_factory.py
|
exasol/data-science-utils-python
|
44f4019b30f4945e14a8ee19c1a4f6bed68692d9
|
[
"MIT"
] | null | null | null |
from exasol_data_science_utils_python.preprocessing.scikit_learn.sklearn_prefitted_min_max_scaler import \
SKLearnPrefittedMinMaxScaler
from exasol_data_science_utils_python.preprocessing.scikit_learn.sklearn_prefitted_standard_scaler import \
SKLearnPrefittedStandardScaler
from exasol_data_science_utils_python.preprocessing.sql.normalization.sql_min_max_scaler import SQLMinMaxScaler
from exasol_data_science_utils_python.preprocessing.sql.normalization.sql_standard_scaler import SQLStandardScaler
from exasol_data_science_utils_python.preprocessing.sql.schema.column import Column
from exasol_data_science_utils_python.preprocessing.sql.schema.experiment_name import ExperimentName
from exasol_data_science_utils_python.preprocessing.sql.schema.schema_name import SchemaName
from exasol_data_science_utils_python.preprocessing.sql_to_scikit_learn.column_preprocessor import ColumnPreprocessor, \
SQLBasedColumnPreprocessor
from exasol_data_science_utils_python.preprocessing.sql_to_scikit_learn.column_preprocessor_factory import \
ColumnPreprocessorFactory
from exasol_data_science_utils_python.udf_utils.sql_executor import SQLExecutor
class StandardScalerFactory(ColumnPreprocessorFactory):
def create(self,
sql_executor: SQLExecutor,
source_column: Column,
target_schema: SchemaName,
experiment_name: ExperimentName) -> ColumnPreprocessor:
parameter_tables = \
SQLStandardScaler().fit(sql_executor, source_column.name, target_schema, experiment_name)
avg_stddev_parameter_tables = \
[parameter_table for parameter_table in parameter_tables
if parameter_table.purpose == SQLStandardScaler.MEAN_AND_STDDEV_TABLE]
avg_stddev_parameter_table = avg_stddev_parameter_tables[0]
result_set = sql_executor.execute(
f"""SELECT "AVG", "STDDEV" FROM {avg_stddev_parameter_table.table.name.fully_qualified()}""")
rows = result_set.fetchall()
avg_value = rows[0][0]
stddev_value = rows[0][1]
transformer = SKLearnPrefittedStandardScaler(avg_value=avg_value, stddev_value=stddev_value)
column_preprocessor = SQLBasedColumnPreprocessor(source_column,
target_schema,
experiment_name,
transformer,
parameter_tables)
return column_preprocessor
| 61.309524
| 120
| 0.736699
|
d08768711705b27a6af810c68b5f52c8b1fe5bd1
| 42,714
|
py
|
Python
|
modules/gui.py
|
buidl1/zundernet
|
b313d30fc39d851af499dd37dd5867b0834dc416
|
[
"MIT"
] | 1
|
2021-08-07T21:38:57.000Z
|
2021-08-07T21:38:57.000Z
|
modules/gui.py
|
buidl1/zundernet
|
b313d30fc39d851af499dd37dd5867b0834dc416
|
[
"MIT"
] | 11
|
2020-11-07T17:54:35.000Z
|
2022-03-05T17:10:41.000Z
|
modules/gui.py
|
buidl1/zundernet
|
b313d30fc39d851af499dd37dd5867b0834dc416
|
[
"MIT"
] | 3
|
2020-12-15T09:13:11.000Z
|
2022-03-05T12:35:55.000Z
|
# small widgets
import os,time
from PySide2.QtCore import (
QAbstractTableModel,
QModelIndex,
QPersistentModelIndex,
QSortFilterProxyModel,
Qt,
Slot,
QLocale,
QThread,
QObject,
Signal,
QTimer,
QDateTime,
QEvent
)
from PySide2.QtGui import (
QColor,
QValidator,
QDoubleValidator,
QIntValidator,
QFont,
QKeySequence,
QIcon
# QClipboard
)
from PySide2.QtWidgets import (
QApplication,
QGridLayout,
QLayout,
QHBoxLayout,
QItemDelegate,
QLabel,
QMainWindow,
QPushButton,
QStackedLayout,
QTableView,
QWidget,
QVBoxLayout,
QTabWidget,
QTextEdit,
QLineEdit,
QGroupBox,
QMessageBox,
QComboBox,
QFileDialog,
QDialog,
QProgressDialog,
QTableWidget,
QTableWidgetItem,
QHeaderView,
QAbstractScrollArea,
QSizePolicy,
QAbstractItemView,
QShortcut
)
import traceback
def copy_progress(path,deftxt,src,dest,fromfile=True):
src_size=0
if fromfile:
src_size=os.path.getsize(src)
else:
src_size=len(src )
qpd= QProgressDialog(deftxt,'Cancel',0,src_size) # parent none
qpd.setAutoClose(True)
qpd.setMinimumDuration(1000)
qpd.setWindowModality(Qt.WindowModal)
qpd.setValue(0)
progress=0
readmode='rb'
writemode='wb'
if fromfile==False:
# readmode='r'
if type(src)!=type(b''):
writemode='w'
# if True:
try:
bb1=b''
if fromfile:
with open(src, "rb") as fin:
bb1=fin.read() # read all
else:
bb1=src
chunks=max(1,int(src_size/50))
fo=open(dest, writemode)
bts=bb1[0:chunks]
# time.sleep(1)
# print('before while')
while progress<src_size:
# print('fo.write(bts)')
fo.write(bts)
progress+=chunks
if qpd.wasCanceled():
break
# print('qpd.setValue(progress)')
qpd.setValue(progress)
if progress+chunks>src_size:
chunks=src_size-progress
# print('bts')
bts=bb1[progress:progress+chunks]
# print(progress,src_size)
# if progress+chunks>=src_size*0.5 and progress<src_size*0.5:
# time.sleep(0.5)
# print('fo.close()')
fo.close()
if qpd.wasCanceled():
showinfo('Backup CANCELED','Please try again!\n' )
qpd.close()
if os.path.exists(dest):
os.remove(dest)
return ''
else:
return dest
# else:
except:
traceback.print_exc()
if progress>0:
showinfo('Backup FAILED ','Please check if there is enough space on your drive and try again!\nFailed at byte '+str(progress)+' out of '+str(src_size) )
else:
showinfo('Backup FAILED','Please check if your drive is not locked!\n' )
fo.close()
if os.path.exists(dest):
print('Exception - remove dest?',dest,os.path.exists(dest))
os.remove(dest)
return ''
def askokcancel(title, question,parent=None):
if QMessageBox.question(parent, title, question, QMessageBox.Yes|QMessageBox.No|QMessageBox.Cancel ) == QMessageBox.Yes:
return True
return False
def messagebox_showinfo(fr_title,fr_content,parent=None):
msgBox=QMessageBox()
if parent !=None:
msgBox=QMessageBox(parent)
msgBox.setSizePolicy( QSizePolicy.Expanding,QSizePolicy.Expanding )
msgBox.setStyleSheet('QPushButton {padding:3px;font-size:13px;}')
msgBox.setWindowTitle(fr_title)
msgBox.setText(fr_content)
msgBox.layout().setSizeConstraint(QLayout.SetNoConstraint)
msgBox.exec_()
def showinfo(tit,lbl,parent=None):
messagebox_showinfo(tit, lbl,parent)
def msg_yes_no(a,b,parent=None):
msgBox=QMessageBox(parent)
msgBox.setStyleSheet('QPushButton {padding:3px;font-size:13px;}')
# reply=msgBox.question(parent,a, b,QMessageBox.Yes|QMessageBox.No)
reply = QMessageBox.question(msgBox,a, b,QMessageBox.Yes|QMessageBox.No)
if reply==QMessageBox.Yes:
return True
else:
return False
# return reply #messagebox.askyesno(a, b)
# setdir
# askdirectory
def get_file_dialog(strtitle ,init_path=os.getcwd(),parent=None,name_filter=''): #init_path=os.getcwd()
if name_filter=='dir':
return QFileDialog.getExistingDirectory(parent,strtitle,init_path )
else: #if name_filter!='':
return QFileDialog.getOpenFileName(parent,strtitle,init_path,name_filter,name_filter) #parent,strtitle,init_path,'','',options=QFileDialog.ExistingFile )
# setdir
def set_file( widget,validation_fun=None,dir=False,parent=None,init_path=os.getcwd(),title="Select relevant file",on_change_fun=None):
# print('dir')
name_filter=''
if dir:
title="Select directory"
name_filter='dir'
while True:
path=get_file_dialog(title,init_path,parent,name_filter)
if path=='':
return
elif validation_fun==None or validation_fun(path):
if widget==None:
return path
change=False
if type(path)==type('asdf'):
if path!=widget.text():
change=True
widget.setText(path )
widget.setToolTip(path )
else:
if path[0]!=widget.text():
change=True
widget.setText(path[0])
widget.setToolTip(path[0])
if on_change_fun!=None and change:
on_change_fun()
parent.parent().adjustSize()
break
else:
            messagebox_showinfo("Path is not correct!", "Select appropriate path!",parent )
# copy from clipboard clipboardclipboard
def copy(btn,txt):
cli=QApplication.clipboard()
cli.setText(txt)
messagebox_showinfo('Value ready in clipboard','Value ready in clipboard:\n'+txt,btn)
# print(btn)
if btn!=None:
xtmp=btn
for ii in range(5):
if hasattr(xtmp,'parent'):
if xtmp.parent()!=None:
xtmp=xtmp.parent()
else:
break
# print('close?')
xtmp.close()
# if hasattr(btn,'parent'):
# btn.parent().close()
# if hasattr(btn,'parent'):
# btn.parent().close()
# btn.parent().parent().parent().parent().parent().close()
class CopyDialog(QDialog):
def __init__(self, parent = QWidget(),strtitle='',stroutput=('',) ):
super(CopyDialog,self).__init__(parent)
self.setMinimumWidth(256)
self.setMaximumWidth(512)
strtitle_split=strtitle.split('.')
tmptitle=strtitle
tmpcont=strtitle
if len(strtitle_split)>1:
tmptitle= strtitle_split[0]
tmpcont='.'.join(strtitle_split[1:])
self.setWindowTitle(tmptitle)
self.label = QLabel(strtitle+'\n\n'+stroutput[0])
self.label.setWordWrap(True)
self.cpbutton = Button( self,name='Copy',actionFun=copy,args=stroutput)
layout = QVBoxLayout()
layout.addWidget(self.label )
layout.addWidget(self.cpbutton )
self.setLayout(layout)
self.exec_()
def output_copy_input(parent = QWidget(),strtitle='',stroutput=('',)):
cd=CopyDialog(parent ,strtitle ,stroutput )
class CmdYesNoDialog(QDialog):
def __init__(self, parent = None,strtitle='',opt=[],cmdfun=None):
super(CmdYesNoDialog,self).__init__(parent)
self.setMinimumWidth(256)
self.setMaximumWidth(512)
self.setWindowTitle(strtitle)
self.label = QLabel(strtitle )
self.label.setWordWrap(True)
self.optbox=Combox(parent ,items_list=opt)
self.okbutton = Button( self,name='Enter',actionFun=self.enter_button, args=([cmdfun,self.optbox.currentText()],) )
layout = QVBoxLayout()
layout.addWidget(self.label )
layout.addWidget(self.optbox )
layout.addWidget(self.okbutton )
self.setLayout(layout)
self.exec_()
def enter_button(self,btn,args ):
# print(btn,args)
args[0](args[1])
self.close()
def simple_opt_box(parent = QWidget(), strtitle='',opt=[],cmdfun=None):
CmdYesNoDialog(parent,strtitle,opt,cmdfun)
# def ask_password(tmpval,title='Enter password',lbl='Enter password to decrypt file',fun_run_after=None):
class PassForm(QDialog):
def __init__(self, tmpval, first_time, parent = None,fun_run_after=None,title="Enter password to decrypt wallet and database"):
super(PassForm,self).__init__(parent)
self.setGeometry(128, 128, 512, 128)
self.tmpval=tmpval
self.fun_run_after=fun_run_after
if first_time:
title="Set up a password for wallet and database encryption"
self.setWindowTitle(title)
self.password = QLineEdit(self)
self.password.setEchoMode(QLineEdit.Password)
self.showbutton = Button( self,name='Show',actionFun=self.show_button)
self.okbutton = Button( self,name='Enter',actionFun=self.quit_button)
self.okbutton.setDefault(True)
layout = QGridLayout()
layout.addWidget(self.password,0,0)
layout.addWidget(self.showbutton,0,1)
layout.addWidget(self.okbutton,1,0,1,2)
self.setLayout(layout)
self.setAttribute(Qt.WA_QuitOnClose,False)
self.exec_()
def quit_button(self,btn ):#,args
# print(self,args,misc)
tmppas=self.password.text().strip()
if tmppas!='':
self.tmpval.append(tmppas)
if self.fun_run_after!=None:
self.fun_run_after(self.tmpval)
self.close()
def show_button(self,btn ):#,args
if btn.text()=='Show':
btn.setText('Hide')
self.password.setEchoMode(QLineEdit.Normal)
else:
btn.setText('Show')
self.password.setEchoMode(QLineEdit.Password)
# accept table widget inside
# and "go button" closes it !
class CustomDialog(QDialog):
# table_widget must have function to quit this dialog
def __init__(self, parent = None , table_widget=None, title='',wihi=None, defaultij=[]):
super(CustomDialog,self).__init__(parent)
# self.tmpval=tmpval
if wihi!=None:
self.setGeometry(128, 128, wihi[0], wihi[1])
self.setWindowTitle(title)
# self.setAttribute(Qt.WA_QuitOnClose,True)
# self.setAttribute(Qt.WA_DeleteOnClose,True)
self.setSizeGripEnabled(True)
# self.widgets=[]
# self.setSizeAdjustPolicy(QComboBox.AdjustToContents)
# QWidget {font-family:'Open Sans','Helvetica Neue',Helvetica,Arial,DejaVu }
# QFrame { border:none;}
# QTabBar {background-color:rgba(255, 255, 255, 1);}
# QPushButton {background-color:#ddd; border-style: solid; border-width: 1px; border-color: #aaa; padding:3px; margin:3px;min-width:32px;}
# QPushButton:hover {background-color:#eee; border-width: 1px; border-color: green;}
# QPushButton:pressed {background-color:lightgreen; border-width: 1px; border-color: green;}
# QComboBox {background-color:white; border-style: solid; border-width: 1px; border-color: #aaa; padding:3px; margin:3px;}
# QComboBox QAbstractItemView {background-color:white;selection-background-color: lightgray;border-style: solid; border-width: 1px; }
# QLineEdit {background-color:white; border-style: solid; border-width: 1px; border-color: #aaa; padding:3px; margin:3px;}
# QAbsractScrollArea {border-style:none}
tmp_style = """
QTableWidget {border-color:rgba(255, 255, 255, 1);}
QHeaderView { border-color:rgba(255, 255, 255, 1); }
"""
self.setStyleSheet(tmp_style)
if table_widget!=None:
layout = QVBoxLayout() #
if type(table_widget)==type([]):
for ww in table_widget:
layout.addWidget(ww)
else:
layout.addWidget(table_widget)
self.setLayout(layout)
QApplication.processEvents()
if len(defaultij)==2 and type(table_widget)!=type([]): #default button, only if table widget is single widget
table_widget.cellWidget(defaultij[0],defaultij[1]).setDefault(True)
# print(table_widget.parent())
self.setSizePolicy(QSizePolicy.Expanding,QSizePolicy.Expanding)
# self.adjustSize()
self.exec_()
def widgetAt(self,idx):
return self.layout().itemAt(idx).widget()
def keyPressEvent(self,event):
if event.key()==Qt.Key_Enter or event.key()==Qt.Key_Return:
return
# QDialog.keyPressEvent(event)
class Button(QPushButton):
def updateButton(self,name='',actionFun=None,args=None,tooltip=''):
if name!='':
self.setText(name)
if tooltip!='':
self.setToolTip(tooltip)
if actionFun!=None:
self.clicked.disconnect()
self.fun=actionFun
self.args=args
if args!=None:
self.clicked.connect(lambda : actionFun(self,*args))
else:
self.clicked.connect(lambda : actionFun(self ))
# print(actionFun,args)
def __init__(self,parent,name='',actionFun=None,args=None,tooltip=''): #item_list=[{'text'}] OR item_list=[{'text','userdata'},{}]
super(Button, self).__init__(name ,parent)
self.args=args
self.setFocusPolicy(Qt.ClickFocus)
# self.setDefault(False)
if actionFun!=None:
# print(actionFun,args)
self.fun=actionFun
if args!=None:
self.clicked.connect(lambda : actionFun(self,*args))
else:
self.clicked.connect(lambda : actionFun(self ))
if tooltip!='':
self.setToolTip(tooltip)
self.setStyleSheet('QPushButton {padding:3px;font-size:13px;}')
def set_fun(self,no_self,actionFun,*args):
if hasattr(self,'fun'):
return
self.fun=actionFun
if no_self:
self.clicked.connect(lambda: actionFun(*args))
else:
self.clicked.connect(lambda: actionFun(self,*args))
# def keyPressEvent(self,event):
# self.fun(self,args)
# def __lt__(self,other):
# if str(self.text()) < str(other.text()) :
# return True
# return False
class TextEdit(QTextEdit):
def __init__(self,parent ,txt='' ):
super(TextEdit, self).__init__(txt,parent)
class FramedWidgets(QGroupBox):
def __init__(self,parent ,name=None,widgets=[],layout=None ):
super(FramedWidgets, self).__init__(name,parent)
if layout==None:
layout=QHBoxLayout()
self.setLayout(layout)
self.widgets=[]
# self.layout=layout
for ww in widgets:
self.widgets.append( self.layout().addWidget(ww) )
# ww.setParent(self)
tmpcss="""
QGroupBox {background-color:rgba(245,245,245,1)}
QGroupBox QPushButton {background-color:#ddd; border-style: solid; border-width: 1px; border-color: #aaa;}
QGroupBox QPushButton:hover {background-color:#eee; border-width: 1px; border-color: green;}
QGroupBox QPushButton:pressed {background-color:lightgreen; border-width: 1px; border-color: green;}
"""
self.setStyleSheet(tmpcss)
def insertWidget(self, wdgt, row=-1, col=-1):
if row>-1:
self.widgets.append( self.layout().addWidget(wdgt, row, col) )
else:
self.widgets.append( self.layout().addWidget(wdgt) )
def widgetAt(self,idx):
return self.layout().itemAt(idx).widget()
class Tabs(QTabWidget):
def __init__(self,parent ):
super(Tabs, self).__init__(parent)
self.index=[]
# QTabBar::tab {background-color:rgba(245, 245, 245, 1);}
# css="""
# QTabWidget::pane {background-color:rgba(245, 245, 245, 1);}
# """
# https://www.qt.io/blog/2007/06/22/styling-the-tab-widget
# self.setStyleSheet(css)
def insertTab(self,tab_dict={'name':QWidget()}):
for k,v in tab_dict.items():
self.index.append( self.addTab(v,k) )
# v.setParent(self)
class Combox(QComboBox):
def __init__(self,parent ,items_list=[],actionFun=None,every_click=False,args=None): #item_list=[{'text'}] OR item_list=[{'text','userdata'},{}]
super(Combox, self).__init__(parent)
self.orig_items_list=items_list.copy()
self.every_click=every_click
if 'text' not in items_list[0]: # assume convert:
items_list=[{'text':il} for il in items_list]
# print(items_list)
for jj,ii in enumerate(items_list):
if 'userdata' not in ii:
ii['userdata']=ii['text']
self.addItem(ii['text'],ii['userdata'])
self.setItemData(jj, ii['text'], Qt.ToolTipRole)
self.setCurrentIndex(0)
# self.setSizeAdjustPolicy(QComboBox.AdjustToContents)
if actionFun!=None:
self.fun=actionFun
# self.currentIndexChanged.connect(lambda: actionFun(self)) # self.currentText() self.currentData(Qt.UserRole) inside actionFun will get our values
if self.every_click:
# self.activated.connect(lambda: actionFun(self))
if args!=None:
self.activated.connect(lambda : actionFun(self,*args))
else:
self.activated.connect(lambda : actionFun(self ))
else:
if args!=None:
# self.activated.connect(lambda : actionFun(self,*args))
self.currentTextChanged.connect(lambda: actionFun(self,*args))
else:
# self.activated.connect(lambda : actionFun(self ))
self.currentTextChanged.connect(lambda: actionFun(self)) # self.currentText() self.currentData(Qt.UserRole) inside actionFun will get our values
self.setStyleSheet('QComboBox {padding:3px;font-size:13px;}')
# currentIndex()
# currentText()
def setIndexForText(self,txt):
fid=self.findText(txt, Qt.MatchExactly)
if fid==-1: fid=0
self.setCurrentIndex( fid )
def set_fun(self,actionFun,*args ):
if hasattr(self,'fun'):
return
self.fun=actionFun
if self.every_click:
self.activated.connect(lambda: actionFun(self,*args))
else:
self.currentIndexChanged.connect(lambda: actionFun(self,*args ))
def replace(self,old_item_name,new_item={}): # new_item={'text'} or {'text','userdata'}
idx=self.findText(old_item_name,Qt.MatchExactly)
if 'userdata' not in new_item:
new_item['userdata']=new_item['text']
self.insertItem(idx,new_item['text'],new_item['userdata'])
self.setItemData(idx, new_item['text'], Qt.ToolTipRole)
# insert new, but do not delete old
def updateBox(self,new_items_list=[]):
# self.orig_items_list=items_list
# delete if old item not in new items
# insert if new item list contains sth additional
tmp=self.currentText()
new_box_items=[]
for ni in new_items_list:
# self.orig_items_list.remove(ni)
if type(ni)!=type({}):
ni={'text':ni,'userdata':ni}
new_box_items.append(ni['text'])
if ni['text'] not in self.orig_items_list:
# new_box_items.append(ni['text'])
# if 'userdata' not in ni:
# ni['userdata']=ni['text']
self.addItem(ni['text'],ni['userdata'])
self.setItemData(self.count()-1, ni['text'], Qt.ToolTipRole)
# else:
# for oi in self.orig_items_list:
# self.orig_items_list.remove(ni)
self.orig_items_list=new_box_items.copy()
for ii in range(self.count()-1,-1,-1):
# tt=
if self.itemText(ii) not in self.orig_items_list:
self.removeItem(ii)
self.setIndexForText( tmp)
class LineEdit(QLineEdit):
def __init__(self,parent ,field_name='',placeholder_txt='',default_value='',tooltip=''):
super(LineEdit, self).__init__(default_value,parent)
self.setPlaceholderText(placeholder_txt)
self.field_name=field_name
if tooltip!='':
self.setToolTip(tooltip)
self.setStyleSheet(" QLineEdit {background-color:white; border-style: solid; border-width: 1px; border-color: #aaa;}" )
def eventFilter(self, source, event):
# print('EVENT',source)
if (event.type() == QEvent.KeyPress and source is self):
if event.key() in [int(Qt.Key_Enter),int(Qt.Key_Return) ]:
# print('key press:', (event.key(), event.text()),int(Qt.Key_Enter),int(Qt.Key_Return))
self.label_to_set_on_enter.setText(self.text().strip())
return super(LineEdit, self).eventFilter(source, event)
def setEventFilter(self,lbl2set):
self.installEventFilter(self ) #.parent().parent().parent()
self.label_to_set_on_enter=lbl2set
def addValidator(self,vtype,rrange=[]): #vtype=int,float
qv=None
self.rrange=rrange
x1,x2=0,0
if len(rrange)==2:
x1,x2=rrange[0],rrange[1]
# print(x1,x2)
self.textChanged.connect(self.test_validation)
if vtype==float:
if len(rrange)==2:
qv=QDoubleValidator(float(x1),float(x2),8,self)
else:
qv=QDoubleValidator( self)
elif vtype==int:
if len(rrange)==2:
qv=QIntValidator(int(x1),int(x2),self)
else:
qv=QIntValidator( self)
elif vtype=='custom':
return # range[0]=custom function, int ii1, int ii2
# print(qv)
qv.setLocale(QLocale(QLocale.English)) #,QLocale.Germany
# print(qv.bottom(),qv.top(),qv.decimals(),qv.locale())
self.setValidator(qv)
def test_validation(self):
# print('validator')
if self.text().strip()=='':
return
try:
if type(self.validator()) == QDoubleValidator : #.decimals()==8: # check float
x=float(self.text().strip())
if x<self.validator().bottom() or x>self.validator().top():
messagebox_showinfo('Wrong value in '+self.field_name, 'Value out of range! Please correct value '+self.text()+' in '+self.field_name+'.\n'+self.toolTip(),self)
elif type(self.validator()) == QIntValidator :
ii=int(self.text().strip())
if ii<self.validator().bottom() or ii>self.validator().top():
messagebox_showinfo('Wrong value in '+self.field_name, 'Value out of range! Please correct value '+self.text()+' in '+self.field_name+'.\n'+self.toolTip(),self)
else:
# if True:
# if True:
if hasattr(self,'property'):
rowii=self.property('rowii')
parwidget=self.parent().parent()
# print(parwidget,rowii)
tmpaddrelem=parwidget.cellWidget(rowii,self.rrange[2])
tmpoutelem=parwidget.item(rowii,self.rrange[1])
# print(tmpaddrelem ,tmpoutelem )
self.rrange[0](self,tmpoutelem,tmpaddrelem)
except:
messagebox_showinfo('Wrong value in '+self.field_name, 'Please correct value '+self.text()+' in '+self.field_name+'.\n'+self.toolTip(),self)
class Label(QLabel):
def __init__(self,parent,txt,tooltip='',transparent=True ):
super(Label, self).__init__(txt ,parent)
# self.setSizePolicy(QSizePolicy)
if transparent:
self.setAttribute(Qt.WA_TranslucentBackground)
if tooltip!='':
self.setToolTip(tooltip)
# self.ltype=ltype
class TableCell(QTableWidgetItem):
def __init__(self, value,ttype ): # ttype= float, int, str
super(TableCell, self).__init__(value)
self.ttype=ttype
if ttype==QDateTime:
tmsplit=value.strip().split()
tmpdatetime=tmsplit[0]+'T'+tmsplit[-1] #tm[2].replace(' ','T')
self.typedvalue=QDateTime.fromString(tmpdatetime,Qt.ISODate)
else:
self.typedvalue=self.ttype(value)
# if fsize>0:
# self.setStyleSheet(" QTableWidgetItem {font-size:"+fsize+"px;}" )
# self.setSizePolicy(QSizePolicy.Minimum,QSizePolicy.Minimum)
def __lt__(self,other):
if self.ttype==str:
if self.typedvalue.lower() < other.typedvalue.lower() :
# if self.typedvalue < other.typedvalue :
return True
elif self.typedvalue < other.typedvalue :
return True
return False
# importante: row indexes can be on or off if needed
# size options:
# setSectionResizeMode per vertical or horizonal section
# or setRowHeight and setRowHeight
class Table(QTableWidget):
def __init__(self, parent=None, params={}):
rows=params['dim'][0]
cols=params['dim'][1]
# print('rows,cols',rows,cols)
super(Table, self).__init__(rows,cols,parent)
# self.rowUIDs={} row names work as ids, and row data as checker if to update
self.row_key_svalue={}
# self.col_names=[]
self.updatable=False
self.setCornerButtonEnabled(False)
self.setFocusPolicy(Qt.NoFocus)
self.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.setSizeAdjustPolicy(QAbstractScrollArea.AdjustToContents)
self.verticalHeader().hide()
self.horizontalHeader().sectionClicked.connect(self.clickDetected)
self.horizontalHeader().setMinimumHeight(32)
self.horizontalHeader().setDefaultAlignment(Qt.AlignLeft|Qt.AlignTop)
if 'maxHeight' in params:
self.setMaximumHeight(params['maxHeight'])
if 'toContent' in params:
self.horizontalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)
self.verticalHeader().setSectionResizeMode(QHeaderView.ResizeToContents)
# optional params ON OFF
if 'updatable' in params:
# if params['updatable']==True:
self.updatable=True
if 'colSizeMod' in params:
for ii,mm in enumerate(params['colSizeMod']):
if mm=='stretch':
self.horizontalHeader().setSectionResizeMode(ii,QHeaderView.Stretch)
elif mm=='toContent':
self.horizontalHeader().setSectionResizeMode(ii,QHeaderView.ResizeToContents)
else:
self.setColumnWidth(ii,mm)
if 'rowSizeMod' in params:
for ii,mm in enumerate(params['rowSizeMod']):
if mm=='stretch':
self.verticalHeader().setSectionResizeMode(ii,QHeaderView.Stretch)
elif mm=='toContent':
self.verticalHeader().setSectionResizeMode(ii,QHeaderView.ResizeToContents)
else:
self.setRowHeight(ii,mm)
# sortable=[]
if 'sortable' in params:
self.setSortingEnabled(True)
# if "show_grid" in params:
# self.setShowGrid(params["show_grid"])
self.setShowGrid(False)
self.sort_col=''
if 'default_sort_col' in params:
self.sort_col=params["default_sort_col"]
# print('self.sort_col',self.sort_col)
## STYLING CSS
tmp_header_bg_color = "rgba(245, 245, 245, 1);"
# QTableWidget::item:edit-focus { background-color:%s; border:none; color:black;}
# QWidget { background-color:%s; border:none; margin:5px; padding:5px;}
tmp_style = """
QTableWidget::item { background-color:%s; border-style:none;}
QTableWidget::item:selected { background-color:%s; border-style:none; color:black }
QHeaderView::section { background-color:%s; border-style:none; }
QTableCornerButton::section { background-color:%s; border-style:none; }
QTableWidget QPushButton {background-color:#ddd; border-style: solid; border-width: 1px; border-color: #aaa; padding:3px; margin:3px;}
QTableWidget QPushButton:hover {background-color:#eee; border-width: 1px; border-color: green;}
QTableWidget QPushButton:pressed {background-color:lightgreen; border-width: 1px; border-color: green;}
QTableWidget QComboBox {background-color:white; border-style: solid; border-width: 1px; border-color: #aaa;}
QTableWidget QComboBox QAbstractItemView {selection-background-color: lightgray;border-style: solid; border-width: 1px; }
QTableWidget QLineEdit {background-color:white; border-style: solid; border-width: 1px; border-color: #aaa;}
QTableWidget {margin:2px;padding:2px;font-size:13px; font-family:'DejaVu';border-style:none; }
QHeaderView {font-size: 13px; padding:0px; margin:0px;font-family:'DejaVu';border-style:none; }
""" % (
tmp_header_bg_color,
tmp_header_bg_color,
tmp_header_bg_color,
tmp_header_bg_color
)
# QTableWidget QLineEdit {background-color:white; border:inset;}
self.setStyleSheet(tmp_style)
# print(tmp_style)
# colnames getting zeroed
# update which column currently sorted in canse of insert to be in correct order
def clickDetected(self):
# print('click',self.sender().metaObject().className())
if hasattr(self,'col_names') and self.sender().metaObject().className()==QHeaderView.staticMetaObject.className():
tmpsender=self.sender()
# print(self.col_names)
# print(tmpsender)
# print(tmpsender.sortIndicatorSection())
self.sort_col=self.col_names[tmpsender.sortIndicatorSection()]
# print(self.sort_col)
# sortidx=self.col_names.index(self.sort_col)
# tmpord=self.horizontalHeader().sortIndicatorOrder()
# self.sortByColumn(sortidx, tmpord)
# print(tmpsender.sortIndicatorOrder(),self.sort_col)
# updateTable should mek this
def insert_at(self,widgets_line,at_line):
wasSortingEnabled=self.isSortingEnabled()
if wasSortingEnabled:
self.setSortingEnabled(False)
# ii=self.rowCount()
# self.insertRow( ii)
self.setWidgetRow(widgets_line,at_line)
# for jj, w in enumerate(widgets_line):
# self.setWidgetLine(w,ii,jj)
# if 'span' in w:
# self.setSpan(ii,jj,1,w['span'])
if wasSortingEnabled:
self.setSortingEnabled(True)
tmpord=self.horizontalHeader().sortIndicatorOrder()
tmpidx=self.horizontalHeader().sortIndicatorSection()
self.sortByColumn(tmpidx, tmpord)
# cellType= item or widget
def filtering(self,cellType,colnum,fopt ):
for ii in range(self.rowCount()):
tmpcurcat='xxx'
# print(ii,colnum,cellType,fopt)
# print(self.cellWidget(ii,colnum).text())
if cellType=='widget': tmpcurcat=self.cellWidget(ii,colnum).text()
elif cellType=='item' : tmpcurcat=self.item(ii,colnum).text()
# elif cellType=='item_date' : tmpcurcat=self.item(ii,colnum).text()
else:
print('Wrong filter value cellType')
return
t1= fopt in ['All',tmpcurcat]
t2= fopt=='Not hidden' and tmpcurcat!='Hidden'
# print(t1,t2,tmpcurcat)
if t1 or t2:
# print('show')
self.showRow(ii)
else:
# print('hide')
self.hideRow(ii)
def updateTable(self,widgets_dict,col_names=[],insert_only=False):
# print('\n\n',self,self.updatable,'\n\n')
if hasattr(self,'widgets_dict'):
if str(widgets_dict)==self.widgets_dict:
return
else:
self.widgets_dict=str(widgets_dict)
# print('update table col_names',col_names,self.col_names)
if col_names==[]:
if hasattr(self,'col_names')==False: # if first time
self.horizontalHeader().hide()
else:
self.col_names=col_names
self.setHorizontalHeaderLabels(col_names)
# sorting off
wasSortingEnabled=self.isSortingEnabled()
if wasSortingEnabled:
self.setSortingEnabled(False)
# if init - connect ii,jj with row uids
tmpCurrentRowIDs=[]
currentRowsIDs=[self.verticalHeaderItem(ll).text() for ll in range(self.rowCount()) if self.verticalHeaderItem(ll)!=None]
# print('currentRowsIDs',currentRowsIDs)
# currentRowsData=[self.verticalHeaderItem(ll).data(Qt.EditRole) for ll in range(self.rowCount()) if self.verticalHeaderItem(ll)!=None ]
tmpinit=len(currentRowsIDs)==0 and self.rowCount()>0
new_rows=[]
offset=0
if insert_only:
offset=self.rowCount()
# print('\n\n\n self.col_names', col_names)
for iii,rr in enumerate(widgets_dict):
ii=iii+offset
# print('\nupdating',ii,tmpinit,self.updatable,rr)
if tmpinit and self.updatable:# initiate row ids
self.setVerticalHeaderItem(ii,TableCell(rr['rowk'],str))
self.row_key_svalue[rr['rowk']]=str(rr['rowv'])
self.setWidgetRow(rr['rowv'],ii)
                if 'rowSizeMod' in rr:
                    if rr['rowSizeMod']=='stretch':
                        self.verticalHeader().setSectionResizeMode(ii,QHeaderView.Stretch)
                    elif rr['rowSizeMod']=='toContent':
                        self.verticalHeader().setSectionResizeMode(ii,QHeaderView.ResizeToContents)
                    else:
                        self.setRowHeight(ii,rr['rowSizeMod'])
elif self.updatable:
tmpCurrentRowIDs.append(rr['rowk'])
# print(772,rr['rowk'])
if rr['rowk'] in currentRowsIDs:
ii3=currentRowsIDs.index(rr['rowk'])
if str(rr['rowv'])!=self.row_key_svalue[rr['rowk']]:
# print('actual update',776,ii3,rr['rowv'])
self.setWidgetRow(rr['rowv'],ii3)
self.row_key_svalue[rr['rowk']]=str(rr['rowv'])
else: #insert new row
# print('new row?',currentRowsIDs)
ii2=self.rowCount()
# print('new row',ii2)
self.insertRow( ii2)
new_rows.append(ii2)
self.setVerticalHeaderItem(ii2,TableCell(rr['rowk'],str))
# print('\n\n\n\nheader set to',rr['rowk'])
# print('check',self.verticalHeaderItem(ii2).text())
self.row_key_svalue[rr['rowk']]=str(rr['rowv'])
# print('set',rr['rowv'])
self.setWidgetRow(rr['rowv'],ii2)
if 'rowSizeMod' in rr:
if rr['rowSizeMod']=='stretch':
self.verticalHeader().setSectionResizeMode(ii2,QHeaderView.Stretch)
elif rr['rowSizeMod']=='toContent':
self.verticalHeader().setSectionResizeMode(ii2,QHeaderView.ResizeToContents)
else:
self.setRowHeight(ii2,rr['rowSizeMod'])
else: #not updatable, just write cells
# print(803,rr,ii)
if 'rowk' in rr:
# print(1032,rr)
self.setWidgetRow(rr['rowv'],ii)
else:
# print('NO ROWK')
self.setWidgetRow(rr,ii)
if not insert_only:
if len(tmpCurrentRowIDs)==0 and len(currentRowsIDs)>0: # remove all
# print('remove all')
tmpl=self.rowCount()
while self.rowCount()>0:
self.removeRow(tmpl-1)
tmpl=self.rowCount()
else:
for nn,ccr in enumerate(currentRowsIDs) :
# print(nn,ccr)
if ccr not in tmpCurrentRowIDs:
for ll in range(self.rowCount()):
if self.verticalHeaderItem(ll).text()== ccr:
# this is ll row to delete
self.removeRow(ll)
break
self.adjustSize()
# resort if on
if wasSortingEnabled:
self.setSortingEnabled(True)
if self.sort_col!='' and len(self.col_names)>0:
try:
# print(920,self.sort_col,self.col_names)
sortidx=self.col_names.index(self.sort_col)
# print(sortidx)
except:
sortidx=-1
if sortidx==-1:
print('WARNING - sort column name WRONG sortidx=',sortidx)
sortidx=0
# self.horizontalHeader().setSortIndicator(sortidx)
tmpord=self.horizontalHeader().sortIndicatorOrder()
self.sortByColumn(sortidx, tmpord)
else:
tmpord=self.horizontalHeader().sortIndicatorOrder()
self.sortByColumn(sortidx, tmpord)
else:
tmpord=self.horizontalHeader().sortIndicatorOrder()
self.sortByColumn(0, tmpord)
return new_rows
def setWidgetRow(self,r,ii):
# print(r)
for jj, w in enumerate(r):
# print(w,ii,jj)
self.setWidgetCell(w,ii,jj)
def setWidgetCell(self,w,ii,jj):
# print(841,w,ii,jj)
if w=={}:
# w={'T':'LabelE'}
return
# print(870,w,ii,jj)
if w['T'] in ['LabelV','LabelC','LabelE']:
tmptxt=''
if 'L' in w:
tmptxt=str(w['L'])
cur_widget=self.item(ii,jj)
if cur_widget!=None:
if cur_widget.text()==tmptxt:
return
ttype=str
if 'ttype' in w:
ttype=w['ttype']
# fsize=-1
# if 'fontsize' in w:
# fsize=w['fontsize']
tmplbl=TableCell(tmptxt,ttype )
tmptt=''
if 'tooltip' in w:
tmptt=w['tooltip']
tmplbl.setToolTip(tmptt)
self.setItem(ii,jj,tmplbl)
# self.item(ii,jj).setProperty("rowii",ii)
# self.item(ii,jj).setData(Qt.FontRole, QFont(self.item(ii,jj).data(Qt.FontRole).family(),weight=6));
if 'span' in w:
self.setSpan(ii,jj,1,w['span'])
elif w['T'] in ['QLabel']:
cur_widget=self.cellWidget(ii,jj)
if cur_widget!=None:
if cur_widget.text()==str(w['L']):
return
tmptt=''
if 'tooltip' in w:
tmptt=w['tooltip']
ttype=str
if 'ttype' in w:
ttype=w['ttype']
self.setItem(ii,jj,TableCell(w['L'],ttype=QDateTime) )
lll=Label( None,w['L'], str(tmptt),transparent=False )
if 'style' in w:
lll.setStyleSheet("QLabel {%s}" % w['style'])
lll.setWordWrap(True)
# print("QLabel {%s}" % w['style'])
self.setCellWidget( ii,jj, lll )
self.cellWidget(ii,jj).setProperty("rowii",ii)
self.cellWidget(ii,jj).adjustSize()
elif w['T'] in ['Button']:
cur_widget=self.cellWidget(ii,jj)
# print('cur_widget',cur_widget)
if cur_widget!=None:
if cur_widget.text()==str(w['L']):
return
tmpargs=None
if 'args' in w:
tmpargs=w['args']
tmptt=''
if 'tooltip' in w:
tmptt=w['tooltip']
tmpfun=None
if 'fun' in w:
tmpfun=w['fun']
# print('button',w['L'],tmpfun,tmpargs)
# print('ii,jj,rows',ii,jj,self.rowCount())
self.setItem(ii,jj,TableCell(str(w['L']),ttype=str) )
bbb=Button( None,w['L'],tmpfun,tmpargs,str(tmptt) )
if 'IS' in w:
bbb.setEnabled(w['IS'])
if 'style' in w:
bbb.setStyleSheet("QPushButton {%s}" % w['style'])
else:
bbb.setStyleSheet("QPushButton {font-size:13px;padding:3px;}" )
# print("QPushButton {%s}" % w['style'])
# print(bbb.styleSheet() )
# print(bbb,ii,jj,self.rowCount(),self.columnCount())
self.setCellWidget( ii,jj, bbb )
# print(self.cellWidget(ii,jj))
self.cellWidget(ii,jj).setProperty("rowii",ii)
# print(self.cellWidget(ii,jj))
self.cellWidget(ii,jj).adjustSize()
# print(bbb.styleSheet() )
elif w['T'] in ['Combox']:
cur_widget=self.cellWidget(ii,jj)
# print(898,cur_widget)
if cur_widget!=None:
for jj,ci in enumerate(cur_widget.orig_items_list):
if ci not in w['V']:
cur_widget.removeItem(jj)
# print('remove',ci,'not in ',w['V'])
for ci in w['V']:
if ci not in cur_widget.orig_items_list:
cur_widget.addItem(ci,ci)
# print('add',[ci],'not in ',cur_widget.orig_items_list)
# cur_widget.updateCombox(w['V'])
# print('UPDATED COMBOBOX')
return
tmpargs=None
if 'args' in w:
tmpargs=w['args']
tmpfun=None
if 'fun' in w:
tmpfun=w['fun']
if 'every_click' in w:
self.setCellWidget( ii,jj, Combox( None,w['V'],tmpfun,every_click=True,args=tmpargs ) )
else:
self.setCellWidget( ii,jj, Combox( None,w['V'],tmpfun,args=tmpargs ) )
self.cellWidget(ii,jj).setProperty("rowii",ii)
self.cellWidget(ii,jj).adjustSize()
elif w['T'] in ['LineEdit']:
tmptt=''
if 'tooltip' in w:
tmptt=w['tooltip']
tmpdef=''
if 'V' in w:
tmpdef=w['V']
tmpname=''
if 'L' in w:
tmpname=w['L']
le=LineEdit(None, tmpname,'',tmpdef,tmptt )
if 'mode' in w:
if w['mode']=='pass':
le.setEchoMode( QLineEdit.Password)
if 'valid' in w: # {ttype:,rrange:[]}
le.addValidator( w['valid']['ttype'],w['valid']['rrange'] ) #vtype=int,float
self.setCellWidget( ii,jj, le )
self.cellWidget(ii,jj).setProperty("rowii",ii)
self.cellWidget(ii,jj).adjustSize()
elif w['T'] in ['TextEdit']:
ttt=QTextEdit()
if 'style' in w:
ttt.setStyleSheet("QTextEdit {%s}" % w['style'])
self.setCellWidget( ii,jj, ttt )
self.cellWidget(ii,jj).setProperty("rowii",ii)
self.cellWidget(ii,jj).adjustSize()
if 'span' in w:
self.setSpan(ii,jj,1,w['span'])
class ContainerWidget(QWidget):
# QStackedLayout() QVBoxLayout() QHBoxLayout()
def __init__(self, parent , layout=None, widgets=[] ): # ContainerWidget(None,gui.QVBoxLayout(),widgets=[])
super(ContainerWidget, self).__init__(parent)
if layout==None:
layout=QGridLayout()
self.setLayout(layout)
self.widgets=[]
if widgets!=[]:
for w in widgets:
self.insertWidget(w)
# w.setParent(self)
self.setStyleSheet("QWidget {background-color:rgba(245,245,245,1);}")
def insertWidget(self, wdgt, row=-1, col=-1):
if row>-1:
self.widgets.append( self.layout().addWidget(wdgt, row, col) )
else:
self.widgets.append( self.layout().addWidget(wdgt) )
def widgetAt(self,idx):
return self.layout().itemAt(idx).widget()
# def insertWidgetAt(self,idx,new_wdgt): for some reason not working correct
# tmp=self.layout().replaceWidget( self.layout().itemAt(idx).widget(), new_wdgt, Qt.FindDirectChildrenOnly )
# print('delete',tmp.widget())
# tmp.widget().deleteLater()
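# Illustrative usage sketch (added for clarity, not part of the original module).
# It assumes a QApplication has already been created by the caller and only uses
# names that appear elsewhere in this file (ContainerWidget, QGridLayout, QWidget).
def _example_container_usage():
    # build a container with an explicit grid layout and drop two children into it
    container = ContainerWidget(None, QGridLayout())
    container.insertWidget(QWidget(), row=0, col=0)  # placed at cell (0, 0)
    container.insertWidget(QWidget())                # appended without an explicit cell
    return container.widgetAt(0)                     # first child added to the layout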
class MainWindow(QMainWindow):
# QStackedLayout() QVBoxLayout() QHBoxLayout()
def __init__(self, title="Default title", geo=(128, 128, 1024, 768), central_widget=None):
super(MainWindow, self).__init__()
# if layout==None:
# layout=QGridLayout()
self.setWindowTitle(title)
# self.layout = layout
self.setGeometry(*geo)
if central_widget == None:
central_widget = QWidget()
# central_widget.setLayout(layout)
self.setCentralWidget(central_widget)
self.widgets=[]
# tmp_header_bg_color = "rgba(255, 255, 255, 1);"
# QWidget { background-color:%s; border:none; margin:5px; padding:5px;}
#
tmp_style = """ QWidget {font-family:'Open Sans','Helvetica Neue',Helvetica,Arial,DejaVu }
QFrame {border:none;}
QTabBar {background-color:rgba(255, 255, 255, 1);}
QPushButton {background-color:#ddd; border-style: solid; border-width: 1px; border-color: #aaa; padding:3px; margin:3px;min-width:32px;}
QPushButton:hover {background-color:#eee; border-width: 1px; border-color: green;}
QPushButton:pressed {background-color:lightgreen; border-width: 1px; border-color: green;}
QComboBox {background-color:white; border-style: solid; border-width: 1px; border-color: #aaa; padding:3px; margin:3px;}
QComboBox QAbstractItemView {background-color:white;selection-background-color: lightgray;border-style: solid; border-width: 1px; }
QLineEdit {background-color:white; border-style: solid; border-width: 1px; border-color: #aaa; padding:3px; margin:3px;}
            QAbstractScrollArea {border-style:none}
QTableView {border-style:none}
QAbstractItemView {border-style:none}
QHeaderView {border-style:none}
"""
# % (
# tmp_header_bg_color
# )
# QTableWidget QLineEdit {background-color:white; border:inset;}
self.setStyleSheet(tmp_style)
self.setWindowIcon( QIcon('icon.png'))
def setWorker(self,wrkr,thrd):
self.wrkr=wrkr
self.thrd=thrd
# def insertWidget(self, wdgt, row=-1, col=-1):
# if row>-1:
# self.widgets.append( self.layout().addWidget(wdgt, row, col) )
# else:
# self.widgets.append( self.layout().addWidget(wdgt) )
def setOnClose(self,closeFun):
self.on_close=closeFun
def closeEvent(self,event):
# check blockchcian status works:
if self.wrkr.block_closing:
messagebox_showinfo('Cannot close before chain status is established - please wait','Please wait for connection to be able to close the application. Cannot close before chain status is established',self)
event.ignore()
return
if not self.on_close(self):
event.ignore()
return
self.wrkr.init_app.close_thread=True
# print('1106 self.wrkr.init_app.close_thread',self.wrkr.init_app.close_thread)
self.thrd.terminate()
self.thrd.wait()
self.close()
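# Illustrative wiring sketch (added for clarity; assumes a QApplication plus a
# worker/thread pair created elsewhere, since closeEvent consults self.wrkr and
# self.thrd):
#
#   win = MainWindow(title='Demo', central_widget=ContainerWidget(None, QGridLayout()))
#   win.setOnClose(lambda w: True)   # returning True allows the window to close
#   win.setWorker(worker, thread)    # objects used during closeEvent
#   win.show()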
| 28.705645
| 207
| 0.655523
|
512455782319e2df896a7bb7e34d11c32e08bb28
| 10,573
|
py
|
Python
|
raiden/tests/integration/test_send_queued_messages.py
|
christianbrb/raiden
|
64f0715af076747b293671157e2cbbd235cab81b
|
[
"MIT"
] | null | null | null |
raiden/tests/integration/test_send_queued_messages.py
|
christianbrb/raiden
|
64f0715af076747b293671157e2cbbd235cab81b
|
[
"MIT"
] | null | null | null |
raiden/tests/integration/test_send_queued_messages.py
|
christianbrb/raiden
|
64f0715af076747b293671157e2cbbd235cab81b
|
[
"MIT"
] | null | null | null |
import gevent
import pytest
from raiden import waiting
from raiden.app import App
from raiden.constants import RoutingMode
from raiden.message_handler import MessageHandler
from raiden.network.transport import MatrixTransport
from raiden.raiden_event_handler import RaidenEventHandler
from raiden.tests.utils.detect_failure import raise_on_failure
from raiden.tests.utils.events import raiden_events_search_for_item
from raiden.tests.utils.factories import make_secret
from raiden.tests.utils.network import CHAIN
from raiden.tests.utils.protocol import HoldRaidenEventHandler
from raiden.tests.utils.transfer import assert_synced_channel_state, watch_for_unlock_failures
from raiden.transfer import views
from raiden.transfer.events import EventPaymentSentSuccess
from raiden.transfer.mediated_transfer.events import SendLockedTransfer, SendSecretReveal
from raiden.utils.secrethash import sha256_secrethash
from raiden.utils.transfers import create_default_identifier
from raiden.utils.typing import (
Address,
Balance,
BlockNumber,
List,
PaymentAmount,
PaymentID,
TargetAddress,
TokenAddress,
TokenAmount,
)
@raise_on_failure
@pytest.mark.parametrize("deposit", [10])
@pytest.mark.parametrize("channels_per_node", [CHAIN])
@pytest.mark.parametrize("number_of_nodes", [2])
def test_send_queued_messages_after_restart( # pylint: disable=unused-argument
raiden_network: List[App],
deposit: TokenAmount,
token_addresses: List[TokenAddress],
network_wait: float,
):
"""Test re-sending of undelivered messages on node restart"""
app0, app1 = raiden_network
token_address = token_addresses[0]
chain_state = views.state_from_app(app0)
token_network_registry_address = app0.raiden.default_registry.address
token_network_address = views.get_token_network_address_by_token_address(
chain_state, token_network_registry_address, token_address
)
assert token_network_address
number_of_transfers = 7
amount_per_transfer = PaymentAmount(1)
total_transferred_amount = TokenAmount(amount_per_transfer * number_of_transfers)
# Make sure none of the transfers will be sent before the restart
transfers = []
for secret_seed in range(number_of_transfers):
secret = make_secret(secret_seed)
secrethash = sha256_secrethash(secret)
transfers.append((create_default_identifier(), amount_per_transfer, secret, secrethash))
assert isinstance(app0.raiden.raiden_event_handler, HoldRaidenEventHandler) # for mypy
app0.raiden.raiden_event_handler.hold(
SendLockedTransfer, {"transfer": {"lock": {"secrethash": secrethash}}}
)
for identifier, amount, secret, _ in transfers:
app0.raiden.mediated_transfer_async(
token_network_address=token_network_address,
amount=amount,
target=TargetAddress(app1.raiden.address),
identifier=identifier,
secret=secret,
)
app0.stop()
# Restart the app. The pending transfers must be processed.
new_transport = MatrixTransport(
config=app0.raiden.config.transport, environment=app0.raiden.config.environment_type
)
raiden_event_handler = RaidenEventHandler()
message_handler = MessageHandler()
app0_restart = App(
config=app0.config,
rpc_client=app0.raiden.rpc_client,
proxy_manager=app0.raiden.proxy_manager,
query_start_block=BlockNumber(0),
default_registry=app0.raiden.default_registry,
default_secret_registry=app0.raiden.default_secret_registry,
default_service_registry=app0.raiden.default_service_registry,
default_one_to_n_address=app0.raiden.default_one_to_n_address,
default_msc_address=app0.raiden.default_msc_address,
transport=new_transport,
raiden_event_handler=raiden_event_handler,
message_handler=message_handler,
routing_mode=RoutingMode.PRIVATE,
)
del app0
app0_restart.start()
    # XXX: There is no synchronization between the app and the test, so it is
    # possible that some of the transfers complete between `start` and the
    # check below, making the test flaky.
#
# Make sure the transfers are in the queue and fail otherwise.
chain_state = views.state_from_raiden(app0_restart.raiden)
for _, _, _, secrethash in transfers:
msg = "The secrethashes of the pending transfers must be in the queue after a restart."
assert secrethash in chain_state.payment_mapping.secrethashes_to_task, msg
with watch_for_unlock_failures(*raiden_network):
exception = RuntimeError("Timeout while waiting for balance update for app0")
with gevent.Timeout(20, exception=exception):
waiting.wait_for_payment_balance(
raiden=app0_restart.raiden,
token_network_registry_address=token_network_registry_address,
token_address=token_address,
partner_address=app1.raiden.address,
target_address=app1.raiden.address,
target_balance=total_transferred_amount,
retry_timeout=network_wait,
)
exception = RuntimeError("Timeout while waiting for balance update for app1")
with gevent.Timeout(20, exception=exception):
waiting.wait_for_payment_balance(
raiden=app1.raiden,
token_network_registry_address=token_network_registry_address,
token_address=token_address,
partner_address=app0_restart.raiden.address,
target_address=app1.raiden.address,
target_balance=total_transferred_amount,
retry_timeout=network_wait,
)
assert_synced_channel_state(
token_network_address,
app0_restart,
Balance(deposit - total_transferred_amount),
[],
app1,
Balance(deposit + total_transferred_amount),
[],
)
new_transport.stop()
@raise_on_failure
@pytest.mark.parametrize("number_of_nodes", [2])
@pytest.mark.parametrize("channels_per_node", [1])
@pytest.mark.parametrize("number_of_tokens", [1])
def test_payment_statuses_are_restored( # pylint: disable=unused-argument
raiden_network: List[App], token_addresses: List[TokenAddress], network_wait: float
):
""" Test that when the Raiden is restarted, the dictionary of
`targets_to_identifiers_to_statuses` is populated before the transport
is started.
This should happen because if a client gets restarted during a transfer
cycle, once restarted, the client will proceed with the cycle
until the transfer is successfully sent. However, the dictionary
`targets_to_identifiers_to_statuses` will not contain the payment
identifiers that were originally registered when the previous client
started the transfers.
Related issue: https://github.com/raiden-network/raiden/issues/3432
"""
app0, app1 = raiden_network
token_address = token_addresses[0]
chain_state = views.state_from_app(app0)
token_network_registry_address = app0.raiden.default_registry.address
token_network_address = views.get_token_network_address_by_token_address(
chain_state, token_network_registry_address, token_address
)
assert token_network_address
target_address = TargetAddress(app1.raiden.address)
# make a few transfers from app0 to app1
amount = PaymentAmount(1)
spent_amount = TokenAmount(7)
for identifier in range(spent_amount):
# Make sure the transfer is not completed
secret = make_secret(identifier)
assert isinstance(app0.raiden.raiden_event_handler, HoldRaidenEventHandler) # for mypy
app0.raiden.raiden_event_handler.hold(SendSecretReveal, {"secret": secret})
identifier = identifier + 1
payment_status = app0.raiden.mediated_transfer_async(
token_network_address=token_network_address,
amount=amount,
target=target_address,
identifier=PaymentID(identifier),
secret=secret,
)
assert payment_status.payment_identifier == identifier
app0_restart = App(
config=app0.config,
rpc_client=app0.raiden.rpc_client,
proxy_manager=app0.raiden.proxy_manager,
query_start_block=BlockNumber(0),
default_registry=app0.raiden.default_registry,
default_secret_registry=app0.raiden.default_secret_registry,
default_service_registry=app0.raiden.default_service_registry,
default_one_to_n_address=app0.raiden.default_one_to_n_address,
default_msc_address=app0.raiden.default_msc_address,
transport=MatrixTransport(
config=app0.raiden.config.transport, environment=app0.raiden.config.environment_type
),
raiden_event_handler=RaidenEventHandler(),
message_handler=MessageHandler(),
routing_mode=RoutingMode.PRIVATE,
)
app0.stop()
del app0 # from here on the app0_restart should be used
# stop app1 to make sure that we don't complete the transfers before our checks
app1.stop()
app0_restart.start()
# Check that the payment statuses were restored properly after restart
for identifier in range(spent_amount):
identifier = PaymentID(identifier + 1)
mapping = app0_restart.raiden.targets_to_identifiers_to_statuses
status = mapping[target_address][identifier]
assert status.amount == 1
assert status.payment_identifier == identifier
assert status.token_network_address == token_network_address
app1.start() # now that our checks are done start app1 again
with watch_for_unlock_failures(*raiden_network):
waiting.wait_for_healthy(app0_restart.raiden, app1.raiden.address, network_wait)
waiting.wait_for_payment_balance(
raiden=app1.raiden,
token_network_registry_address=token_network_registry_address,
token_address=token_address,
partner_address=app0_restart.raiden.address,
target_address=Address(target_address),
target_balance=spent_amount,
retry_timeout=network_wait,
)
# Check that payments are completed after both nodes come online after restart
for identifier in range(spent_amount):
assert raiden_events_search_for_item(
app0_restart.raiden,
EventPaymentSentSuccess,
{"identifier": identifier + 1, "amount": 1},
)
| 41.140078
| 96
| 0.727513
|
75892d59f1e69d6db79bb48d1fb1c4cff92990ed
| 7,803
|
py
|
Python
|
posthog/api/cohort.py
|
leirons/posthog
|
3d8bcdfae03a1ead9aad44cd4d176ca2180c1ea4
|
[
"MIT"
] | 7,409
|
2020-02-09T23:18:10.000Z
|
2022-03-31T22:36:25.000Z
|
posthog/api/cohort.py
|
leirons/posthog
|
3d8bcdfae03a1ead9aad44cd4d176ca2180c1ea4
|
[
"MIT"
] | 5,709
|
2020-02-09T23:26:13.000Z
|
2022-03-31T20:20:01.000Z
|
posthog/api/cohort.py
|
leirons/posthog
|
3d8bcdfae03a1ead9aad44cd4d176ca2180c1ea4
|
[
"MIT"
] | 647
|
2020-02-13T17:50:55.000Z
|
2022-03-31T11:24:19.000Z
|
import csv
from typing import Any, Dict, List, Optional, cast
from django.db.models import Count, QuerySet
from rest_framework import serializers, viewsets
from rest_framework.permissions import IsAuthenticated
from rest_framework.request import Request
from sentry_sdk.api import capture_exception
from posthog.api.action import calculate_people, filter_by_type
from posthog.api.routing import StructuredViewSetMixin
from posthog.api.shared import UserBasicSerializer
from posthog.api.utils import get_target_entity
from posthog.constants import TRENDS_STICKINESS
from posthog.event_usage import report_user_action
from posthog.models import Cohort, Entity
from posthog.models.event import Event
from posthog.models.filters.filter import Filter
from posthog.models.filters.stickiness_filter import StickinessFilter
from posthog.models.user import User
from posthog.permissions import ProjectMembershipNecessaryPermissions, TeamMemberAccessPermission
from posthog.queries.stickiness import (
stickiness_fetch_people,
stickiness_format_intervals,
stickiness_process_entity_type,
)
from posthog.tasks.calculate_cohort import calculate_cohort, calculate_cohort_ch, calculate_cohort_from_list
from posthog.utils import is_clickhouse_enabled
class CohortSerializer(serializers.ModelSerializer):
created_by = UserBasicSerializer(read_only=True)
count = serializers.SerializerMethodField()
earliest_timestamp_func = lambda team_id: Event.objects.earliest_timestamp(team_id)
class Meta:
model = Cohort
fields = [
"id",
"name",
"description",
"groups",
"deleted",
"is_calculating",
"created_by",
"created_at",
"last_calculation",
"errors_calculating",
"count",
"is_static",
]
read_only_fields = [
"id",
"is_calculating",
"created_by",
"created_at",
"last_calculation",
"errors_calculating",
"count",
]
def _handle_csv(self, file, cohort: Cohort) -> None:
decoded_file = file.read().decode("utf-8").splitlines()
reader = csv.reader(decoded_file)
distinct_ids_and_emails = [row[0] for row in reader if len(row) > 0 and row]
calculate_cohort_from_list.delay(cohort.pk, distinct_ids_and_emails)
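    # Note added for clarity (inferred from the parsing above, not an official
    # format spec): the uploaded CSV is expected to carry one distinct_id or
    # email per row in its first column, e.g.
    #
    #   user_123
    #   person@example.com
    #
    # and only the first cell of each non-empty row is passed on to the
    # calculate_cohort_from_list task.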
def create(self, validated_data: Dict, *args: Any, **kwargs: Any) -> Cohort:
request = self.context["request"]
validated_data["created_by"] = request.user
if not validated_data.get("is_static"):
validated_data["is_calculating"] = True
cohort = Cohort.objects.create(team_id=self.context["team_id"], **validated_data)
if cohort.is_static:
self._handle_static(cohort, request)
else:
if is_clickhouse_enabled():
calculate_cohort_ch.delay(cohort.id)
else:
calculate_cohort.delay(cohort.id)
report_user_action(request.user, "cohort created", cohort.get_analytics_metadata())
return cohort
def _handle_static(self, cohort: Cohort, request: Request):
if request.FILES.get("csv"):
self._calculate_static_by_csv(request.FILES["csv"], cohort)
else:
try:
filter = Filter(request=request)
team = cast(User, request.user).team
target_entity = get_target_entity(request)
if filter.shown_as == TRENDS_STICKINESS:
stickiness_filter = StickinessFilter(
request=request, team=team, get_earliest_timestamp=self.earliest_timestamp_func
)
self._handle_stickiness_people(target_entity, cohort, stickiness_filter)
else:
self._handle_trend_people(target_entity, cohort, filter, request)
except Exception as e:
capture_exception(e)
raise ValueError("This cohort has no conditions")
def _calculate_static_by_csv(self, file, cohort: Cohort) -> None:
decoded_file = file.read().decode("utf-8").splitlines()
reader = csv.reader(decoded_file)
distinct_ids_and_emails = [row[0] for row in reader if len(row) > 0 and row]
calculate_cohort_from_list.delay(cohort.pk, distinct_ids_and_emails)
def _calculate_static_by_people(self, people: List[str], cohort: Cohort) -> None:
calculate_cohort_from_list.delay(cohort.pk, people)
def _handle_stickiness_people(self, target_entity: Entity, cohort: Cohort, filter: StickinessFilter) -> None:
events = stickiness_process_entity_type(target_entity, cohort.team, filter)
events = stickiness_format_intervals(events, filter)
people = stickiness_fetch_people(events, cohort.team, filter)
ids = [person.distinct_ids[0] for person in people if len(person.distinct_ids)]
self._calculate_static_by_people(ids, cohort)
def _handle_trend_people(self, target_entity: Entity, cohort: Cohort, filter: Filter, request: Request) -> None:
events = filter_by_type(entity=target_entity, team=cohort.team, filter=filter)
people = calculate_people(team=cohort.team, events=events, filter=filter, request=request)
ids = [person.distinct_ids[0] for person in people if len(person.distinct_ids)]
self._calculate_static_by_people(ids, cohort)
def update(self, cohort: Cohort, validated_data: Dict, *args: Any, **kwargs: Any) -> Cohort: # type: ignore
request = self.context["request"]
cohort.name = validated_data.get("name", cohort.name)
cohort.description = validated_data.get("description", cohort.description)
cohort.groups = validated_data.get("groups", cohort.groups)
cohort.is_static = validated_data.get("is_static", cohort.is_static)
deleted_state = validated_data.get("deleted", None)
is_deletion_change = deleted_state is not None and cohort.deleted != deleted_state
if is_deletion_change:
cohort.deleted = deleted_state
if not cohort.is_static and not is_deletion_change:
cohort.is_calculating = True
cohort.save()
if not deleted_state:
if cohort.is_static:
# You can't update a static cohort using the trend/stickiness thing
if request.FILES.get("csv"):
self._calculate_static_by_csv(request.FILES["csv"], cohort)
else:
if is_clickhouse_enabled():
calculate_cohort_ch.delay(cohort.id)
else:
calculate_cohort.delay(cohort.id)
report_user_action(
request.user,
"cohort updated",
{**cohort.get_analytics_metadata(), "updated_by_creator": request.user == cohort.created_by},
)
return cohort
def get_count(self, action: Cohort) -> Optional[int]:
if hasattr(action, "count"):
return action.count # type: ignore
return None
class CohortViewSet(StructuredViewSetMixin, viewsets.ModelViewSet):
queryset = Cohort.objects.all()
serializer_class = CohortSerializer
permission_classes = [IsAuthenticated, ProjectMembershipNecessaryPermissions, TeamMemberAccessPermission]
def get_queryset(self) -> QuerySet:
queryset = super().get_queryset()
if self.action == "list":
queryset = queryset.filter(deleted=False)
queryset = queryset.annotate(count=Count("people"))
return queryset.select_related("created_by").order_by("-created_at")
class LegacyCohortViewSet(CohortViewSet):
legacy_team_compatibility = True
| 42.407609
| 116
| 0.674869
|
d931572630fb28f9f55c08d4b1ae6686e231e5ec
| 1,727
|
py
|
Python
|
pipelines/pyright.nox.py
|
Victorsitou/hikari
|
8a1e017fd10421011f6d03ca4d94d6ec964e7290
|
[
"MIT"
] | 520
|
2020-10-12T22:53:55.000Z
|
2022-03-30T17:59:53.000Z
|
pipelines/pyright.nox.py
|
Victorsitou/hikari
|
8a1e017fd10421011f6d03ca4d94d6ec964e7290
|
[
"MIT"
] | 319
|
2020-10-11T19:04:03.000Z
|
2022-03-31T16:55:28.000Z
|
pipelines/pyright.nox.py
|
Victorsitou/hikari
|
8a1e017fd10421011f6d03ca4d94d6ec964e7290
|
[
"MIT"
] | 85
|
2020-10-17T20:25:47.000Z
|
2022-03-31T15:19:40.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Nekokatt
# Copyright (c) 2021 davfsa
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Pyright integrations."""
from pipelines import config
from pipelines import nox
@nox.session()
def verify_types(session: nox.Session) -> None:
"""Verify the "type completeness" of types exported by the library using Pyright."""
session.install("-r", "dev-requirements.txt")
session.install(".")
# session.env["PYRIGHT_PYTHON_GLOBAL_NODE"] = "off"
session.env["PYRIGHT_PYTHON_FORCE_VERSION"] = config.PYRIGHT_VERSION
session.run("python", "-m", "pyright", "--version")
session.run("python", "-m", "pyright", "--verifytypes", "hikari", "--ignoreexternal")
| 46.675676
| 89
| 0.744065
|
78b11baeaa0b02807f1d363549198da07a41c2e7
| 13,378
|
py
|
Python
|
tests/st/graph_kernel/model/test_split.py
|
chncwang/mindspore
|
6dac92aedf0aa1541d181e6aedab29aaadc2dafb
|
[
"Apache-2.0"
] | null | null | null |
tests/st/graph_kernel/model/test_split.py
|
chncwang/mindspore
|
6dac92aedf0aa1541d181e6aedab29aaadc2dafb
|
[
"Apache-2.0"
] | null | null | null |
tests/st/graph_kernel/model/test_split.py
|
chncwang/mindspore
|
6dac92aedf0aa1541d181e6aedab29aaadc2dafb
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===========================================================================
"""Test split"""
import model
from model import model as estimate
from model import graph_split as split
def get_nodes(sp, ops):
"""Get nodes"""
if isinstance(ops[0], str):
new_ops = []
for t in ops:
for op in sp.graph.ops:
if op.output.name == t:
new_ops.append(op)
break
else:
print("ERROR: not found op: ", t)
ops = new_ops
return [sp.nodes[sp.graph.ops.index(op)] for op in ops]
def first_connected(sp, space):
for cand in space:
nodes = [sp.nodes[i] for i in cand[0]]
graphs = sp.resolve_connnected_graphs(nodes)
if len(graphs) != 1:
print("connect check faied: ", nodes)
return False
return True
def split_format(sp, cand):
names = []
for ids in cand:
ops = []
for i in ids:
ops.append(sp.graph.ops[i].output.name)
names.append(','.join(ops))
return '|'.join(names)
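# Note added for clarity: split_format renders a split candidate as the output
# names of its ops, comma-separated within a group and '|'-separated between
# groups; e.g. a candidate [[0, 1], [2]] over ops named a, b, c becomes 'a,b|c'.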
def graph_1():
''' ring, no succ_dep, no prev '''
gb = model.GraphBuilder()
with gb.graph_scope("main"):
a = gb.tensor([10240, 16], "float32", name="a")
b = gb.emit("Abs", a, 'b')
c = gb.emit("Abs", b, 'c')
d = gb.emit("Abs", c, 'd')
gb.emit('Add', [b, d], 'e')
return gb.get()[0]
def graph_2():
''' ring, succ_dep, no prev '''
gb = model.GraphBuilder()
with gb.graph_scope("main"):
a0 = gb.tensor([10240, 16], "float32", name="a0")
a = gb.emit("Abs", a0, 'a')
b = gb.emit("Abs", a, 'b')
c = gb.emit("Abs", a, 'c')
d = gb.emit("Abs", b, 'd')
e = gb.emit('Add', [c, d], 'e')
gb.emit("Abs", e, 'f')
return gb.get()[0]
def graph_3():
''' no ring, 1 sibling node '''
gb = model.GraphBuilder()
with gb.graph_scope("main"):
a0 = gb.tensor([10240, 16], "float32", name="a0")
a1 = gb.tensor([10240, 16], "float32", name="a1")
b = gb.emit("Abs", a0, 'b')
c = gb.emit("Abs", a1, 'c')
d = gb.emit("Abs", b, 'd')
e = gb.emit('Add', [c, d], 'e')
gb.emit("Abs", e, 'f')
return gb.get()[0]
def graph_4():
''' no ring, 2 sibling nodes in 1 step '''
gb = model.GraphBuilder()
with gb.graph_scope("main"):
a0 = gb.tensor([10240, 16], "float32", name="a0")
a1 = gb.tensor([10240, 16], "float32", name="a1")
b = gb.emit("Abs", a0, 'b')
c = gb.emit("Abs", b, 'c')
d = gb.emit("Abs", a1, 'd')
e = gb.emit("Abs", d, 'e')
f = gb.emit('Add', [c, e], 'f')
gb.emit('Abs', f, 'g')
h = gb.emit("Abs", d, 'h')
i = gb.emit('Add', [c, h], 'i')
gb.emit("Abs", i, 'j')
return gb.get()[0]
def graph_5():
''' no ring, 2 sibling step '''
gb = model.GraphBuilder()
with gb.graph_scope("main") as g:
a0 = gb.tensor([10240, 16], "float32", name="a0")
a1 = gb.tensor([10240, 16], "float32", name="a1")
a2 = gb.tensor([10240, 16], "float32", name="a2")
a = gb.emit("Abs", a0, 'a')
b = gb.emit("Abs", a1, 'b')
c = gb.emit("Abs", b, 'c')
d = gb.emit('Add', [a, c], 'd')
gb.emit("Abs", d, 'e')
f = gb.emit("Abs", a2, 'f')
g = gb.emit('Add', [c, f], 'g')
gb.emit("Abs", g, 'h')
return gb.get()[0]
def graph_6():
''' no ring, tree down '''
gb = model.GraphBuilder()
with gb.graph_scope("main"):
a0 = gb.tensor([10240, 16], "float32", name="a0")
a = gb.emit("Abs", a0, 'a')
b = gb.emit("Abs", a, 'b')
gb.emit("Abs", b, 'd')
gb.emit("Abs", b, 'e')
c = gb.emit("Abs", a, 'c')
gb.emit("Abs", c, 'f')
gb.emit("Abs", c, 'g')
return gb.get()[0]
def graph_pat_1():
''' split by reduce '''
gb = model.GraphBuilder()
with gb.graph_scope("main"):
a0 = gb.tensor([1024, 1024], "float32", name="a0")
a = gb.emit("Abs", a0, 'a')
b = gb.emit("Abs", a, 'b')
c = gb.emit("ReduceSum", b, 'c', attrs={'reduce_axis': (1,)})
d = gb.emit("Sqrt", c, 'd')
gb.emit("Sqrt", d, 'f')
return gb.get()[0]
def graph_pat_2():
''' multi output '''
gb = model.GraphBuilder()
with gb.graph_scope("main"):
a0 = gb.tensor([1024, 1024], "float32", name="a0")
a = gb.emit("Abs", a0, 'a')
b = gb.emit("Abs", a, 'b')
gb.emit("ReduceSum", b, 'c', attrs={'reduce_axis': (1,)})
gb.emit("ReduceSum", b, 'e', attrs={'reduce_axis': (1,)})
return gb.get()[0]
def graph_pat_3():
''' two reduce '''
gb = model.GraphBuilder()
with gb.graph_scope("main"):
a0 = gb.tensor([1024, 1024], "float32", name="a0")
a = gb.emit("Abs", a0, 'a')
b = gb.emit("Abs", a, 'b')
c = gb.emit("ReduceSum", b, 'c', attrs={'reduce_axis': (1,)})
d = gb.emit("Abs", c, 'd')
gb.emit("ReduceSum", d, 'e', attrs={'reduce_axis': (1,)})
return gb.get()[0]
def graph_pat_4():
''' elewise + broadcast '''
gb = model.GraphBuilder()
with gb.graph_scope("main"):
a0 = gb.tensor([1, 1024], "float32", name="a0")
a2 = gb.tensor([1014, 1024], "float32", name="a2")
a = gb.emit("Abs", a0, 'a')
b = gb.emit("Abs", a, 'b')
c = gb.emit("Abs", b, 'c')
d = gb.emit("Abs", c, 'd')
e = gb.emit("Abs", d, 'e')
f = gb.emit("Abs", e, 'f')
g0 = gb.emit("Abs", a2, 'g0')
# g0 = gb.emit("Abs", g0, 'g0')
# g0 = gb.emit("Abs", g0, 'g0')
# g0 = gb.emit("Abs", g0, 'g0')
# g0 = gb.emit("Abs", g0, 'g0')
# g0 = gb.emit("Abs", g0, 'g0')
# g0 = gb.emit("Abs", g0, 'g0')
g0 = gb.emit("Abs", g0, 'g0')
g1 = gb.emit('Add', [f, g0], 'g1')
g2 = gb.emit("Abs", g1, 'g2')
g3 = gb.emit("Abs", g2, 'g3')
g4 = gb.emit("Abs", g3, 'g4')
gb.emit("Abs", g4, 'g5')
return gb.get()[0]
def graph_pat_5():
''' reduce + reshape '''
gb = model.GraphBuilder()
with gb.graph_scope("main"):
a0 = gb.tensor([1024, 1024], "float32", name="a0")
a = gb.emit("Abs", a0, 'a')
b = gb.emit("Abs", a, 'b')
c = gb.emit("ReduceSum", b, 'c', attrs={'reduce_axis': (1,)})
d = gb.emit("Abs", c, 'd')
e = gb.tensor([512, 2048], "float32", name="e")
gb.op("Reshape", e, [d])
return gb.get()[0]
def graph_pat_6():
    ''' diamond '''
gb = model.GraphBuilder()
with gb.graph_scope("main"):
a0 = gb.tensor([1024, 1024], "float32", name="a0")
a = gb.emit("Abs", a0, 'a')
b = gb.emit("Abs", a, 'b')
c = gb.emit("Abs", a, 'c')
gb.emit("Add", [b, c], 'd')
gb.emit("Abs", c, 'f') # broke dimond
return gb.get()[0]
def graph_pat_7():
''' buddy of control op '''
gb = model.GraphBuilder()
with gb.graph_scope("main"):
a0 = gb.tensor([1024, 1024], "float32", name="a0")
a1 = gb.tensor([1024, 1024], "float32", name="a1")
a = gb.emit("Abs", a0, 'a')
b = gb.emit("Abs", a1, 'b')
c = gb.emit("make_tuple", [a, b], 'c')
d = gb.tensor([1024, 1024], "float32", name="d")
gb.op("AddN", d, [c])
gb.emit("Abs", d, 'f')
graph = gb.get()[0]
estimate.AddControlBuddy().visit_graph(graph)
return graph
def graph_pat_8():
''' reduce + reshape '''
gb = model.GraphBuilder()
with gb.graph_scope("main"):
a0 = gb.tensor([1024, 1024], "float32", name="a0")
a = gb.emit("Abs", a0, 'a')
b = gb.emit("Abs", a, 'b')
#c = gb.emit("Abs", b, 'b')
c = gb.emit("ReduceSum", b, 'c', attrs={'reduce_axis': (1,)})
gb.emit("Add", [b, c], 'd')
return gb.get()[0]
def graph_pat_9():
''' scalar '''
gb = model.GraphBuilder()
with gb.graph_scope("main"):
a0 = gb.tensor([1024, 1024], "float32", name="a0")
a1 = gb.tensor([1], "float32", name="a1")
a = gb.emit("Maximum", a1, 'a')
b = gb.emit("Mul", [a, a1], 'b')
gb.emit('Mul', [b, a0], 'c')
return gb.get()[0]
def graph_mo_1():
gb = model.GraphBuilder()
with gb.graph_scope("main"):
a0 = gb.tensor([1024, 1024], "float32", name="a0")
a = gb.emit("Abs", a0, 'a')
gb.emit("Abs", a, 'b')
gb.emit("Abs", a, 'c')
return gb.get()[0]
def graph_mo_2():
gb = model.GraphBuilder()
with gb.graph_scope("main") as g:
a0 = gb.tensor([1024, 1024], "float32", name="a0")
a = gb.emit("Abs", a0, 'a')
b = gb.emit("Abs", a, 'b')
c = gb.emit("Abs", b, 'c')
g.set_output(b, c)
return gb.get()[0]
def graph_mo_3():
''' two reduce '''
gb = model.GraphBuilder()
with gb.graph_scope("main") as g:
a0 = gb.tensor([1024, 1024], "float32", name="a0")
a = gb.emit("Abs", a0, 'a')
b = gb.emit("Abs", a, 'b')
c = gb.emit("ReduceSum", b, 'c', attrs={'reduce_axis': (1,)})
g.set_output(b, c)
return gb.get()[0]
def graph_mo_4():
''' two reduce '''
gb = model.GraphBuilder()
with gb.graph_scope("main") as g:
a0 = gb.tensor([1024, 1024], "float32", name="a0")
a = gb.emit("Abs", a0, 'a')
b = gb.emit("Abs", a, 'b')
c = gb.emit("ReduceSum", a, 'c', attrs={'reduce_axis': (1,)})
g.set_output(b, c)
return gb.get()[0]
def test_binary_split():
"""Test binary split"""
def _test(graph, expected_space_size):
print("********* test on graph : {} *************".format(graph.name))
sp = split.GraphSpliter(graph)
nodes = get_nodes(sp, graph.ops)
space = sp.binary_split(nodes)
for i, s in enumerate(space):
print('{}: {}'.format(i, split_format(sp, s)))
assert len(space) == expected_space_size
assert first_connected(sp, space)
_test(graph_1(), 3)
_test(graph_2(), 7)
_test(graph_3(), 4)
_test(graph_4(), 17)
_test(graph_5(), 11)
_test(graph_6(), 24)
def test_resolve_connnected_graphs():
"""Test resolve connected graphs"""
graph = graph_5()
sp = split.GraphSpliter(graph)
n1 = get_nodes(sp, ['a', 'd', 'b', 'c'])
graphs = sp.resolve_connnected_graphs(n1)
print(graphs)
assert len(graphs) == 1
n2 = get_nodes(sp, ['a', 'd', 'e', 'f', 'g'])
graphs = sp.resolve_connnected_graphs(n2)
print(graphs)
assert len(graphs) == 2
n3 = get_nodes(sp, ['a', 'b', 'f'])
graphs = sp.resolve_connnected_graphs(n3)
print(graphs)
assert len(graphs) == 3
def test_split():
"""Test split"""
def _print_cost(name, c):
print("%s\tdma_ratio=%f, saturation=%f, mix_saturation=%f, type=%s" %
(name, c.dma_ratio(), c.saturation(), c.mix_saturation(), c.cost_type()))
def _test(graph):
print("********* test on graph : {} *************".format(graph.name))
sp = split.GraphSpliter(graph)
subgraphs = sp.split(False)
print('----- main graph -------')
print(graph)
for i, g in enumerate(subgraphs):
print(' -------- subgraph {} -------'.format(i))
print(g)
print("--------- cost ------------")
cost, _ = model.estimate(graph)
_print_cost("main graph", cost)
fc, sub_costs = model.estimate(subgraphs)
_print_cost("Subgraphs:", fc)
for i, cost in enumerate(sub_costs):
_print_cost(" |_%d:\t" % (i), cost)
_test(graph_5())
# _test(graph_4())
def test_estimate():
"""Test estimate"""
graph = graph_5()
e = estimate.Estimator(graph)
e.estimate()
print(e.iter_space)
def test_pattern_split():
"""Test pattern split"""
def _test(graph, expect_n=0):
print("************* main graph **************")
print(graph)
subgraphs = split.GraphSplitByPatternV2(graph).split()
for i, g in enumerate(subgraphs):
print(' -------- subgraph {} -------'.format(i))
print(g)
if expect_n > 0:
assert len(subgraphs) == expect_n
# _test(graph_1(), 1)
# _test(graph_pat_1(), 2)
# _test(graph_pat_2())
# _test(graph_pat_3())
# _test(graph_pat_4())
# _test(graph_pat_5())
# _test(graph_pat_6())
# _test(graph_pat_7())
# _test(graph_pat_8())
# _test(graph_pat_9())
# _test(graph_mo_1())
# _test(graph_mo_2())
# _test(graph_mo_3())
_test(graph_mo_4())
def main():
# test_binary_split()
# test_resolve_connnected_graphs()
# test_split()
# test_estimate()
test_pattern_split()
if __name__ == '__main__':
main()
| 30.613272
| 87
| 0.506877
|
44083d660c6e1abd9e35e60d92de157f0debfef2
| 11,183
|
py
|
Python
|
tools/print_signatures.py
|
L-Net-1992/Paddle
|
4d0ca02ba56760b456f3d4b42a538555b9b6c307
|
[
"Apache-2.0"
] | 11
|
2016-08-29T07:43:26.000Z
|
2016-08-29T07:51:24.000Z
|
tools/print_signatures.py
|
L-Net-1992/Paddle
|
4d0ca02ba56760b456f3d4b42a538555b9b6c307
|
[
"Apache-2.0"
] | null | null | null |
tools/print_signatures.py
|
L-Net-1992/Paddle
|
4d0ca02ba56760b456f3d4b42a538555b9b6c307
|
[
"Apache-2.0"
] | 1
|
2021-12-09T08:59:17.000Z
|
2021-12-09T08:59:17.000Z
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Print all signatures of a Python module in alphabetical order.
Usage:
./print_signature "paddle.fluid" > signature.txt
"""
import inspect
import collections
import sys
import hashlib
import pkgutil
import logging
import argparse
member_dict = collections.OrderedDict()
visited_modules = set()
logger = logging.getLogger()
if logger.handlers:
# we assume the first handler is the one we want to configure
console = logger.handlers[0]
else:
console = logging.StreamHandler(sys.stderr)
logger.addHandler(console)
console.setFormatter(
logging.Formatter(
"%(asctime)s - %(funcName)s:%(lineno)d - %(levelname)s - %(message)s"))
def md5(doc):
try:
hashinst = hashlib.md5()
hashinst.update(str(doc).encode('utf-8'))
md5sum = hashinst.hexdigest()
except UnicodeDecodeError as e:
md5sum = None
print("Error({}) occurred when `md5({})`, discard it.".format(
str(e), doc),
file=sys.stderr)
return md5sum
def is_primitive(instance):
int_types = (int, )
    primitive_types = int_types + (float, str)
    if isinstance(instance, primitive_types):
return True
elif isinstance(instance, (list, tuple, set)):
for obj in instance:
if not is_primitive(obj):
return False
return True
else:
return False
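# Illustrative checks (added for clarity, not part of the original tool):
# is_primitive accepts ints, floats, strings and flat or nested sequences of
# them, and rejects dicts and arbitrary objects.
assert is_primitive(3) and is_primitive([1, 2.0, 'x'])
assert not is_primitive({'a': 1}) and not is_primitive(object())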
ErrorSet = set()
IdSet = set()
skiplist = []
def visit_all_module(mod):
mod_name = mod.__name__
if mod_name != 'paddle' and not mod_name.startswith('paddle.'):
return
if mod_name.startswith('paddle.fluid.core'):
return
if mod in visited_modules:
return
visited_modules.add(mod)
member_names = dir(mod)
if hasattr(mod, "__all__"):
member_names += mod.__all__
for member_name in member_names:
if member_name.startswith('_'):
continue
cur_name = mod_name + '.' + member_name
if cur_name in skiplist:
continue
try:
instance = getattr(mod, member_name)
if inspect.ismodule(instance):
visit_all_module(instance)
else:
instance_id = id(instance)
if instance_id in IdSet:
continue
IdSet.add(instance_id)
if hasattr(instance,
'__name__') and member_name != instance.__name__:
print(
"Found alias API, alias name is: {}, original name is: {}"
.format(member_name, instance.__name__),
file=sys.stderr)
except:
if not cur_name in ErrorSet and not cur_name in skiplist:
ErrorSet.add(cur_name)
# all from gen_doc.py
api_info_dict = {} # used by get_all_api
# step 1: walkthrough the paddle package to collect all the apis in api_set
def get_all_api(root_path='paddle', attr="__all__"):
"""
walk through the paddle package to collect all the apis.
"""
import paddle
global api_info_dict
api_counter = 0
for filefinder, name, ispkg in pkgutil.walk_packages(
path=paddle.__path__, prefix=paddle.__name__ + '.'):
try:
if name in sys.modules:
m = sys.modules[name]
else:
# importlib.import_module(name)
m = eval(name)
continue
except AttributeError:
logger.warning("AttributeError occurred when `eval(%s)`", name)
pass
else:
api_counter += process_module(m, attr)
api_counter += process_module(paddle, attr)
logger.info('%s: collected %d apis, %d distinct apis.', attr, api_counter,
len(api_info_dict))
return [(sorted(list(api_info['all_names']))[0], md5(api_info['docstring']))
for api_info in api_info_dict.values()]
def insert_api_into_dict(full_name, gen_doc_anno=None):
"""
    insert an api into the api_info_dict
Return:
api_info object or None
"""
import paddle
try:
obj = eval(full_name)
fc_id = id(obj)
except AttributeError:
logger.warning("AttributeError occurred when `id(eval(%s))`", full_name)
return None
except Exception as e:
logger.warning("Exception(%s) occurred when `id(eval(%s))`", str(e),
full_name)
return None
else:
logger.debug("adding %s to api_info_dict.", full_name)
if fc_id in api_info_dict:
api_info_dict[fc_id]["all_names"].add(full_name)
else:
api_info_dict[fc_id] = {
"all_names": set([full_name]),
"id": fc_id,
"object": obj,
"type": type(obj).__name__,
"docstring": '',
}
docstr = inspect.getdoc(obj)
if docstr:
api_info_dict[fc_id]["docstring"] = inspect.cleandoc(docstr)
if gen_doc_anno:
api_info_dict[fc_id]["gen_doc_anno"] = gen_doc_anno
if inspect.isfunction(obj):
api_info_dict[fc_id]["signature"] = repr(
inspect.getfullargspec(obj)).replace(
'FullArgSpec', 'ArgSpec', 1)
return api_info_dict[fc_id]
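# Note added for clarity: each api_info_dict entry created above is keyed by
# id(obj) and looks roughly like
#   {"all_names": {"paddle.foo", ...}, "id": ..., "object": obj, "type": "function",
#    "docstring": "...", "gen_doc_anno": ..., "signature": "ArgSpec(...)"}
# where "gen_doc_anno" and "signature" are only filled in when applicable.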
# step 1 fill field : `id` & `all_names`, type, docstring
def process_module(m, attr="__all__"):
api_counter = 0
if hasattr(m, attr):
# may have duplication of api
for api in set(getattr(m, attr)):
if api[0] == '_': continue
# Exception occurred when `id(eval(paddle.dataset.conll05.test, get_dict))`
if ',' in api: continue
# api's fullname
full_name = m.__name__ + "." + api
api_info = insert_api_into_dict(full_name)
if api_info is not None:
api_counter += 1
if inspect.isclass(api_info['object']):
for name, value in inspect.getmembers(api_info['object']):
if (not name.startswith("_")) and hasattr(
value, '__name__'):
method_full_name = full_name + '.' + name # value.__name__
method_api_info = insert_api_into_dict(
method_full_name, 'class_method')
if method_api_info is not None:
api_counter += 1
return api_counter
def check_public_api():
import paddle
    modulelist = [  # noqa
paddle, paddle.amp, paddle.nn, paddle.nn.functional,
paddle.nn.initializer, paddle.nn.utils, paddle.static, paddle.static.nn,
paddle.io, paddle.jit, paddle.metric, paddle.distribution,
paddle.optimizer, paddle.optimizer.lr, paddle.regularizer, paddle.text,
paddle.utils, paddle.utils.download, paddle.utils.profiler,
paddle.utils.cpp_extension, paddle.sysconfig, paddle.vision,
paddle.vision.datasets, paddle.vision.models, paddle.vision.transforms,
paddle.vision.ops, paddle.distributed, paddle.distributed.fleet,
paddle.distributed.fleet.utils, paddle.distributed.parallel,
paddle.distributed.utils, paddle.callbacks, paddle.hub, paddle.autograd,
paddle.incubate, paddle.inference, paddle.onnx, paddle.device
]
apinum = 0
alldict = {}
for module in modulelist:
if hasattr(module, '__all__'):
old_all = module.__all__
else:
old_all = []
dirall = dir(module)
for item in dirall:
if item.startswith('__'):
continue
old_all.append(item)
apinum += len(old_all)
alldict.update({module.__name__: old_all})
old_all = []
dirall = dir(paddle.Tensor)
for item in dirall:
if item.startswith('_'):
continue
old_all.append(item)
apinum += len(old_all)
alldict.update({'paddle.Tensor': old_all})
for module, allapi in alldict.items():
for member_name in allapi:
cur_name = module + '.' + member_name
instance = eval(cur_name)
doc_md5 = md5(instance.__doc__)
member_dict[cur_name] = "({}, ('document', '{}'))".format(
cur_name, doc_md5)
def check_allmodule_callable():
import paddle
modulelist = [paddle]
for m in modulelist:
visit_all_module(m)
return member_dict
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Print Apis Signatures')
parser.add_argument('--debug', dest='debug', action="store_true")
parser.add_argument('--method',
dest='method',
type=str,
default='get_all_api',
help="using get_all_api or from_modulelist")
parser.add_argument('module', type=str, help='module',
default='paddle') # not used
if len(sys.argv) == 1:
args = parser.parse_args(['paddle'])
return args
# parser.print_help()
# sys.exit(1)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
check_allmodule_callable()
if args.method == 'from_modulelist':
check_public_api()
for name in member_dict:
print(name, member_dict[name])
elif args.method == 'get_all_api':
get_all_api()
all_api_names_to_k = {}
for k, api_info in api_info_dict.items():
            # 1. the shortest suggested_name may be renamed;
            # 2. some api's fullname is not accessible; its module name is overridden by the function with the same name;
api_name = sorted(list(api_info['all_names']))[0]
all_api_names_to_k[api_name] = k
all_api_names_sorted = sorted(all_api_names_to_k.keys())
for api_name in all_api_names_sorted:
api_info = api_info_dict[all_api_names_to_k[api_name]]
print("{0} ({2}, ('document', '{1}'))".format(
api_name, md5(api_info['docstring']), api_info['signature']
if 'signature' in api_info else 'ArgSpec()'))
if len(ErrorSet) == 0:
sys.exit(0)
else:
for erroritem in ErrorSet:
print("Error, new function {} is unreachable".format(erroritem),
file=sys.stderr)
sys.exit(1)
| 33.38209
| 126
| 0.589556
|
b13985e23ad5c2785799ec9586ee579a152bb978
| 10,347
|
py
|
Python
|
dabl/tests/test_search.py
|
adekunleba/dabl
|
c4bfc23ba2be11763a2600c7d2a7a0059cb2251c
|
[
"BSD-3-Clause"
] | 2
|
2020-08-02T06:08:54.000Z
|
2021-03-02T19:59:28.000Z
|
dabl/tests/test_search.py
|
adekunleba/dabl
|
c4bfc23ba2be11763a2600c7d2a7a0059cb2251c
|
[
"BSD-3-Clause"
] | null | null | null |
dabl/tests/test_search.py
|
adekunleba/dabl
|
c4bfc23ba2be11763a2600c7d2a7a0059cb2251c
|
[
"BSD-3-Clause"
] | 1
|
2019-10-10T18:01:03.000Z
|
2019-10-10T18:01:03.000Z
|
import pytest
from scipy.stats import norm
from sklearn.datasets import make_classification
from sklearn.dummy import DummyClassifier
from dabl.search import GridSuccessiveHalving, RandomSuccessiveHalving
class FastClassifier(DummyClassifier):
"""Dummy classifier that accepts parameters a, b, ... z.
    These parameters don't affect the predictions and are useful for fast
grid searching."""
def __init__(self, strategy='stratified', random_state=None,
constant=None, **kwargs):
super().__init__(strategy=strategy, random_state=random_state,
constant=constant)
def get_params(self, deep=False):
params = super().get_params(deep=deep)
for char in range(ord('a'), ord('z') + 1):
params[chr(char)] = 'whatever'
return params
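# Illustrative sketch (added for clarity, not one of the original tests): the
# FastClassifier above exposes inert parameters 'a'..'z' so the halving searches
# can sweep sizeable grids without real model fits. A minimal use of the grid
# variant, mirroring the attributes asserted on below, might look like this:
def _example_grid_successive_halving():
    X, y = make_classification(n_samples=1000, random_state=0)
    sh = GridSuccessiveHalving(FastClassifier(), {'a': [1, 2], 'b': [1, 2, 3]},
                               cv=5, ratio=3)
    sh.fit(X, y)
    return sh.n_iterations_, sh.n_possible_iterations_, sh._r_i_list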
def test_aggressive_elimination():
# Test the aggressive_elimination parameter.
n_samples = 1000
X, y = make_classification(n_samples=n_samples, random_state=0)
parameters = {'a': ('l1', 'l2'), 'b': list(range(30))}
base_estimator = FastClassifier()
ratio = 3
# aggressive_elimination is only really relevant when there is not enough
# budget.
max_budget = 180
# aggressive_elimination=True
# In this case, the first iterations only use r_min_ resources
sh = GridSuccessiveHalving(base_estimator, parameters, cv=5,
aggressive_elimination=True,
max_budget=max_budget, ratio=ratio)
sh.fit(X, y)
assert sh.n_iterations_ == 4
assert sh.n_required_iterations_ == 4
assert sh.n_possible_iterations_ == 3
assert sh._r_i_list == [20, 20, 60, 180] # see how it loops at the start
assert sh.n_remaining_candidates_ == 1
# Make sure we get the same results with randomized search
sh = RandomSuccessiveHalving(base_estimator, parameters,
n_candidates=60, cv=5,
aggressive_elimination=True,
max_budget=max_budget, ratio=ratio)
sh.fit(X, y)
assert sh.n_iterations_ == 4
assert sh.n_required_iterations_ == 4
assert sh.n_possible_iterations_ == 3
assert sh._r_i_list == [20, 20, 60, 180] # see how it loops at the start
assert sh.n_remaining_candidates_ == 1
# aggressive_elimination=False
# In this case we don't loop at the start, and might end up with a lot of
# candidates at the last iteration
sh = GridSuccessiveHalving(base_estimator, parameters, cv=5,
aggressive_elimination=False,
max_budget=max_budget, ratio=ratio)
sh.fit(X, y)
assert sh.n_iterations_ == 3
assert sh.n_required_iterations_ == 4
assert sh.n_possible_iterations_ == 3
assert sh._r_i_list == [20, 60, 180]
assert sh.n_remaining_candidates_ == 3
max_budget = n_samples
# with enough budget, aggressive_elimination has no effect since it is not
# needed
# aggressive_elimination=True
sh = GridSuccessiveHalving(base_estimator, parameters, cv=5,
aggressive_elimination=True,
max_budget=max_budget, ratio=ratio)
sh.fit(X, y)
assert sh.n_iterations_ == 4
assert sh.n_required_iterations_ == 4
assert sh.n_possible_iterations_ == 4
assert sh._r_i_list == [20, 60, 180, 540]
assert sh.n_remaining_candidates_ == 1
# aggressive_elimination=False
sh = GridSuccessiveHalving(base_estimator, parameters, cv=5,
aggressive_elimination=False,
max_budget=max_budget, ratio=ratio)
sh.fit(X, y)
assert sh.n_iterations_ == 4
assert sh.n_required_iterations_ == 4
assert sh.n_possible_iterations_ == 4
assert sh._r_i_list == [20, 60, 180, 540]
assert sh.n_remaining_candidates_ == 1
def test_force_exhaust_budget_false():
# Test the force_exhaust_budget parameter when it's false or ignored.
# This is the default case: we start at the beginning no matter what since
# we do not overwrite r_min_
n_samples = 1000
X, y = make_classification(n_samples=n_samples, random_state=0)
parameters = {'a': [1, 2], 'b': [1, 2, 3]}
base_estimator = FastClassifier()
ratio = 3
# with enough budget
sh = GridSuccessiveHalving(base_estimator, parameters, cv=5,
force_exhaust_budget=False, ratio=ratio)
sh.fit(X, y)
assert sh.n_iterations_ == 2
assert sh.n_required_iterations_ == 2
assert sh.n_possible_iterations_ == 4
assert sh._r_i_list == [20, 60]
# with enough budget but r_min!='auto': ignored
sh = GridSuccessiveHalving(base_estimator, parameters, cv=5,
force_exhaust_budget=False, ratio=ratio,
r_min=50)
sh.fit(X, y)
assert sh.n_iterations_ == 2
assert sh.n_required_iterations_ == 2
assert sh.n_possible_iterations_ == 3
assert sh._r_i_list == [50, 150]
# without enough budget (budget is exhausted anyway)
sh = GridSuccessiveHalving(base_estimator, parameters, cv=5,
force_exhaust_budget=False, ratio=ratio,
max_budget=30)
sh.fit(X, y)
assert sh.n_iterations_ == 1
assert sh.n_required_iterations_ == 2
assert sh.n_possible_iterations_ == 1
assert sh._r_i_list == [20]
@pytest.mark.parametrize('max_budget, r_i_list', [
('auto', [333, 999]),
(1000, [333, 999]),
(999, [333, 999]),
(600, [200, 600]),
(599, [199, 597]),
(300, [100, 300]),
(60, [20, 60]),
(50, [20]),
(20, [20]),
])
def test_force_exhaust_budget_true(max_budget, r_i_list):
# Test the force_exhaust_budget parameter when it's true
# in this case we need to change r_min so that the last iteration uses as
# much budget as possible
n_samples = 1000
X, y = make_classification(n_samples=n_samples, random_state=0)
parameters = {'a': [1, 2], 'b': [1, 2, 3]}
base_estimator = FastClassifier()
ratio = 3
sh = GridSuccessiveHalving(base_estimator, parameters, cv=5,
force_exhaust_budget=True, ratio=ratio,
max_budget=max_budget)
sh.fit(X, y)
assert sh.n_possible_iterations_ == sh.n_iterations_ == len(sh._r_i_list)
assert sh._r_i_list == r_i_list
# Test same for randomized search
sh = RandomSuccessiveHalving(base_estimator, parameters, n_candidates=6,
cv=5, force_exhaust_budget=True,
ratio=ratio, max_budget=max_budget)
sh.fit(X, y)
assert sh.n_possible_iterations_ == sh.n_iterations_ == len(sh._r_i_list)
assert sh._r_i_list == r_i_list
@pytest.mark.parametrize(
'max_budget, n_iterations, n_possible_iterations', [
('auto', 5, 9), # whole budget is used
(1024, 5, 9),
(700, 5, 8),
(512, 5, 8),
(511, 5, 7),
(32, 4, 4),
(31, 3, 3),
(16, 3, 3),
(4, 1, 1), # max_budget == r_min, only one iteration is possible
])
def test_n_iterations(max_budget, n_iterations, n_possible_iterations):
# test the number of actual iterations that were run depending on
# max_budget
n_samples = 1024
X, y = make_classification(n_samples=n_samples, random_state=1)
parameters = {'a': [1, 2], 'b': list(range(10))}
base_estimator = FastClassifier()
ratio = 2
sh = GridSuccessiveHalving(base_estimator, parameters, cv=2, ratio=ratio,
max_budget=max_budget, r_min=4)
sh.fit(X, y)
assert sh.n_required_iterations_ == 5
assert sh.n_iterations_ == n_iterations
assert sh.n_possible_iterations_ == n_possible_iterations
def test_budget_on():
# Test the budget_on parameter
n_samples = 1000
X, y = make_classification(n_samples=n_samples, random_state=0)
parameters = {'a': [1, 2], 'b': list(range(10))}
base_estimator = FastClassifier()
sh = GridSuccessiveHalving(base_estimator, parameters, cv=2,
budget_on='c', max_budget=10, ratio=3)
sh.fit(X, y)
assert set(sh._r_i_list) == set([1, 3, 9])
for r_i, params, param_c in zip(sh.cv_results_['r_i'],
sh.cv_results_['params'],
sh.cv_results_['param_c']):
assert r_i == params['c'] == param_c
with pytest.raises(
ValueError,
match='Cannot budget on parameter 1234 which is not supported '):
sh = GridSuccessiveHalving(base_estimator, parameters, cv=2,
budget_on='1234', max_budget=10)
sh.fit(X, y)
with pytest.raises(
ValueError,
match='Cannot budget on parameter c since it is part of the '
'searched parameters.'):
parameters = {'a': [1, 2], 'b': [1, 2], 'c': [1, 3]}
sh = GridSuccessiveHalving(base_estimator, parameters, cv=2,
budget_on='c', max_budget=10)
sh.fit(X, y)
@pytest.mark.parametrize(
'max_budget, n_candidates, expected_n_candidates_', [
(512, 'auto', 128), # generate exactly as much as needed
(32, 'auto', 8),
(32, 8, 8),
(32, 7, 7), # ask for less than what we could
(32, 9, 9), # ask for more than 'reasonable'
])
def test_random_search(max_budget, n_candidates, expected_n_candidates_):
# Test random search and make sure the number of generated candidates is as
# expected
n_samples = 1024
X, y = make_classification(n_samples=n_samples, random_state=0)
parameters = {'a': norm, 'b': norm}
base_estimator = FastClassifier()
sh = RandomSuccessiveHalving(base_estimator, parameters,
n_candidates=n_candidates,
cv=2,
max_budget=max_budget, ratio=2, r_min=4)
sh.fit(X, y)
assert sh.n_candidates_ == expected_n_candidates_
if n_candidates == 'auto':
# Make sure 'auto' makes the last iteration use as much budget as we
# can
assert sh._r_i_list[-1] == max_budget
| 37.353791
| 79
| 0.618247
|