hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2b4b0516b2e80a840b0daf7c96171c9f14f54bd2 | 7,172 | py | Python | egs/wham/TwoStep/train.py | saurabh-kataria/asteroid | a0b1d5e43375d5f5d83ba9ec707633659289200f | [
"MIT"
] | null | null | null | egs/wham/TwoStep/train.py | saurabh-kataria/asteroid | a0b1d5e43375d5f5d83ba9ec707633659289200f | [
"MIT"
] | 1 | 2020-05-21T13:14:43.000Z | 2020-05-21T13:14:43.000Z | egs/wham/TwoStep/train.py | saurabh-kataria/asteroid | a0b1d5e43375d5f5d83ba9ec707633659289200f | [
"MIT"
] | null | null | null |
import os
import argparse
import json
import yaml  # needed by train_model_part (yaml.safe_dump) even when the module is imported
import torch
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping
from asteroid.data.wham_dataset import WhamDataset
from system import SystemTwoStep
from asteroid.losses import PITLossWrapper, pairwise_neg_sisdr, PairwiseNegSDR
from model import get_encoded_paths
from model import load_best_filterbank_if_available
from model import make_model_and_optimizer
# Keys which are not in the conf.yml file can be added here.
# In the hierarchical dictionary created when parsing, the key `key` can be
# found at dic['main_args'][key]
# By default train.py will use all available GPUs. The `id` option in run.sh
# will limit the number of available GPUs for train.py .
# This can be changed: `python train.py --gpus 0,1` will only train on 2 GPUs.
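# For illustration, with the defaults below the parsed hierarchical dictionary is expected
# to look roughly like
#   {'main_args': {'gpus': '-1', 'exp_dir': 'exp/model_logs'},
#    'data': {...}, 'filterbank_training': {...}, 'separator_training': {...}}
# where everything except 'main_args' comes from local/conf.yml.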
parser = argparse.ArgumentParser()
parser.add_argument('--gpus', type=str, help='list of GPUs', default='-1')
parser.add_argument('--exp_dir', default='exp/model_logs',
help='Full path to save best validation model')
def get_data_loaders(conf, train_part='filterbank'):
train_set = WhamDataset(conf['data']['train_dir'], conf['data']['task'],
sample_rate=conf['data']['sample_rate'],
nondefault_nsrc=conf['data']['nondefault_nsrc'],
normalize_audio=True)
val_set = WhamDataset(conf['data']['valid_dir'], conf['data']['task'],
sample_rate=conf['data']['sample_rate'],
nondefault_nsrc=conf['data']['nondefault_nsrc'],
normalize_audio=True)
if train_part not in ['filterbank', 'separator']:
raise ValueError('Part to train: {} is not available.'.format(
train_part))
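    # For illustration: the per-part hyperparameter keys are prefixed with the first letter
    # of the trained part, e.g. conf['filterbank_training']['f_batch_size'] and
    # conf['separator_training']['s_batch_size'] below.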
train_loader = DataLoader(train_set, shuffle=True, drop_last=True,
batch_size=conf[train_part + '_training'][
train_part[0] + '_batch_size'],
num_workers=conf[train_part + '_training'][
train_part[0] + '_num_workers'])
val_loader = DataLoader(val_set, shuffle=True, drop_last=True,
batch_size=conf[train_part + '_training'][
train_part[0] + '_batch_size'],
num_workers=conf[train_part + '_training'][
train_part[0] + '_num_workers'])
# Update number of source values (It depends on the task)
conf['masknet'].update({'n_src': train_set.n_src})
return train_loader, val_loader
def train_model_part(conf, train_part='filterbank', pretrained_filterbank=None):
train_loader, val_loader = get_data_loaders(conf, train_part=train_part)
# Define model and optimizer in a local function (defined in the recipe).
    # The main advantage: re-instantiating the model and optimizer
    # for retraining and evaluation is straightforward.
model, optimizer = make_model_and_optimizer(
conf, model_part=train_part, pretrained_filterbank=pretrained_filterbank
)
# Define scheduler
scheduler = None
if conf[train_part + '_training'][train_part[0] + '_half_lr']:
scheduler = ReduceLROnPlateau(optimizer=optimizer, factor=0.5,
patience=5)
# Just after instantiating, save the args. Easy loading in the future.
exp_dir, checkpoint_dir = get_encoded_paths(conf, train_part)
os.makedirs(exp_dir, exist_ok=True)
conf_path = os.path.join(exp_dir, 'conf.yml')
with open(conf_path, 'w') as outfile:
yaml.safe_dump(conf, outfile)
# Define Loss function.
loss_func = PITLossWrapper(PairwiseNegSDR('sisdr', zero_mean=False),
pit_from='pw_mtx')
system = SystemTwoStep(model=model, loss_func=loss_func,
optimizer=optimizer, train_loader=train_loader,
val_loader=val_loader, scheduler=scheduler,
config=conf, module=train_part)
# Define callbacks
checkpoint = ModelCheckpoint(checkpoint_dir, monitor='val_loss',
mode='min', save_top_k=1, verbose=1)
early_stopping = False
if conf[train_part + '_training'][train_part[0] + '_early_stop']:
early_stopping = EarlyStopping(monitor='val_loss', patience=10,
verbose=1)
# Don't ask GPU if they are not available.
if not torch.cuda.is_available():
        print('No available GPUs were found, setting gpus to None')
conf['main_args']['gpus'] = None
trainer = pl.Trainer(
max_nb_epochs=conf[train_part + '_training'][train_part[0] + '_epochs'],
checkpoint_callback=checkpoint,
early_stop_callback=early_stopping,
default_save_path=exp_dir,
gpus=conf['main_args']['gpus'],
distributed_backend='dp',
train_percent_check=1.0, # Useful for fast experiment
gradient_clip_val=5.)
trainer.fit(system)
with open(os.path.join(checkpoint_dir, "best_k_models.json"), "w") as file:
json.dump(checkpoint.best_k_models, file, indent=0)
def main(conf):
filterbank = load_best_filterbank_if_available(conf)
_, checkpoint_dir = get_encoded_paths(conf, 'filterbank')
if filterbank is None:
print('There are no available filterbanks under: {}. Going to '
'training.'.format(checkpoint_dir))
train_model_part(conf, train_part='filterbank')
filterbank = load_best_filterbank_if_available(conf)
else:
print('Found available filterbank at: {}'.format(checkpoint_dir))
if not conf['filterbank_training']['reuse_pretrained_filterbank']:
print('Refining filterbank...')
train_model_part(conf, train_part='filterbank')
filterbank = load_best_filterbank_if_available(conf)
train_model_part(conf, train_part='separator',
pretrained_filterbank=filterbank)
if __name__ == '__main__':
import yaml
from asteroid.utils import prepare_parser_from_dict, parse_args_as_dict
# We start with opening the config file conf.yml as a dictionary from
# which we can create parsers. Each top level key in the dictionary defined
# by the YAML file creates a group in the parser.
with open('local/conf.yml') as f:
def_conf = yaml.safe_load(f)
parser = prepare_parser_from_dict(def_conf, parser=parser)
# Arguments are then parsed into a hierarchical dictionary (instead of
# flat, as returned by argparse) to facilitate calls to the different
# asteroid methods (see in main).
# plain_args is the direct output of parser.parse_args() and contains all
# the attributes in an non-hierarchical structure. It can be useful to also
# have it so we included it here but it is not used.
arg_dic, plain_args = parse_args_as_dict(parser, return_plain_args=True)
print(arg_dic)
main(arg_dic)
| 46.270968 | 80 | 0.662856 |
1d9041bf824e5b26d87fd04b3790ece2008693f9 | 1,096 | py | Python | core/migrations/0005_auto_20161129_1950.py | vindeolal/pari | 8c69d15101480c3e803d6d74f8007cefee20c350 | [
"BSD-3-Clause"
] | null | null | null | core/migrations/0005_auto_20161129_1950.py | vindeolal/pari | 8c69d15101480c3e803d6d74f8007cefee20c350 | [
"BSD-3-Clause"
] | null | null | null | core/migrations/0005_auto_20161129_1950.py | vindeolal/pari | 8c69d15101480c3e803d6d74f8007cefee20c350 | [
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import wagtail.wagtailimages.models
class Migration(migrations.Migration):
dependencies = [
('core', '0004_auto_20160521_1901'),
]
operations = [
migrations.AlterField(
model_name='homepage',
name='language',
field=models.CharField(choices=[('en', 'English'), ('as', 'Assamese'), ('bn', 'Bengali'), ('gu', 'Gujarati'), ('hi', 'Hindi'), ('kn', 'Kannada'), ('ml', 'Malayalam'), ('mr', 'Marathi'), ('or', 'Odia'), ('pa', 'Punjabi'), ('te', 'Telugu'), ('ta', 'Tamil'), ('ur', 'Urdu')], max_length=7),
),
migrations.AlterField(
model_name='staticpage',
name='language',
field=models.CharField(choices=[('en', 'English'), ('as', 'Assamese'), ('bn', 'Bengali'), ('gu', 'Gujarati'), ('hi', 'Hindi'), ('kn', 'Kannada'), ('ml', 'Malayalam'), ('mr', 'Marathi'), ('or', 'Odia'), ('pa', 'Punjabi'), ('te', 'Telugu'), ('ta', 'Tamil'), ('ur', 'Urdu')], max_length=7),
),
]
| 42.153846 | 299 | 0.540146 |
4bc4f03059964ea96386465cd73dd3b60195055e | 667 | py | Python | tests/conftest.py | Orhideous/python3_lzo_indexer | aaa6498bb5d3464c36e6103599035028411fb950 | [
"Apache-2.0"
] | 1 | 2020-06-24T13:59:40.000Z | 2020-06-24T13:59:40.000Z | tests/conftest.py | Orhideous/python3_lzo_indexer | aaa6498bb5d3464c36e6103599035028411fb950 | [
"Apache-2.0"
] | 49 | 2018-09-29T23:42:51.000Z | 2021-12-01T18:29:12.000Z | tests/conftest.py | Orhideous/python3_lzo_indexer | aaa6498bb5d3464c36e6103599035028411fb950 | [
"Apache-2.0"
] | null | null | null |
from io import BytesIO
from subprocess import Popen, PIPE
import pytest
def lzo_stream(*, length: int = 4096):
"""
Compress a string of null bytes, the length being defined by the
argument to this function.
"""
compressor = Popen(["lzop", "-c"], stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdout, stderr = compressor.communicate(input=b"\x00" * length)
if stderr:
raise Exception(f"Failed to compress with error {stderr!r}")
stream = BytesIO(stdout)
stream.seek(0)
return stream
@pytest.fixture
def small_lzo():
return lzo_stream(length=1)
@pytest.fixture
def big_lzo():
return lzo_stream(length=10 ** 6)
| 20.212121 | 76 | 0.674663 |
6c44930a6eb9c414d5c9c37ff5444a21a71e5e39 | 623 | py | Python | app/api_1_0/errors.py | SanbaiWang/sanbaiblog | f78f9f045ff8c4be60ffa4eb8e5074585d5bf93f | [
"MIT"
] | null | null | null | app/api_1_0/errors.py | SanbaiWang/sanbaiblog | f78f9f045ff8c4be60ffa4eb8e5074585d5bf93f | [
"MIT"
] | null | null | null | app/api_1_0/errors.py | SanbaiWang/sanbaiblog | f78f9f045ff8c4be60ffa4eb8e5074585d5bf93f | [
"MIT"
] | 1 | 2020-04-12T00:30:32.000Z | 2020-04-12T00:30:32.000Z |
from flask import jsonify
from . import api
from ..exceptions import ValidationError
def bad_request(message):
response = jsonify({'error': 'bad request', 'message': message})
response.status_code = 400
return response
def unauthorized(message):
response = jsonify({'error': 'unauthorized', 'message': message})
response.status_code = 401
return response
def forbidden(message):
response = jsonify({'error': 'forbidden', 'message': message})
response.status_code = 403
return response
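# For illustration, forbidden('insufficient role') produces a 403 response whose JSON body
# is {"error": "forbidden", "message": "insufficient role"} (message text is an example).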
@api.errorhandler(ValidationError)
def validation_error(e):
return bad_request(e.args[0])
| 23.074074 | 69 | 0.715891 |
eb81be597efaa60c8c136e85df1f43eb6fbe1e8a | 2,704 | py | Python | 3.7.0/lldb-3.7.0.src/test/tools/lldb-mi/symbol/TestMiSymbol.py | androm3da/clang_sles | 2ba6d0711546ad681883c42dfb8661b842806695 | [
"MIT"
] | 3 | 2016-02-10T14:18:40.000Z | 2018-02-05T03:15:56.000Z | 3.7.0/lldb-3.7.0.src/test/tools/lldb-mi/symbol/TestMiSymbol.py | androm3da/clang_sles | 2ba6d0711546ad681883c42dfb8661b842806695 | [
"MIT"
] | 1 | 2016-02-10T15:40:03.000Z | 2016-02-10T15:40:03.000Z | 3.7.0/lldb-3.7.0.src/test/tools/lldb-mi/symbol/TestMiSymbol.py | androm3da/clang_sles | 2ba6d0711546ad681883c42dfb8661b842806695 | [
"MIT"
] | null | null | null | """
Test lldb-mi -symbol-xxx commands.
"""
import lldbmi_testcase
from lldbtest import *
import unittest2
class MiSymbolTestCase(lldbmi_testcase.MiTestCaseBase):
mydir = TestBase.compute_mydir(__file__)
@lldbmi_test
@expectedFailureWindows("llvm.org/pr22274: need a pexpect replacement for windows")
@skipIfFreeBSD # llvm.org/pr22411: Failure presumably due to known thread races
def test_lldbmi_symbol_list_lines_file(self):
"""Test that 'lldb-mi --interpreter' works for -symbol-list-lines when file exists."""
self.spawnLldbMi(args = None)
# Load executable
self.runCmd("-file-exec-and-symbols %s" % self.myexe)
self.expect("\^done")
# Run to main
self.runCmd("-break-insert -f main")
self.expect("\^done,bkpt={number=\"1\"")
self.runCmd("-exec-run")
self.expect("\^running")
self.expect("\*stopped,reason=\"breakpoint-hit\"")
# Get address of main and its line
self.runCmd("-data-evaluate-expression main")
self.expect("\^done,value=\"0x[0-9a-f]+\"")
addr = int(self.child.after.split("\"")[1], 16)
line = line_number('main.cpp', '// FUNC_main')
# Test that -symbol-list-lines works on valid data
self.runCmd("-symbol-list-lines main.cpp")
self.expect("\^done,lines=\[\{pc=\"0x0*%x\",line=\"%d\"\}(,\{pc=\"0x[0-9a-f]+\",line=\"\d+\"\})+\]" % (addr, line))
# Test that -symbol-list-lines fails when file doesn't exist
self.runCmd("-symbol-list-lines unknown_file")
self.expect("\^error,message=\"warning: No source filenames matched 'unknown_file'\. error: no source filenames matched any command arguments \"")
# Test that -symbol-list-lines fails when file is specified using relative path
self.runCmd("-symbol-list-lines ./main.cpp")
self.expect("\^error,message=\"warning: No source filenames matched '\./main\.cpp'\. error: no source filenames matched any command arguments \"")
# Test that -symbol-list-lines works when file is specified using absolute path
import os
path = os.path.join(os.getcwd(), "main.cpp")
self.runCmd("-symbol-list-lines \"%s\"" % path)
self.expect("\^done,lines=\[\{pc=\"0x0*%x\",line=\"%d\"\}(,\{pc=\"0x[0-9a-f]+\",line=\"\d+\"\})+\]" % (addr, line))
# Test that -symbol-list-lines fails when file doesn't exist
self.runCmd("-symbol-list-lines unknown_dir/main.cpp")
self.expect("\^error,message=\"warning: No source filenames matched 'unknown_dir/main\.cpp'\. error: no source filenames matched any command arguments \"")
if __name__ == '__main__':
unittest2.main()
| 43.612903 | 163 | 0.637574 |
e83b6e42fd0a6376d4bffaca612a330bc961f672 | 5,902 | py | Python | data_analysis/ch04-time-series-visualizer/test_module.py | chaudha4/python-projects | baba3235069b7d6b084f28904f0662c043762175 | [
"MIT"
] | null | null | null | data_analysis/ch04-time-series-visualizer/test_module.py | chaudha4/python-projects | baba3235069b7d6b084f28904f0662c043762175 | [
"MIT"
] | 3 | 2021-11-23T22:19:19.000Z | 2022-03-12T00:52:34.000Z | data_analysis/ch04-time-series-visualizer/test_module.py | chaudha4/python-projects | baba3235069b7d6b084f28904f0662c043762175 | [
"MIT"
] | null | null | null |
import unittest
import time_series_visualizer
import matplotlib as mpl
class DataCleaningTestCase(unittest.TestCase):
def test_data_cleaning(self):
actual = int(time_series_visualizer.df.count())
expected = 1238
self.assertEqual(actual, expected, "Expected DataFrame count after cleaning to be 1238.")
class LinePlotTestCase(unittest.TestCase):
def setUp(self):
self.fig = time_series_visualizer.draw_line_plot()
self.ax = self.fig.axes[0]
def test_line_plot_title(self):
actual = self.ax.get_title()
expected = "Daily freeCodeCamp Forum Page Views 5/2016-12/2019"
self.assertEqual(actual, expected, "Expected line plot title to be 'Daily freeCodeCamp Forum Page Views 5/2016-12/2019'")
def test_line_plot_labels(self):
actual = self.ax.get_xlabel()
expected = "Date"
self.assertEqual(actual, expected, "Expected line plot xlabel to be 'Date'")
actual = self.ax.get_ylabel()
expected = "Page Views"
self.assertEqual(actual, expected, "Expected line plot ylabel to be 'Page Views'")
    def test_line_plot_data_quantity(self):
actual = len(self.ax.lines[0].get_ydata())
expected = 1238
self.assertEqual(actual, expected, "Expected number of data points in line plot to be 1238.")
class BarPlotTestCase(unittest.TestCase):
def setUp(self):
self.fig = time_series_visualizer.draw_bar_plot()
self.ax = self.fig.axes[0]
def test_bar_plot_legend_labels(self):
actual = []
for label in self.ax.get_legend().get_texts():
actual.append(label.get_text())
expected = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']
self.assertEqual(actual, expected, "Expected bar plot legend labels to be months of the year.")
def test_bar_plot_labels(self):
actual = self.ax.get_xlabel()
expected = "Years"
self.assertEqual(actual, expected, "Expected bar plot xlabel to be 'Years'")
actual = self.ax.get_ylabel()
expected = "Average Page Views"
self.assertEqual(actual, expected, "Expected bar plot ylabel to be 'Average Page Views'")
actual = []
for label in self.ax.get_xaxis().get_majorticklabels():
actual.append(label.get_text())
expected = ['2016', '2017', '2018', '2019']
self.assertEqual(actual, expected, "Expected bar plot secondary labels to be '2016', '2017', '2018', '2019'")
def test_bar_plot_number_of_bars(self):
actual = len([rect for rect in self.ax.get_children() if isinstance(rect, mpl.patches.Rectangle)])
expected = 49
self.assertEqual(actual, expected, "Expected a different number of bars in bar chart.")
class BoxPlotTestCase(unittest.TestCase):
def setUp(self):
self.fig = time_series_visualizer.draw_box_plot()
self.ax1 = self.fig.axes[0]
self.ax2 = self.fig.axes[1]
def test_box_plot_number(self):
actual = len(self.fig.get_axes())
expected = 2
self.assertEqual(actual, expected, "Expected two box plots in figure.")
def test_box_plot_labels(self):
actual = self.ax1.get_xlabel()
expected = "Year"
self.assertEqual(actual, expected, "Expected box plot 1 xlabel to be 'Year'")
actual = self.ax1.get_ylabel()
expected = "Page Views"
self.assertEqual(actual, expected, "Expected box plot 1 ylabel to be 'Page Views'")
actual = self.ax2.get_xlabel()
expected = "Month"
self.assertEqual(actual, expected, "Expected box plot 1 xlabel to be 'Month'")
actual = self.ax2.get_ylabel()
expected = "Page Views"
self.assertEqual(actual, expected, "Expected box plot 1 ylabel to be 'Page Views'")
actual = []
for label in self.ax1.get_xaxis().get_majorticklabels():
actual.append(label.get_text())
expected = ['2016', '2017', '2018', '2019']
self.assertEqual(actual, expected, "Expected box plot 1 secondary labels to be '2016', '2017', '2018', '2019'")
actual = []
for label in self.ax2.get_xaxis().get_majorticklabels():
actual.append(label.get_text())
expected = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
self.assertEqual(actual, expected, "Expected box plot 2 secondary labels to be 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'")
actual = []
for label in self.ax1.get_yaxis().get_majorticklabels():
actual.append(label.get_text())
expected = ['0', '20000', '40000', '60000', '80000', '100000', '120000', '140000', '160000', '180000', '200000']
self.assertEqual(actual, expected, "Expected box plot 1 secondary labels to be '0', '20000', '40000', '60000', '80000', '100000', '120000', '140000', '160000', '180000', '200000'")
def test_box_plot_titles(self):
actual = self.ax1.get_title()
expected = "Year-wise Box Plot (Trend)"
self.assertEqual(actual, expected, "Expected box plot 1 title to be 'Year-wise Box Plot (Trend)'")
actual = self.ax2.get_title()
expected = "Month-wise Box Plot (Seasonality)"
self.assertEqual(actual, expected, "Expected box plot 1 title to be 'Month-wise Box Plot (Seasonality)'")
def test_box_plot_number_of_boxs(self):
actual = len(self.ax1.lines) / 6 # Every box has 6 lines
expected = 4
self.assertEqual(actual, expected, "Expected four boxes in box plot 1")
actual = len(self.ax2.lines) / 6 # Every box has 6 lines
expected = 12
self.assertEqual(actual, expected, "Expected 12 boxes in box plot 2")
if __name__ == "__main__":
unittest.main(verbosity=2)
| 47.596774 | 188 | 0.64368 |
a999515a3ca3ae2cb13595e8420fdfea612d93d0 | 15,122 | py | Python | pragmatic/mixins.py | matejhazala/django-pragmatic | 5f46098536ee02093c2cea23a8c574bf99863fa9 | [
"BSD-3-Clause"
] | null | null | null | pragmatic/mixins.py | matejhazala/django-pragmatic | 5f46098536ee02093c2cea23a8c574bf99863fa9 | [
"BSD-3-Clause"
] | null | null | null | pragmatic/mixins.py | matejhazala/django-pragmatic | 5f46098536ee02093c2cea23a8c574bf99863fa9 | [
"BSD-3-Clause"
] | null | null | null |
import datetime
import io
import requests
from django import forms
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.mixins import PermissionRequiredMixin as DjangoPermissionRequiredMixin, AccessMixin
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import PermissionDenied
from django.core.validators import EMPTY_VALUES
from django.db.models import F
from django.db.models.deletion import ProtectedError
from django.http.response import HttpResponseRedirect, HttpResponse
from django.shortcuts import redirect
from django.utils import timezone
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _, ugettext
from pragmatic.models import DeletedObject
class ReadOnlyFormMixin(forms.BaseForm):
def _get_cleaner(self, field):
def clean_field():
return getattr(self.instance, field, None)
return clean_field
def __init__(self, *args, **kwargs):
super(ReadOnlyFormMixin, self).__init__(*args, **kwargs)
without_instance = self.read_only_without_instance if hasattr(self, "read_only_without_instance") else False
if hasattr(self, "read_only"):
if self.instance and self.instance.pk or without_instance:
for field in self.read_only:
self.fields[field].widget.attrs['readonly'] = True
self.fields[field].widget.attrs['disabled'] = True
self.fields[field].required = False
setattr(self, "clean_" + field, self._get_cleaner(field))
class LoginPermissionRequiredMixin(DjangoPermissionRequiredMixin):
raise_exception = True
def get_permission_denied_message(self):
"""
Override this method to override the permission_denied_message attribute.
"""
if self.permission_denied_message:
return self.permission_denied_message
return self.permission_required
def handle_no_permission(self):
self.request.user.permission_error = self.get_permission_denied_message()
if not self.request.user.is_authenticated:
self.raise_exception = False
return super().handle_no_permission()
class StaffRequiredMixin(AccessMixin):
"""
CBV mixin which verifies that the current user is staff
"""
raise_exception = False
permission_denied_message = ugettext('You are not authorized for this operation')
def dispatch(self, request, *args, **kwargs):
if not request.user.is_staff and not request.user.is_superuser:
return self.handle_no_permission()
return super(StaffRequiredMixin, self).dispatch(request, *args, **kwargs)
def handle_no_permission(self):
if self.raise_exception:
raise PermissionDenied(self.get_permission_denied_message())
messages.error(self.request, self.get_permission_denied_message())
return redirect(getattr(settings, 'LOGIN_REDIRECT_URL', '/'))
class SuperuserRequiredMixin(StaffRequiredMixin):
"""
CBV mixin which verifies that the current user is superuser
"""
def dispatch(self, request, *args, **kwargs):
if not request.user.is_superuser:
return self.handle_no_permission()
return super(SuperuserRequiredMixin, self).dispatch(request, *args, **kwargs)
class DeleteObjectMixin(object):
template_name = 'confirm_delete.html'
title = _('Delete object')
message_success = _('Object successfully deleted.')
message_error = _('Object could not be deleted, check if some objects are not associated with it.')
back_url = None
failure_url = None
def get_back_url(self):
try:
return self.back_url if self.back_url else self.object.get_absolute_url()
except:
return self.success_url
def get_failure_url(self):
return self.failure_url if self.failure_url else self.get_back_url()
def get_success_url(self):
return self.success_url
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
track_deleted_objects = getattr(settings, 'PRAGMATIC_TRACK_DELETED_OBJECTS', False)
try:
if track_deleted_objects:
# prepare tracking data
content_type = ContentType.objects.get_for_model(self.object, for_concrete_model=False)
object_id = self.object.id
object_str = str(self.object)
user = self.request.user
# delete object
self.object.delete()
# show success message if available
if self.message_success:
messages.success(request, self.message_success)
if track_deleted_objects:
# track deleted object
DeletedObject.objects.create(
content_type=content_type,
object_id=object_id,
object_str=object_str,
user=user if user.is_authenticated else None
)
return HttpResponseRedirect(self.get_success_url())
except ProtectedError:
if self.message_error:
messages.error(request, self.message_error)
return HttpResponseRedirect(self.get_failure_url())
def get_context_data(self, **kwargs):
context_data = super(DeleteObjectMixin, self).get_context_data(**kwargs)
context_data['title'] = self.title
context_data['back_url'] = self.get_back_url()
return context_data
class PickadayFormMixin(object):
def fix_fields(self, form=None, *args, **kwargs):
self.field_array = form.fields if form else self.fields
for field_name in self.field_array:
is_datefield = isinstance(self.field_array[field_name], forms.fields.DateField)
is_datetimefield = isinstance(self.field_array[field_name], forms.fields.DateTimeField)
if is_datefield or is_datetimefield:
self.fix_field(field_name, *args, **kwargs)
def fix_field(self, field_name, *args, **kwargs):
date = None
if field_name in self.data:
date = self.data.get(field_name)
elif 'initial' in kwargs and field_name in kwargs.get('initial'):
date = kwargs.get('initial').get(field_name)
if not date and kwargs.get('instance', None) is not None:
instance = kwargs.get('instance')
date = getattr(instance, field_name)
elif not date and getattr(self, 'instance', None) is not None:
date = getattr(self.instance, field_name)
if date:
if type(date) == datetime.date:
# convert date to datetime
date = datetime.datetime.combine(date, datetime.time(0, 0))
# convert from naive to aware (without pytz)
date = date.replace(tzinfo=timezone.utc)
# convert from naive to aware (with pytz)
# import pytz
# date = pytz.timezone("Europe/Helsinki").localize(naive, is_dst=None)
if type(date) == datetime.date or type(date) == datetime.datetime:
# get date in custom format in local time
date = timezone.localtime(date)
date = date.strftime(settings.DATE_FORMAT)
self.field_array[field_name].widget.attrs['data-value'] = date
class FPDFMixin(object):
DEBUG = settings.DEBUG
FORMAT_A4 = 'A4'
FORMATS = {
FORMAT_A4: {
'width': 210,
'height': 297
}
}
format = FORMAT_A4
ORIENTATION_PORTRAIT = 'P'
ORIENTATION_LANDSCAPE = 'L'
ORIENTATIONS = (
(ORIENTATION_PORTRAIT, _('Portrait')),
(ORIENTATION_LANDSCAPE, _('Landscape'))
)
orientation = ORIENTATION_PORTRAIT
margin_left = 8
margin_right = 8
margin_top = 8
margin_bottom = 8
def render(self, **kwargs):
        # Go through keyword arguments and save their values on this instance.
try:
from django.utils import six
except ImportError:
import six
for key, value in six.iteritems(kwargs):
setattr(self, key, value)
self.init_sizes()
self.init_pdf()
self.write_pdf_content()
self.response = HttpResponse(
content=self.pdf.output(dest='S'),
content_type='application/octet-stream'
)
# self.response['Content-Type'] = 'application/octet-stream; charset=UTF-8'
self.response['Content-Disposition'] =\
'attachment;filename="%(filename)s"' % {
'filename': self.get_filename()
}
return self.response
def get_filename(self):
return 'document.pdf'
def init_sizes(self):
# page sizes
self.page_width = self.FORMATS[self.format]['width']
self.page_height = self.FORMATS[self.format]['height']
if self.orientation == 'L':
self.page_width, self.page_height = self.page_height, self.page_width
# content sizes
self.content_width = self.page_width - self.margin_left - self.margin_right
self.content_height = self.page_height - self.margin_top - self.margin_bottom
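        # Worked example with the class defaults: A4 portrait is 210 x 297 mm, so with
        # 8 mm margins content_width = 210 - 8 - 8 = 194 mm and
        # content_height = 297 - 8 - 8 = 281 mm.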
def get_pdf_instance(self):
from fpdf import FPDF, HTMLMixin
class MyFPDF(FPDF, HTMLMixin):
pass
page_format = dict(self.FORMATS).get(self.format, None)
if page_format is not None:
pdf = MyFPDF(self.orientation, 'mm', (page_format['width'], page_format['height']))
else:
pdf = MyFPDF(self.orientation, 'mm', self.format)
return pdf
def init_pdf(self):
self.pdf = self.get_pdf_instance()
self.pdf.set_margins(self.margin_left, self.margin_top, self.margin_right)
self.pdf.set_auto_page_break(True, margin=self.margin_bottom)
self.pdf.add_page()
def write_pdf_content(self):
pass
class DisplayListViewMixin(object):
displays = []
paginate_by_display = {}
def dispatch(self, request, *args, **kwargs):
self.eval_get_paginate_by(request)
self.template_name_suffix = f'_{self.display}'
return super().dispatch(request, *args, **kwargs)
def eval_get_paginate_by(self, request):
paginate_values = self.paginate_by_display.get(self.display, None)
paginate_values = paginate_values if isinstance(paginate_values, list) else [paginate_values]
self.paginate_by = request.GET.get('paginate_by', next(iter(paginate_values)))
self.paginate_by = int(self.paginate_by) if self.paginate_by in map(str, paginate_values) else paginate_values[0]
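        # For illustration, assuming paginate_by_display = {'list': [25, 50], 'grid': 12}:
        # ?display=grid&paginate_by=50 resolves to 12, because 50 is not among the values
        # allowed for the 'grid' display.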
@property
def display(self):
display = self.request.GET.get('display', self.displays[0])
display = display if display in self.displays else self.displays[0]
return display
def get_paginate_by(self, queryset):
"""
Get the number of items to paginate by current display, or ``None`` for no pagination.
"""
return self.paginate_by
def get_context_data(self, *args, **kwargs):
context_data = super().get_context_data(*args, **kwargs)
context_data['display_modes'] = self.displays
context_data['paginate_by_display'] = self.paginate_by_display
context_data['paginate_by'] = self.paginate_by
return context_data
class PaginateListViewMixin(object):
def dispatch(self, request, *args, **kwargs):
return super().dispatch(request, *args, **kwargs)
def get_paginate_by(self, queryset):
"""
Get the number of items to paginate by current display, or ``None`` for no pagination.
"""
return self.paginate_by
def get_context_data(self, *args, **kwargs):
context_data = super().get_context_data(*args, **kwargs)
return context_data
class SortingListViewMixin(object):
sorting_options = {}
@property
def sorting(self):
first_sorting_option = next(iter(self.get_sorting_options().keys())) if len(self.get_sorting_options().keys()) > 0 else None
sorting = self.request.GET.get('sorting', first_sorting_option)
sorting = sorting if sorting in self.get_sorting_options() else first_sorting_option
sorting_value = self.get_sorting_options().get(sorting)
sorting = sorting_value[1] if isinstance(sorting_value, tuple) else sorting
return sorting
def get_context_data(self, *args, **kwargs):
context_data = super().get_context_data(*args, **kwargs)
context_data['sorting_options'] = self.get_sorting_options()
return context_data
def get_sorting_options(self):
return self.sorting_options
def get_queryset(self):
queryset = super().get_queryset()
return self.sort_queryset(queryset)
def sort_queryset(self, queryset):
if not self.sorting:
return queryset
if isinstance(self.sorting, list):
return queryset.order_by(*self.sorting)
sorting = F(self.sorting[1:]).desc(nulls_last=True) if self.sorting.startswith('-') else self.sorting
return queryset.order_by(sorting)
class SlugMixin(object):
MAX_SLUG_LENGTH = 150
FORCE_SLUG_REGENERATION = True
SLUG_FIELD = 'title'
def save(self, **kwargs):
if self.slug in EMPTY_VALUES or self.FORCE_SLUG_REGENERATION:
slug_field = getattr(self, self.SLUG_FIELD)
slug = slugify(slug_field)
self.slug = slug
index = 1
# Ensure uniqueness
while self.__class__.objects.filter(slug=self.slug).exclude(pk=self.pk).exists():
self.slug = f'{slug}-{index}'
index += 1
return super().save(**kwargs)
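        # Illustrative behaviour, assuming SLUG_FIELD = 'title' and two objects titled
        # "My Post": the first save stores slug "my-post", the second finds it taken and
        # stores "my-post-1", incrementing the index until the slug is unique.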
class PdfDetailMixin(object):
inline = True
def dispatch(self, request, *args, **kwargs):
response = super().dispatch(request, *args, **kwargs)
if not getattr(response, 'is_rendered', True) and callable(getattr(response, 'render', None)):
response.render()
response = self.render_pdf(response.content, self.get_filename(), self.inline)
return response
def get_filename(self):
return f'{self.get_object()}.pdf'
@staticmethod
def render_pdf(html_content, filename='output.pdf', inline=True):
content_type = 'inline' if inline else 'attachment'
api_url = settings.HTMLTOPDF_API_URL
pdf_response = requests.post(api_url, data=html_content)
buffer = io.BytesIO(pdf_response.content)
from PyPDF2 import PdfFileMerger
pdf_merger = PdfFileMerger()
pdf_merger.append(buffer)
pdf_merger.addMetadata({'/Title': filename})
pdf_merger.write(buffer)
buffer.seek(0)
response = HttpResponse(buffer, content_type='application/pdf')
response['Content-Disposition'] = f'{content_type}; filename="{filename}"'
return response
| 36.703883 | 132 | 0.655006 |
c118001dd600a2c5c77f2d1a4981bfb351e81de5 | 3,366 | py | Python | profiles_project/settings.py | rskrobotics/profiles-rest-api | ad24790dbea365b922f4e84a4c09b34802f4539d | [
"MIT"
] | null | null | null | profiles_project/settings.py | rskrobotics/profiles-rest-api | ad24790dbea365b922f4e84a4c09b34802f4539d | [
"MIT"
] | null | null | null | profiles_project/settings.py | rskrobotics/profiles-rest-api | ad24790dbea365b922f4e84a4c09b34802f4539d | [
"MIT"
] | null | null | null | """
Django settings for profiles_project project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'm!!rbwfe$*1ka^n(-0$jc9)mfd&1gc$d2$qvclto!&4=je6z%r'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool(int(os.environ.get('DEBUG', 1)))
ALLOWED_HOSTS = ['ec2-13-51-207-9.eu-north-1.compute.amazonaws.com',
'127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'profiles_api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = 'static/'
AUTH_USER_MODEL = 'profiles_api.UserProfile'
| 27.818182 | 91 | 0.696078 |
dd350915621691a2f01c3d124ef865b825a19d93 | 5,837 | py | Python | storage/team10/Node.py | itsmjoe/tytus | 3b0341cc854d67979b766c5c8b06ed172ce0c913 | [
"MIT"
] | null | null | null | storage/team10/Node.py | itsmjoe/tytus | 3b0341cc854d67979b766c5c8b06ed172ce0c913 | [
"MIT"
] | null | null | null | storage/team10/Node.py | itsmjoe/tytus | 3b0341cc854d67979b766c5c8b06ed172ce0c913 | [
"MIT"
] | null | null | null |
class Node:
def __init__(self):
self.array = []
self.key = -1
self.pk = None
self.isGeneric = False
def insert(self, dato, key):
        self.array.append((key,dato))  # now also receives the key parameter
lista = self.array.copy()
lista_ordenada= self.quick_sorted(lista)
self.array.clear()
for i in lista_ordenada:
self.array.append(i)
def buscarDato_binary(self, dato):
inicio = 0
final = len(self.array) -1
while inicio <= final:
mid = inicio + (final - inicio) //2
arreglo = self.array[mid]
# if int(arreglo[0]) == int(dato):
if int(arreglo[0]) == int(dato):
return True
elif int(dato) < int(arreglo[0]):
final = mid -1
else:
inicio = mid +1
return False
def busquedaB(self, dato):
inicio = 0
final = len(self.array) -1
while inicio <= final:
mid = inicio + (final - inicio) //2
arreglo = self.array[mid]
# if int(arreglo[0]) == int(dato):
if int(arreglo[0]) == int(dato):
return arreglo
elif int(dato) < int(arreglo[0]):
final = mid -1
else:
inicio = mid +1
return None
def quick_sorted(self, sequencia):
lista = sequencia
if(len(lista)) <= 1:
return lista
else:
pivote = lista.pop()
elementos_mayores = []
elementos_menores = []
elemento_medio= []
elemento_medio.append(pivote)
for elemento in lista:
if int(elemento[0]) > int(pivote[0]):
elementos_mayores.append(elemento)
else:
elementos_menores.append(elemento)
return self.quick_sorted(elementos_menores) + elemento_medio + self.quick_sorted(elementos_mayores)
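        # Illustrative behaviour (pairs are ordered by their integer key):
        #   >>> Node().quick_sorted([(3, 'c'), (1, 'a'), (2, 'b')])
        #   [(1, 'a'), (2, 'b'), (3, 'c')]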
def eliminar(self, dato):
if self.Eliminar_porbusqueda(dato):
lista = self.array[:]
lista_ordenada= self.quick_sorted(lista)
self.array.clear()
self.array = lista_ordenada[:]
if len(self.array) == 0:
return 0
else:
return True
else:
return False
def modificar(self, columna, modificacion, key):
try:
inicio = 0
final = len(self.array) -1
while inicio <= final:
mid = inicio + (final - inicio) //2
arreglo = self.array[mid]
# if int(arreglo[0]) == int(key):
if int(arreglo[0]) == int(key):
self.array[mid][1][columna] = modificacion
return 0
elif int(key) < int(arreglo[0]):
final = mid -1
else:
inicio = mid +1
return 4
except :
return 1
def Eliminar_porbusqueda(self, dato):
inicio = 0
final = len(self.array) -1
while inicio <= final:
mid = inicio + (final - inicio) //2
arreglo = self.array[mid]
# if int(arreglo[0]) == int(dato):
if int(arreglo[0]) == int(dato):
self.array.pop(mid)
return True
elif int(dato) < int(arreglo[0]):
final = mid -1
else:
inicio = mid +1
return None
def obtenerLower(self,columna,valor,lower): ##Cristian 17/12/2020
let=""
contador = 0
x= len(lower)-1
for i in valor[columna]:
if contador > x:
if lower.upper() == let.upper():
return True
else:
return False
else:
contador+=1
let+=i
def obtenerUpper(self,columna,valor,upper):
let=""
contador = 0
x= len(upper)-1
valor1 = valor[columna]
for i in valor1[::-1]:
if contador > x:
if upper.upper() == let[::-1].upper():
return True
else:
return False
else:
contador+=1
let+=i
def Numbervalidation(self,columnNumber,valor,lower,upper):
if int(valor[columnNumber]) <= upper and int(valor[columnNumber]) >= lower :
return valor
else:
return None
def imp_column(self,subnodo,columnNumber,lower,upper):
if isinstance(lower, int) == True:
return self.Numbervalidation(columnNumber,subnodo,lower,upper)
else:
if self.obtenerLower(columnNumber,subnodo,lower) == True and self.obtenerUpper(columnNumber,subnodo,upper) == True: ##
return subnodo
else:
return None
def imp_column2(self,columnNumber,lower,upper):
if isinstance(lower, int) == True:
for i in self.array:
return self.Numbervalidation(columnNumber,i[1],lower,upper)
else:
for i in self.array:
if self.obtenerLower(columnNumber,i[1],lower) == True and self.obtenerUpper(columnNumber,i[1],upper) == True:
return i[1]
else:
return None
    # adds a column by appending the given value to every stored row
def alterAddColumn(self, dato):
try:
for i in self.array:
i[1].append(dato)
# print("ya jalo")
except Exception as e:
print("########")
print("en el nodo")
print(e)
print("########")
| 32.248619 | 130 | 0.479356 |
7f4613c97f2f1c8e8b1740fd801e0af8c0bccd73 | 720 | py | Python | week3/class3-ex7.py | sh0gunofharlem/PyNetClass | a09bc6dec14f20a726777c107be756752054d935 | [
"Apache-2.0"
] | 1 | 2019-01-29T21:44:43.000Z | 2019-01-29T21:44:43.000Z | week3/class3-ex7.py | sh0gunofharlem/PyNetClass | a09bc6dec14f20a726777c107be756752054d935 | [
"Apache-2.0"
] | null | null | null | week3/class3-ex7.py | sh0gunofharlem/PyNetClass | a09bc6dec14f20a726777c107be756752054d935 | [
"Apache-2.0"
] | null | null | null |
import yaml
import re
from ciscoconfparse import CiscoConfParse
from netmiko import ConnectHandler
from getpass import getpass
from pprint import pprint
infile = input("Where is your device list: ")
bgp_config = """
router bgp 44
bgp router-id 10.220.88.38
address-family ipv4 unicast
!
neighbor 10.220.88.20
remote-as 42
description pynet-rtr1
address-family ipv4 unicast
route-policy ALLOW in
route-policy ALLOW out
!
!
neighbor 10.220.88.32
remote-as 43
address-family ipv4 unicast
route-policy ALLOW in
route-policy ALLOW out
"""
cisco_obj = CiscoConfParse(bgp_config.splitlines())
match_nei = cisco_obj.find_objects_w_child(parentspec=r"^router bgp", childspec=r"^neighbor")
| 18.947368 | 93 | 0.756944 |
328fb94863d5b80cc7d23f6c54f1e975acae1125 | 905 | py | Python | envs/goalgan/ant_maze/ant_maze_env.py | vincentlui/megae | 16b8d29377e3180447b03cb8f5120e9e086ad56d | [
"MIT"
] | 82 | 2020-07-06T16:53:39.000Z | 2022-03-25T18:12:35.000Z | envs/goalgan/ant_maze/ant_maze_env.py | vincentlui/megae | 16b8d29377e3180447b03cb8f5120e9e086ad56d | [
"MIT"
] | 7 | 2020-10-21T21:25:12.000Z | 2022-01-13T02:58:46.000Z | envs/goalgan/ant_maze/ant_maze_env.py | vincentlui/megae | 16b8d29377e3180447b03cb8f5120e9e086ad56d | [
"MIT"
] | 19 | 2020-06-05T23:11:32.000Z | 2022-03-28T08:43:27.000Z |
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from envs.goalgan.ant_maze.maze_env import MazeEnv
from envs.goalgan.ant_maze.ant import AntEnv
class AntMazeEnv(MazeEnv):
MODEL_CLASS = AntEnv
ORI_IND = 6
MAZE_HEIGHT = 0.5
MAZE_SIZE_SCALING = 3.0 | 36.2 | 80 | 0.692818 |
c1db9facb8407258d245b17c41d9647e6df0fc74 | 3,613 | py | Python | hammer/identification/lambdas/iam-user-inactive-keys-identification/describe_iam_accesskey_details.py | kaiboma/hammer | 674028a067cad91ce1a84577d30afe7b895f2a6a | [
"Apache-2.0"
] | 385 | 2018-07-19T17:19:11.000Z | 2022-03-29T06:54:30.000Z | hammer/identification/lambdas/iam-user-inactive-keys-identification/describe_iam_accesskey_details.py | kaiboma/hammer | 674028a067cad91ce1a84577d30afe7b895f2a6a | [
"Apache-2.0"
] | 33 | 2018-07-31T21:05:06.000Z | 2022-02-26T04:04:07.000Z | hammer/identification/lambdas/iam-user-inactive-keys-identification/describe_iam_accesskey_details.py | kaiboma/hammer | 674028a067cad91ce1a84577d30afe7b895f2a6a | [
"Apache-2.0"
] | 62 | 2018-07-19T17:19:50.000Z | 2022-03-11T15:06:16.000Z |
import json
import logging
from library.logger import set_logging
from library.config import Config
from library.aws.iam import IAMKeyChecker
from library.aws.utility import Account, DDB
from library.ddb_issues import IssueStatus, IAMKeyInactiveIssue
from library.ddb_issues import Operations as IssueOperations
def lambda_handler(event, context):
""" Lambda handler to evaluate iam user inactive keys """
set_logging(level=logging.INFO)
try:
payload = json.loads(event["Records"][0]["Sns"]["Message"])
account_id = payload['account_id']
account_name = payload['account_name']
# if request_id is present in payload then this lambda was called from the API
request_id = payload.get('request_id', None)
except Exception:
logging.exception(f"Failed to parse event\n{event}")
return
try:
config = Config()
main_account = Account(region=config.aws.region)
ddb_table = main_account.resource("dynamodb").Table(config.iamUserInactiveKeys.ddb_table_name)
account = Account(id=account_id,
name=account_name,
role_name=config.aws.role_name_identification)
if account.session is None:
return
logging.debug(f"Checking for IAM user inactive keys in {account}")
# existing open issues for account to check if resolved
open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, IAMKeyInactiveIssue)
# make dictionary for fast search by id
# and filter by current region
open_issues = {issue.issue_id: issue for issue in open_issues}
logging.debug(f"Users with inactive keys in DDB:\n{open_issues.keys()}")
checker = IAMKeyChecker(account=account,
now=config.now,
inactive_criteria_days=config.iamUserInactiveKeys.inactive_criteria_days)
if not checker.check(last_used_check_enabled=True):
return
for user in checker.users:
for key in user.inactive_keys:
issue = IAMKeyInactiveIssue(account_id, key.id)
issue.issue_details.username = user.id
issue.issue_details.last_used = key.last_used.isoformat()
issue.issue_details.create_date = key.create_date.isoformat()
if config.iamUserInactiveKeys.in_whitelist(account_id, key.id) or config.iamUserInactiveKeys.in_whitelist(account_id, user.id):
issue.status = IssueStatus.Whitelisted
else:
issue.status = IssueStatus.Open
logging.debug(f"Setting {key.id}/{user.id} status {issue.status}")
IssueOperations.update(ddb_table, issue)
# remove issue id from open_issues (if exists)
# as we already checked it
open_issues.pop(key.id, None)
logging.debug(f"Inactive keys in DDB:\n{open_issues.keys()}")
# all other unresolved issues in DDB are for removed/remediated keys
for issue in open_issues.values():
IssueOperations.set_status_resolved(ddb_table, issue)
if request_id:
api_table = main_account.resource("dynamodb").Table(config.api.ddb_table_name)
DDB.track_progress(api_table, request_id)
except Exception:
logging.exception(f"Failed to check IAM user inactive keys for '{account_id} ({account_name})'")
return
logging.debug(f"Checked IAM user inactive keys for '{account_id} ({account_name})'")
| 44.060976 | 143 | 0.659839 |
2a1d0fd6e159052d91a0c59cca97fd274c0bea90 | 2,159 | py | Python | examples/inverted_pendulum.py | zadiq/PILCO | dc809d9d3f11abc5c1ead12caf9da55a738e4a8f | [
"MIT"
] | 1 | 2021-04-11T03:26:06.000Z | 2021-04-11T03:26:06.000Z | examples/inverted_pendulum.py | zadiq/PILCO | dc809d9d3f11abc5c1ead12caf9da55a738e4a8f | [
"MIT"
] | null | null | null | examples/inverted_pendulum.py | zadiq/PILCO | dc809d9d3f11abc5c1ead12caf9da55a738e4a8f | [
"MIT"
] | null | null | null |
import numpy as np
import gym
from pilco.models import PILCO
from pilco.controllers import RbfController, LinearController
from pilco.rewards import ExponentialReward
import tensorflow as tf
from tensorflow import logging
np.random.seed(0)
def rollout(policy, timesteps):
X = []; Y = []
env.reset()
x, _, _, _ = env.step(0)
for timestep in range(timesteps):
env.render()
u = policy(x)
x_new, _, done, _ = env.step(u)
if done: break
X.append(np.hstack((x, u)))
Y.append(x_new - x)
x = x_new
return np.stack(X), np.stack(Y)
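# For reference: each row of X is np.hstack((x, u)), i.e. the state-action pair at time t,
# and the matching row of Y is x_new - x, the state delta the dynamics model is fitted to predict.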
def random_policy(x):
return env.action_space.sample()
def pilco_policy(x):
return pilco.compute_action(x[None, :])[0, :]
with tf.Session(graph=tf.Graph()) as sess:
env = gym.make('InvertedPendulum-v2')
# Initial random rollouts to generate a dataset
X,Y = rollout(policy=random_policy, timesteps=40)
for i in range(1,3):
X_, Y_ = rollout(policy=random_policy, timesteps=40)
X = np.vstack((X, X_))
Y = np.vstack((Y, Y_))
state_dim = Y.shape[1]
control_dim = X.shape[1] - state_dim
controller = RbfController(state_dim=state_dim, control_dim=control_dim, num_basis_functions=5)
#controller = LinearController(state_dim=state_dim, control_dim=control_dim)
pilco = PILCO(X, Y, controller=controller, horizon=40)
# Example of user provided reward function, setting a custom target state
# R = ExponentialReward(state_dim=state_dim, t=np.array([0.1,0,0,0]))
# pilco = PILCO(X, Y, controller=controller, horizon=40, reward=R)
# Example of fixing a parameter, optional, for a linear controller only
#pilco.controller.b = np.array([[0.0]])
#pilco.controller.b.trainable = False
for rollouts in range(3):
pilco.optimize_models()
pilco.optimize_policy()
import pdb; pdb.set_trace()
X_new, Y_new = rollout(policy=pilco_policy, timesteps=100)
print("No of ops:", len(tf.get_default_graph().get_operations()))
# Update dataset
X = np.vstack((X, X_new)); Y = np.vstack((Y, Y_new))
pilco.mgpr.set_XY(X, Y)
| 33.734375 | 99 | 0.660491 |
9bc211833dbd32f6c99013d719a08fc32474b63f | 47,758 | py | Python | src/py/convert_questions.py | mmerveunlu/adversarial-squad | f271bc2d4abe201daeba14f1d9fd960ed376d5a8 | [
"MIT"
] | null | null | null | src/py/convert_questions.py | mmerveunlu/adversarial-squad | f271bc2d4abe201daeba14f1d9fd960ed376d5a8 | [
"MIT"
] | null | null | null | src/py/convert_questions.py | mmerveunlu/adversarial-squad | f271bc2d4abe201daeba14f1d9fd960ed376d5a8 | [
"MIT"
] | null | null | null | """Variety of tools regarding the AddSent adversary."""
import argparse
import collections
import json
import math
import sys
sys.path.insert(0, 'nectar/nectar/corenlp')
from nectar import corenlp
from nltk.corpus import wordnet as wn
from nltk.stem.lancaster import LancasterStemmer
import os
from pattern import en as pattern
import random
import re
from termcolor import colored
import sys
OPTS = None
STEMMER = LancasterStemmer()
POS_TO_WORDNET = {
'NN': wn.NOUN,
'JJ': wn.ADJ,
'JJR': wn.ADJ,
'JJS': wn.ADJ,
}
# Map to pattern.en aliases
# http://www.clips.ua.ac.be/pages/pattern-en#conjugation
POS_TO_PATTERN = {
'vb': 'inf', # Infinitive
'vbp': '1sg', # non-3rd-person singular present
'vbz': '3sg', # 3rd-person singular present
'vbg': 'part', # gerund or present participle
'vbd': 'p', # past
'vbn': 'ppart', # past participle
}
# Tenses prioritized by likelihood of arising
PATTERN_TENSES = ['inf', '3sg', 'p', 'part', 'ppart', '1sg']
# Constants
DATASETS = {
'dev': 'data/LecturesEE/train-model-lectures-ee.json',
'sample1k': 'out/none_n1000_k1_s0.json',
'train': 'data/LecturesEE/train-model-lectures-ee.json',
'data_dir':'data/LecturesEE/'
}
CORENLP_CACHES = {
'dev': 'data/LecturesEE/corenlp_cache.json',
'sample1k': 'data/LecturesEE/corenlp_cache.json',
'train': 'data/LecturesEE/train_corenlp_cache.json',
'data_dir':'data/LecturesEE/'
}
NEARBY_GLOVE_FILE = 'out-lecturesee/nearby_n100_glove_6B_100d.json'
POSTAG_FILE = 'data/postag_dict.json'
CORENLP_LOG = 'corenlp.log'
CORENLP_PORT = 8101
COMMANDS = ['print-questions', 'print-answers', 'corenlp', 'convert-q',
'inspect-q', 'alter-separate', 'alter-best', 'alter-all', 'gen-a',
'e2e-lies', 'e2e-highConf', 'e2e-all',
'dump-placeholder', 'dump-lies', 'dump-highConf', 'dump-hcSeparate', 'dump-altAll']
def parse_args():
parser = argparse.ArgumentParser('Converts SQuAD questions into declarative sentences.')
parser.add_argument('command',
help='Command (options: [%s]).' % (', '.join(COMMANDS)))
parser.add_argument('--rule', '-r', help='Rule to inspect')
parser.add_argument('--dataset', '-d', default='dev',
help='Which dataset (options: [%s])' % (', '.join(DATASETS)))
parser.add_argument('--seed', '-s', default=-1, type=int, help='Shuffle with RNG seed.')
parser.add_argument('--modified-answers', '-m', default=False,
action='store_true',help='Use the modified answers')
parser.add_argument('--prepend', '-p', default=False,
action='store_true',help='Prepend sentences.')
parser.add_argument('--quiet', '-q', default=False, action='store_true')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def read_data():
filename = DATASETS[OPTS.dataset]
with open(filename) as f:
return json.load(f)
def load_cache():
cache_file = CORENLP_CACHES[OPTS.dataset]
with open(cache_file) as f:
return json.load(f)
def load_postag_dict():
with open(POSTAG_FILE) as f:
return json.load(f)
def load_nearby_words():
with open(NEARBY_GLOVE_FILE) as f:
return json.load(f)
def compress_whnp(tree, inside_whnp=False):
if not tree.children: return tree # Reached leaf
# Compress all children
for i, c in enumerate(tree.children):
tree.children[i] = compress_whnp(c, inside_whnp=inside_whnp or tree.tag == 'WHNP')
if tree.tag != 'WHNP':
if inside_whnp:
# Wrap everything in an NP
return corenlp.ConstituencyParse('NP', children=[tree])
return tree
wh_word = None
new_np_children = []
new_siblings = []
for i, c in enumerate(tree.children):
if i == 0:
if c.tag in ('WHNP', 'WHADJP', 'WHAVP', 'WHPP'):
wh_word = c.children[0]
new_np_children.extend(c.children[1:])
elif c.tag in ('WDT', 'WP', 'WP$', 'WRB'):
wh_word = c
else:
# No WH-word at start of WHNP
return tree
else:
if c.tag == 'SQ': # Due to bad parse, SQ may show up here
new_siblings = tree.children[i:]
break
# Wrap everything in an NP
new_np_children.append(corenlp.ConstituencyParse('NP', children=[c]))
if new_np_children:
new_np = corenlp.ConstituencyParse('NP', children=new_np_children)
new_tree = corenlp.ConstituencyParse('WHNP', children=[wh_word, new_np])
else:
new_tree = tree
if new_siblings:
new_tree = corenlp.ConstituencyParse('SBARQ', children=[new_tree] + new_siblings)
return new_tree
def read_const_parse(parse_str):
tree = corenlp.ConstituencyParse.from_corenlp(parse_str)
new_tree = compress_whnp(tree)
return new_tree
### Rules for converting questions into declarative sentences
def fix_style(s):
"""Minor, general style fixes for questions."""
s = s.replace('?', '') # Delete question marks anywhere in sentence.
s = s.strip(' .')
if s[0] == s[0].lower():
s = s[0].upper() + s[1:]
return s + '.'
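# Illustrative behaviour:
#   >>> fix_style('where did the team play?')
#   'Where did the team play.'
# The question mark is dropped, the first letter is capitalised and a final period is appended.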
CONST_PARSE_MACROS = {
'$Noun': '$NP/$NN/$NNS/$NNP/$NNPS',
'$Verb': '$VB/$VBD/$VBP/$VBZ',
'$Part': '$VBN/$VG',
'$Be': 'is/are/was/were',
'$Do': "do/did/does/don't/didn't/doesn't",
'$WHP': '$WHADJP/$WHADVP/$WHNP/$WHPP',
}
def _check_match(node, pattern_tok):
if pattern_tok in CONST_PARSE_MACROS:
pattern_tok = CONST_PARSE_MACROS[pattern_tok]
if ':' in pattern_tok:
# ':' means you match the LHS category and start with something on the right
lhs, rhs = pattern_tok.split(':')
match_lhs = _check_match(node, lhs)
if not match_lhs: return False
phrase = node.get_phrase().lower()
retval = any(phrase.startswith(w) for w in rhs.split('/'))
return retval
elif '/' in pattern_tok:
return any(_check_match(node, t) for t in pattern_tok.split('/'))
return ((pattern_tok.startswith('$') and pattern_tok[1:] == node.tag) or
(node.word and pattern_tok.lower() == node.word.lower()))
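# For illustration: '$Be' expands (via CONST_PARSE_MACROS) to 'is/are/was/were', so that
# pattern token matches a leaf whose word is any of those forms, while a token such as
# '$VP:named' matches a VP node whose phrase starts with "named".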
def _recursive_match_pattern(pattern_toks, stack, matches):
"""Recursively try to match a pattern, greedily."""
if len(matches) == len(pattern_toks):
# We matched everything in the pattern; also need stack to be empty
return len(stack) == 0
if len(stack) == 0: return False
cur_tok = pattern_toks[len(matches)]
node = stack.pop()
# See if we match the current token at this level
is_match = _check_match(node, cur_tok)
if is_match:
cur_num_matches = len(matches)
matches.append(node)
new_stack = list(stack)
success = _recursive_match_pattern(pattern_toks, new_stack, matches)
if success: return True
# Backtrack
while len(matches) > cur_num_matches:
matches.pop()
# Recurse to children
if not node.children: return False # No children to recurse on, we failed
stack.extend(node.children[::-1]) # Leftmost children should be popped first
return _recursive_match_pattern(pattern_toks, stack, matches)
def match_pattern(pattern, const_parse):
pattern_toks = pattern.split(' ')
whole_phrase = const_parse.get_phrase()
if whole_phrase.endswith('?') or whole_phrase.endswith('.'):
# Match trailing punctuation as needed
pattern_toks.append(whole_phrase[-1])
matches = []
success = _recursive_match_pattern(pattern_toks, [const_parse], matches)
if success:
return matches
else:
return None
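# Post-processing directives attached to a ConstituencyRule: 'lower' lowercases the
# slot, 'tense-N' re-conjugates the slot to the tense of matched item N, and
# POS-style rules (e.g. 'vbg') conjugate via the pattern.en library.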
def run_postprocessing(s, rules, all_args):
rule_list = rules.split(',')
for rule in rule_list:
if rule == 'lower':
s = s.lower()
elif rule.startswith('tense-'):
ind = int(rule[6:])
orig_vb = all_args[ind]
tenses = pattern.tenses(orig_vb)
for tense in PATTERN_TENSES: # Prioritize by PATTERN_TENSES
if tense in tenses:
break
else: # Default to first tense
tense = PATTERN_TENSES[0]
s = pattern.conjugate(s, tense)
elif rule in POS_TO_PATTERN:
s = pattern.conjugate(s, POS_TO_PATTERN[rule])
return s
def convert_whp(node, q, a, tokens):
if node.tag in ('WHNP', 'WHADJP', 'WHADVP', 'WHPP'):
# Apply WHP rules
cur_phrase = node.get_phrase()
cur_tokens = tokens[node.get_start_index():node.get_end_index()]
for r in WHP_RULES:
phrase = r.convert(cur_phrase, a, cur_tokens, node, run_fix_style=False)
if phrase:
if not OPTS.quiet:
print(' WHP Rule "%s": %s' % (r.name, colored(phrase, 'yellow')))
return phrase
return None
class ConversionRule(object):
def convert(self, q, a, tokens, const_parse, run_fix_style=True):
raise NotImplementedError
class ConstituencyRule(ConversionRule):
"""A rule for converting question to sentence based on constituency parse."""
def __init__(self, in_pattern, out_pattern, postproc=None):
self.in_pattern = in_pattern # e.g. "where did $NP $VP"
self.out_pattern = str(out_pattern)
# e.g. "{1} did {2} at {0}." Answer is always 0
self.name = in_pattern
if postproc:
self.postproc = postproc
else:
self.postproc = {}
def convert(self, q, a, tokens, const_parse, run_fix_style=True):
pattern_toks = self.in_pattern.split(' ') # Don't care about trailing punctuation
match = match_pattern(self.in_pattern, const_parse)
appended_clause = False
if not match:
# Try adding a PP at the beginning
appended_clause = True
new_pattern = '$PP , ' + self.in_pattern
pattern_toks = new_pattern.split(' ')
match = match_pattern(new_pattern, const_parse)
if not match:
# Try adding an SBAR at the beginning
new_pattern = '$SBAR , ' + self.in_pattern
pattern_toks = new_pattern.split(' ')
match = match_pattern(new_pattern, const_parse)
if not match: return None
appended_clause_match = None
fmt_args = [a]
for t, m in zip(pattern_toks, match):
if t.startswith('$') or '/' in t:
# First check if it's a WHP
phrase = convert_whp(m, q, a, tokens)
if not phrase:
phrase = m.get_phrase()
fmt_args.append(phrase)
if appended_clause:
appended_clause_match = fmt_args[1]
fmt_args = [a] + fmt_args[2:]
for i in range(len(fmt_args)):
if i in self.postproc:
# Run postprocessing filters
fmt_args[i] = run_postprocessing(fmt_args[i], self.postproc[i], fmt_args)
output = self.gen_output(fmt_args)
if appended_clause:
output = appended_clause_match + ', ' + output
if run_fix_style:
output = fix_style(output)
return output
def gen_output(self, fmt_args):
"""By default, use self.out_pattern. Can be overridden."""
return self.out_pattern.format(*fmt_args)
class ReplaceRule(ConversionRule):
"""A simple rule that replaces some tokens with the answer."""
def __init__(self, target, replacement='{}', start=False):
self.target = target
self.replacement = str(replacement)
self.name = 'replace(%s)' % target
self.start = start
def convert(self, q, a, tokens, const_parse, run_fix_style=True):
t_toks = self.target.split(' ')
q_toks = q.rstrip('?.').split(' ')
replacement_text = self.replacement.format(a)
for i in range(len(q_toks)):
if self.start and i != 0: continue
if ' '.join(q_toks[i:i + len(t_toks)]).rstrip(',').lower() == self.target:
begin = q_toks[:i]
end = q_toks[i + len(t_toks):]
output = ' '.join(begin + [replacement_text] + end)
if run_fix_style:
output = fix_style(output)
return output
return None
class FindWHPRule(ConversionRule):
"""A rule that looks for $WHP's from right to left and does replacements."""
name = 'FindWHP'
def _recursive_convert(self, node, q, a, tokens, found_whp):
if node.word: return node.word, found_whp
if not found_whp:
whp_phrase = convert_whp(node, q, a, tokens)
if whp_phrase: return whp_phrase, True
child_phrases = []
for c in node.children[::-1]:
c_phrase, found_whp = self._recursive_convert(c, q, a, tokens, found_whp)
child_phrases.append(c_phrase)
out_toks = []
for i, p in enumerate(child_phrases[::-1]):
if i == 0 or p.startswith("'"):
out_toks.append(p)
else:
out_toks.append(' ' + p)
return ''.join(out_toks), found_whp
def convert(self, q, a, tokens, const_parse, run_fix_style=True):
out_phrase, found_whp = self._recursive_convert(const_parse, q, a, tokens, False)
if found_whp:
if run_fix_style:
out_phrase = fix_style(out_phrase)
return out_phrase
return None
class AnswerRule(ConversionRule):
"""Just return the answer."""
name = 'AnswerRule'
def convert(self, q, a, tokens, const_parse, run_fix_style=True):
return a
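# In the out_pattern format strings below, {0} is always the (fake) answer and
# {1}, {2}, ... are the phrases matched by the corresponding pattern tokens, in order.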
CONVERSION_RULES = [
# Special rules
ConstituencyRule('$WHP:what $Be $NP called that $VP', '{2} that {3} {1} called {1}'),
# What type of X
#ConstituencyRule("$WHP:what/which type/sort/kind/group of $NP/$Noun $Be $NP", '{5} {4} a {1} {3}'),
#ConstituencyRule("$WHP:what/which type/sort/kind/group of $NP/$Noun $Be $VP", '{1} {3} {4} {5}'),
#ConstituencyRule("$WHP:what/which type/sort/kind/group of $NP $VP", '{1} {3} {4}'),
# How $JJ
ConstituencyRule('how $JJ $Be $NP $IN $NP', '{3} {2} {0} {1} {4} {5}'),
ConstituencyRule('how $JJ $Be $NP $SBAR', '{3} {2} {0} {1} {4}'),
ConstituencyRule('how $JJ $Be $NP', '{3} {2} {0} {1}'),
# When/where $Verb
ConstituencyRule('$WHP:when/where $Do $NP', '{3} occurred in {1}'),
ConstituencyRule('$WHP:when/where $Do $NP $Verb', '{3} {4} in {1}', {4: 'tense-2'}),
ConstituencyRule('$WHP:when/where $Do $NP $Verb $NP/$PP', '{3} {4} {5} in {1}', {4: 'tense-2'}),
ConstituencyRule('$WHP:when/where $Do $NP $Verb $NP $PP', '{3} {4} {5} {6} in {1}', {4: 'tense-2'}),
ConstituencyRule('$WHP:when/where $Be $NP', '{3} {2} in {1}'),
ConstituencyRule('$WHP:when/where $Verb $NP $VP/$ADJP', '{3} {2} {4} in {1}'),
# What/who/how $Do
ConstituencyRule("$WHP:what/which/who $Do $NP do", '{3} {1}', {0: 'tense-2'}),
ConstituencyRule("$WHP:what/which/who/how $Do $NP $Verb", '{3} {4} {1}', {4: 'tense-2'}),
ConstituencyRule("$WHP:what/which/who $Do $NP $Verb $IN/$NP", '{3} {4} {5} {1}', {4: 'tense-2', 0: 'vbg'}),
ConstituencyRule("$WHP:what/which/who $Do $NP $Verb $PP", '{3} {4} {1} {5}', {4: 'tense-2', 0: 'vbg'}),
ConstituencyRule("$WHP:what/which/who $Do $NP $Verb $NP $VP", '{3} {4} {5} {6} {1}', {4: 'tense-2'}),
ConstituencyRule("$WHP:what/which/who $Do $NP $Verb to $VB", '{3} {4} to {5} {1}', {4: 'tense-2'}),
ConstituencyRule("$WHP:what/which/who $Do $NP $Verb to $VB $VP", '{3} {4} to {5} {1} {6}', {4: 'tense-2'}),
ConstituencyRule("$WHP:what/which/who/how $Do $NP $Verb $NP $IN $VP", '{3} {4} {5} {6} {1} {7}', {4: 'tense-2'}),
ConstituencyRule("$WHP:what/which/who/how $Do $NP $Verb $PP/$S/$VP/$SBAR/$SQ", '{3} {4} {1} {5}', {4: 'tense-2'}),
ConstituencyRule("$WHP:what/which/who/how $Do $NP $Verb $PP $PP/$S/$VP/$SBAR", '{3} {4} {1} {5} {6}', {4: 'tense-2'}),
# What/who/how $Be
# Watch out for things that end in a preposition
ConstituencyRule("$WHP:what/which/who $Be/$MD $NP of $NP $Verb/$Part $IN", '{3} of {4} {2} {5} {6} {1}'),
ConstituencyRule("$WHP:what/which/who $Be/$MD $NP $NP $IN", '{3} {2} {4} {5} {1}'),
ConstituencyRule("$WHP:what/which/who $Be/$MD $NP $VP/$IN", '{3} {2} {4} {1}'),
ConstituencyRule("$WHP:what/which/who $Be/$MD $NP $IN $NP/$VP", '{1} {2} {3} {4} {5}'),
ConstituencyRule('$WHP:what/which/who $Be/$MD $NP $Verb $PP', '{3} {2} {4} {1} {5}'),
ConstituencyRule('$WHP:what/which/who $Be/$MD $NP/$VP/$PP', '{1} {2} {3}'),
ConstituencyRule("$WHP:how $Be/$MD $NP $VP", '{3} {2} {4} by {1}'),
# What/who $Verb
ConstituencyRule("$WHP:what/which/who $VP", '{1} {2}'),
# $IN what/which $NP
ConstituencyRule('$IN what/which $NP $Do $NP $Verb $NP', '{5} {6} {7} {1} the {3} of {0}',
{1: 'lower', 6: 'tense-4'}),
ConstituencyRule('$IN what/which $NP $Be $NP $VP/$ADJP', '{5} {4} {6} {1} the {3} of {0}',
{1: 'lower'}),
ConstituencyRule('$IN what/which $NP $Verb $NP/$ADJP $VP', '{5} {4} {6} {1} the {3} of {0}',
{1: 'lower'}),
FindWHPRule(),
]
# Rules for going from WHP to an answer constituent
WHP_RULES = [
# WHPP rules
ConstituencyRule('$IN what/which type/sort/kind/group of $NP/$Noun', '{1} {0} {4}'),
ConstituencyRule('$IN what/which type/sort/kind/group of $NP/$Noun $PP', '{1} {0} {4} {5}'),
ConstituencyRule('$IN what/which $NP', '{1} the {3} of {0}'),
ConstituencyRule('$IN $WP/$WDT', '{1} {0}'),
# what/which
ConstituencyRule('what/which type/sort/kind/group of $NP/$Noun', '{0} {3}'),
ConstituencyRule('what/which type/sort/kind/group of $NP/$Noun $PP', '{0} {3} {4}'),
ConstituencyRule('what/which $NP', 'the {2} of {0}'),
# How many
ConstituencyRule('how many/much $NP', '{0} {2}'),
# Replace
ReplaceRule('what'),
ReplaceRule('who'),
ReplaceRule('how many'),
ReplaceRule('how much'),
ReplaceRule('which'),
ReplaceRule('where'),
ReplaceRule('when'),
ReplaceRule('why'),
ReplaceRule('how'),
# Just give the answer
AnswerRule(),
]
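# Flatten the SQuAD-format JSON into (question, answers, context) triples.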
def get_qas(dataset):
qas = []
for article in dataset['data']:
for paragraph in article['paragraphs']:
for qa in paragraph['qas']:
question = qa['question'].strip()
answers = sorted(qa['answers'],
key=lambda x: len(x['text'])) # Prefer shorter answers
qas.append((question, answers, paragraph['context']))
return qas
def print_questions(qas):
qas = sorted(qas, key=lambda x: x[0])
for question, answers, context in qas:
print(question.encode('utf-8'))
def print_answers(qas):
for question, answers, context in qas:
toks = list(answers)
toks[0] = colored(answers[0]['text'], 'cyan')
print(', '.join(toks).encode('utf-8'))
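# Run CoreNLP once over every paragraph (NER) and every question (constituency parse
# with NER) and cache the responses to disk, so the later conversion and alteration
# passes can run without a CoreNLP server.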
def run_corenlp(dataset, qas):
cache = {}
with corenlp.CoreNLPServer(port=CORENLP_PORT, logfile=CORENLP_LOG) as server:
client = corenlp.CoreNLPClient(port=CORENLP_PORT)
print('Running NER for paragraphs...')
for article in dataset['data']:
for paragraph in article['paragraphs']:
response = client.query_ner(paragraph['context'])
print("Response from server: ",response)
cache[paragraph['context']] = response
print('Parsing questions...')
for question, answers, context in qas:
response = client.query_const_parse(question, add_ner=True)
cache[question] = response['sentences'][0]
cache_file = CORENLP_CACHES[OPTS.dataset]
with open(cache_file, 'w') as f:
json.dump(cache, f, indent=2)
def run_conversion(qas):
corenlp_cache = load_cache()
rule_counter = collections.Counter()
unmatched_qas = []
num_matched = 0
for question, answers, context in qas:
parse = corenlp_cache[question]
tokens = parse['tokens']
const_parse = read_const_parse(parse['parse'])
answer = answers[0]['text']
if not OPTS.quiet:
print(question.encode('utf-8'))
for rule in CONVERSION_RULES:
sent = rule.convert(question, answer, tokens, const_parse)
if sent:
if not OPTS.quiet:
print((' Rule "%s": %s' % (rule.name, colored(sent, 'green'))
).encode('utf-8'))
rule_counter[rule.name] += 1
num_matched += 1
break
else:
unmatched_qas.append((question, answer))
# Print stats
if not OPTS.quiet:
print()
print('=== Summary ===')
print('Matched %d/%d = %.2f%% questions' % (
num_matched, len(qas), 100.0 * num_matched / len(qas)))
for rule in CONVERSION_RULES:
num = rule_counter[rule.name]
print(' Rule "%s" used %d times = %.2f%%' % (
rule.name, num, 100.0 * num / len(qas)))
print()
print('=== Sampled unmatched questions ===')
    for q, a in sorted(random.sample(unmatched_qas, min(20, len(unmatched_qas))), key=lambda x: x[0]):
print(('%s [%s]' % (q, colored(a, 'cyan'))).encode('utf-8'))
parse = corenlp_cache[q]
const_parse = read_const_parse(parse['parse'])
#const_parse.print_tree()
def inspect_rule(qas, rule_name):
corenlp_cache = load_cache()
num_matched = 0
  rule = next(r for r in CONVERSION_RULES if r.name == rule_name)
  for question, answers, context in qas:
    parse = corenlp_cache[question]
    tokens = parse['tokens']
    const_parse = read_const_parse(parse['parse'])
    answer = answers[0]['text']
    sent = rule.convert(question, answer, tokens, const_parse)
    if sent:
      print(question.encode('utf-8'))
      print(('  Rule "%s": %s' % (rule_name, colored(sent, 'green'))).encode('utf-8'))
      num_matched += 1
print()
print('Rule "%s" used %d times = %.2f%%' % (
rule_name, num_matched, 100.0 * num_matched / len(qas)))
##########
# Rules for altering words in a sentence/question/answer
# Takes a CoreNLP token as input
##########
SPECIAL_ALTERATIONS = {
'States': 'Kingdom',
'US': 'UK',
'U.S': 'U.K.',
'U.S.': 'U.K.',
'UK': 'US',
'U.K.': 'U.S.',
'U.K': 'U.S.',
'largest': 'smallest',
'smallest': 'largest',
'highest': 'lowest',
'lowest': 'highest',
'May': 'April',
'Peyton': 'Trevor',
}
DO_NOT_ALTER = ['many', 'such', 'few', 'much', 'other', 'same', 'general',
'type', 'record', 'kind', 'sort', 'part', 'form', 'terms', 'use',
'place', 'way', 'old', 'young', 'bowl', 'united', 'one',
'likely', 'different', 'square', 'war', 'republic', 'doctor', 'color']
BAD_ALTERATIONS = ['mx2004', 'planet', 'u.s.', 'Http://Www.Co.Mo.Md.Us']
def alter_special(token, **kwargs):
w = token['originalText']
if w in SPECIAL_ALTERATIONS:
return [SPECIAL_ALTERATIONS[w]]
return None
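# Build an alteration rule that swaps a token for a GloVe-nearby word, optionally
# restricted to named entities and (unless ignore_pos) to words with the same POS tag.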
def alter_nearby(pos_list, ignore_pos=False, is_ner=False):
def func(token, nearby_word_dict=None, postag_dict=None, **kwargs):
if token['pos'] not in pos_list: return None
if is_ner and token['ner'] not in ('PERSON', 'LOCATION', 'ORGANIZATION', 'MISC'):
return None
w = token['word'].lower()
    if w in ('war',): return None
if w not in nearby_word_dict: return None
new_words = []
w_stem = STEMMER.stem(w.replace('.', ''))
for x in nearby_word_dict[w][1:]:
new_word = x['word']
# Make sure words aren't too similar (e.g. same stem)
new_stem = STEMMER.stem(new_word.replace('.', ''))
if w_stem.startswith(new_stem) or new_stem.startswith(w_stem): continue
if not ignore_pos:
# Check for POS tag match
if new_word not in postag_dict: continue
new_postag = postag_dict[new_word]
if new_postag != token['pos']: continue
new_words.append(new_word)
return new_words
return func
def alter_entity_glove(token, nearby_word_dict=None, **kwargs):
# NOTE: Deprecated
if token['ner'] not in ('PERSON', 'LOCATION', 'ORGANIZATION', 'MISC'): return None
w = token['word'].lower()
if w == token['word']: return None # Only do capitalized words
if w not in nearby_word_dict: return None
new_words = []
for x in nearby_word_dict[w][1:3]:
if token['word'] == w.upper():
new_words.append(x['word'].upper())
else:
new_words.append(x['word'].title())
return new_words
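# Replace a named entity with a fixed stand-in of the same entity type
# (e.g. any PERSON becomes "Jackson").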
def alter_entity_type(token, **kwargs):
pos = token['pos']
ner = token['ner']
word = token['word']
is_abbrev = word == word.upper() and not word == word.lower()
if token['pos'] not in (
'JJ', 'JJR', 'JJS', 'NN', 'NNS', 'NNP', 'NNPS', 'RB', 'RBR', 'RBS',
'VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ'):
# Don't alter non-content words
return None
if ner == 'PERSON':
return ['Jackson']
elif ner == 'LOCATION':
return ['Berlin']
elif ner == 'ORGANIZATION':
if is_abbrev: return ['UNICEF']
return ['Acme']
elif ner == 'MISC':
return ['Neptune']
  elif pos == 'NNP':
if is_abbrev: return ['XKCD']
return ['Dalek']
elif pos == 'NNPS':
return ['Daleks']
return None
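# Swap a content word for a single-word WordNet antonym, if one exists.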
def alter_wordnet_antonyms(token, **kwargs):
if token['pos'] not in POS_TO_WORDNET: return None
w = token['word'].lower()
wn_pos = POS_TO_WORDNET[token['pos']]
synsets = wn.synsets(w, wn_pos)
if not synsets: return None
synset = synsets[0]
antonyms = []
for lem in synset.lemmas():
if lem.antonyms():
for a in lem.antonyms():
new_word = a.name()
if '_' in a.name(): continue
antonyms.append(new_word)
return antonyms
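# Alteration rules are tried in order; HIGH_CONF_ALTER_RULES are the conservative
# ones, and ALL_ALTER_RULES additionally allows nearby adjective/noun swaps.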
HIGH_CONF_ALTER_RULES = collections.OrderedDict([
('special', alter_special),
('wn_antonyms', alter_wordnet_antonyms),
('nearbyNum', alter_nearby(['CD'], ignore_pos=True)),
('nearbyProperNoun', alter_nearby(['NNP', 'NNPS'])),
('nearbyProperNoun', alter_nearby(['NNP', 'NNPS'], ignore_pos=True)),
('nearbyEntityNouns', alter_nearby(['NN', 'NNS'], is_ner=True)),
('nearbyEntityJJ', alter_nearby(['JJ', 'JJR', 'JJS'], is_ner=True)),
('entityType', alter_entity_type),
#('entity_glove', alter_entity_glove),
])
ALL_ALTER_RULES = collections.OrderedDict(HIGH_CONF_ALTER_RULES.items())
ALL_ALTER_RULES.update([
('nearbyAdj', alter_nearby(['JJ', 'JJR', 'JJS'])),
('nearbyNoun', alter_nearby(['NN', 'NNS'])),
#('nearbyNoun', alter_nearby(['NN', 'NNS'], ignore_pos=True)),
])
def alter_question(q, tokens, const_parse, nearby_word_dict, postag_dict,
strategy='separate'):
"""Alter the question to make it ask something else.
Possible strategies:
- separate: Do best alteration for each word separately.
- best: Generate exactly one best alteration (may over-alter).
- high-conf: Do all possible high-confidence alterations
- high-conf-separate: Do best high-confidence alteration for each word separately.
- all: Do all possible alterations (very conservative)
"""
used_words = [t['word'].lower() for t in tokens]
new_qs = []
toks_all = []
if strategy.startswith('high-conf'):
rules = HIGH_CONF_ALTER_RULES
else:
rules = ALL_ALTER_RULES
for i, t in enumerate(tokens):
if t['word'].lower() in DO_NOT_ALTER:
if strategy in ('high-conf', 'all'): toks_all.append(t)
continue
begin = tokens[:i]
end = tokens[i+1:]
found = False
for rule_name in rules:
rule = rules[rule_name]
new_words = rule(t, nearby_word_dict=nearby_word_dict,
postag_dict=postag_dict)
if new_words:
for nw in new_words:
if nw.lower() in used_words: continue
if nw.lower() in BAD_ALTERATIONS: continue
          # Match capitalization
if t['word'] == t['word'].upper():
nw = nw.upper()
elif t['word'] == t['word'].title():
nw = nw.title()
new_tok = dict(t)
new_tok['word'] = new_tok['lemma'] = new_tok['originalText'] = nw
new_tok['altered'] = True
# NOTE: obviously this is approximate
if strategy.endswith('separate'):
new_tokens = begin + [new_tok] + end
new_q = corenlp.rejoin(new_tokens)
tag = '%s-%d-%s' % (rule_name, i, nw)
new_const_parse = corenlp.ConstituencyParse.replace_words(
const_parse, [t['word'] for t in new_tokens])
new_qs.append((new_q, new_tokens, new_const_parse, tag))
break
elif strategy in ('high-conf', 'all'):
toks_all.append(new_tok)
found = True
break
if strategy in ('high-conf', 'all') and found: break
if strategy in ('high-conf', 'all') and not found:
toks_all.append(t)
if strategy in ('high-conf', 'all'):
new_q = corenlp.rejoin(toks_all)
new_const_parse = corenlp.ConstituencyParse.replace_words(
const_parse, [t['word'] for t in toks_all])
if new_q != q:
new_qs.append((corenlp.rejoin(toks_all), toks_all, new_const_parse, strategy))
return new_qs
def colorize_alterations(tokens):
out_toks = []
for t in tokens:
if 'altered' in t:
new_tok = {'originalText': colored(t['originalText'], 'cyan'),
'before': t['before']}
out_toks.append(new_tok)
else:
out_toks.append(t)
return corenlp.rejoin(out_toks)
def alter_questions(qas, alteration_strategy=None):
corenlp_cache = load_cache()
nearby_word_dict = load_nearby_words()
postag_dict = load_postag_dict()
rule_counter = collections.Counter()
unmatched_qas = []
num_matched = 0
for question, answers, context in qas:
parse = corenlp_cache[question]
tokens = parse['tokens']
const_parse = read_const_parse(parse['parse'])
answer = answers[0]['text']
if not OPTS.quiet:
print(question.encode('utf-8'))
new_qs = alter_question(
question, tokens, const_parse, nearby_word_dict, postag_dict,
strategy=alteration_strategy)
if new_qs:
num_matched += 1
used_rules = set([x[3].split('-')[0] for x in new_qs])
for r in used_rules:
rule_counter[r] += 1
for q, new_toks, new_const_parse, tag in new_qs:
rule = tag.split('-')[0]
print((' Rule %s: %s' % (rule, colorize_alterations(new_toks))).encode('utf-8'))
else:
unmatched_qas.append((question, answer))
# Print stats
if not OPTS.quiet:
print()
print('=== Summary ===')
print('Matched %d/%d = %.2f%% questions' % (
num_matched, len(qas), 100.0 * num_matched / len(qas)))
for rule_name in ALL_ALTER_RULES:
num = rule_counter[rule_name]
print(' Rule "%s" used %d times = %.2f%%' % (
rule_name, num, 100.0 * num / len(qas)))
print()
print('=== Sampled unmatched questions ===')
    for q, a in sorted(random.sample(unmatched_qas, min(20, len(unmatched_qas))), key=lambda x: x[0]):
print(('%s [%s]' % (q, colored(a, 'cyan'))).encode('utf-8'))
def get_tokens_for_answers(answer_objs, corenlp_obj):
"""Get CoreNLP tokens corresponding to a SQuAD answer object."""
first_a_toks = None
for i, a_obj in enumerate(answer_objs):
a_toks = []
answer_start = a_obj['answer_start']
answer_end = answer_start + len(a_obj['text'])
for s in corenlp_obj['sentences']:
for t in s['tokens']:
if t['characterOffsetBegin'] >= answer_end: continue
if t['characterOffsetEnd'] <= answer_start: continue
a_toks.append(t)
if corenlp.rejoin(a_toks).strip() == a_obj['text']:
# Make sure that the tokens reconstruct the answer
return i, a_toks
if i == 0: first_a_toks = a_toks
# None of the extracted token lists reconstruct the answer
# Default to the first
return 0, first_a_toks
def get_determiner_for_answers(answer_objs):
for a in answer_objs:
words = a['text'].split(' ')
if words[0].lower() == 'the': return 'the'
if words[0].lower() in ('a', 'an'): return 'a'
return None
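# Perturb a numeric answer: shift year-like values slightly, bump the leading digit of
# other numbers, and swap scale words like "thousand"/"million", so the fake answer
# stays plausible but wrong.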
def ans_number(a, tokens, q, **kwargs):
out_toks = []
seen_num = False
for t in tokens:
ner = t['ner']
pos = t['pos']
w = t['word']
out_tok = {'before': t['before']}
# Split on dashes
leftover = ''
dash_toks = w.split('-')
if len(dash_toks) > 1:
w = dash_toks[0]
leftover = '-'.join(dash_toks[1:])
# Try to get a number out
value = None
if w != '%':
# Percent sign should just pass through
try:
value = float(w.replace(',', ''))
      except ValueError:
try:
norm_ner = t['normalizedNER']
if norm_ner[0] in ('%', '>', '<'):
norm_ner = norm_ner[1:]
value = float(norm_ner)
        except (KeyError, IndexError, ValueError):
pass
if not value and (
ner == 'NUMBER' or
(ner == 'PERCENT' and pos == 'CD')):
# Force this to be a number anyways
value = 10
if value:
if math.isinf(value) or math.isnan(value): value = 9001
seen_num = True
if w in ('thousand', 'million', 'billion', 'trillion'):
if w == 'thousand':
new_val = 'million'
else:
new_val = 'thousand'
else:
if value < 2500 and value > 1000:
new_val = str(value - 75)
else:
# Change leading digit
if value == int(value):
val_chars = list('%d' % value)
else:
val_chars = list('%g' % value)
c = val_chars[0]
for i in range(len(val_chars)):
c = val_chars[i]
if c >= '0' and c <= '9':
val_chars[i] = str(max((int(c) + 5) % 10, 1))
break
new_val = ''.join(val_chars)
if leftover:
new_val = '%s-%s' % (new_val, leftover)
out_tok['originalText'] = new_val
else:
out_tok['originalText'] = t['originalText']
out_toks.append(out_tok)
if seen_num:
return corenlp.rejoin(out_toks).strip()
else:
return None
MONTHS = ['january', 'february', 'march', 'april', 'may', 'june', 'july',
'august', 'september', 'october', 'november', 'december']
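# Perturb a DATE answer: shift years/days by a fixed offset and move month names six
# months ahead; returns None if every token is unchanged or the tokens are not all DATEs.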
def ans_date(a, tokens, q, **kwargs):
out_toks = []
if not all(t['ner'] == 'DATE' for t in tokens): return None
for t in tokens:
if t['pos'] == 'CD' or t['word'].isdigit():
try:
value = int(t['word'])
      except ValueError:
value = 10 # fallback
if value > 50: new_val = str(value - 25) # Year
else: # Day of month
if value > 15: new_val = str(value - 11)
else: new_val = str(value + 11)
else:
if t['word'].lower() in MONTHS:
m_ind = MONTHS.index(t['word'].lower())
new_val = MONTHS[(m_ind + 6) % 12].title()
else:
# Give up
new_val = t['originalText']
out_toks.append({'before': t['before'], 'originalText': new_val})
new_ans = corenlp.rejoin(out_toks).strip()
if new_ans == a['text']: return None
return new_ans
def ans_entity_full(ner_tag, new_ans):
"""Returns a function that yields new_ans iff every token has |ner_tag|."""
def func(a, tokens, q, **kwargs):
for t in tokens:
if t['ner'] != ner_tag: return None
return new_ans
return func
def ans_abbrev(new_ans):
def func(a, tokens, q, **kwargs):
s = a['text']
if s == s.upper() and s != s.lower():
return new_ans
return None
return func
def ans_match_wh(wh_word, new_ans):
"""Returns a function that yields new_ans if the question starts with |wh_word|."""
def func(a, tokens, q, **kwargs):
if q.lower().startswith(wh_word + ' '):
return new_ans
return None
return func
def ans_pos(pos, new_ans, end=False, add_dt=False):
"""Returns a function that yields new_ans if the first/last token has |pos|."""
def func(a, tokens, q, determiner, **kwargs):
if end:
t = tokens[-1]
else:
t = tokens[0]
if t['pos'] != pos: return None
if add_dt and determiner:
return '%s %s' % (determiner, new_ans)
return new_ans
return func
def ans_catch_all(new_ans):
def func(a, tokens, q, **kwargs):
return new_ans
return func
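# Fake-answer generation rules, tried in order until one fires; the MOD_* variant below
# uses a disjoint set of stand-in answers.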
ANSWER_RULES = [
('date', ans_date),
('number', ans_number),
('ner_person', ans_entity_full('PERSON', 'Jeff Dean')),
('ner_location', ans_entity_full('LOCATION', 'Chicago')),
('ner_organization', ans_entity_full('ORGANIZATION', 'Stark Industries')),
('ner_misc', ans_entity_full('MISC', 'Jupiter')),
('abbrev', ans_abbrev('LSTM')),
('wh_who', ans_match_wh('who', 'Jeff Dean')),
('wh_when', ans_match_wh('when', '1956')),
('wh_where', ans_match_wh('where', 'Chicago')),
('wh_where', ans_match_wh('how many', '42')),
# Starts with verb
('pos_begin_vb', ans_pos('VB', 'learn')),
('pos_end_vbd', ans_pos('VBD', 'learned')),
('pos_end_vbg', ans_pos('VBG', 'learning')),
('pos_end_vbp', ans_pos('VBP', 'learns')),
('pos_end_vbz', ans_pos('VBZ', 'learns')),
# Ends with some POS tag
('pos_end_nn', ans_pos('NN', 'hamster', end=True, add_dt=True)),
('pos_end_nnp', ans_pos('NNP', 'Central Park', end=True, add_dt=True)),
('pos_end_nns', ans_pos('NNS', 'hamsters', end=True, add_dt=True)),
('pos_end_nnps', ans_pos('NNPS', 'Kew Gardens', end=True, add_dt=True)),
('pos_end_jj', ans_pos('JJ', 'deep', end=True)),
('pos_end_jjr', ans_pos('JJR', 'deeper', end=True)),
('pos_end_jjs', ans_pos('JJS', 'deepest', end=True)),
('pos_end_rb', ans_pos('RB', 'silently', end=True)),
('pos_end_vbg', ans_pos('VBG', 'learning', end=True)),
('catch_all', ans_catch_all('aliens')),
]
MOD_ANSWER_RULES = [
('date', ans_date),
('number', ans_number),
('ner_person', ans_entity_full('PERSON', 'Charles Babbage')),
('ner_location', ans_entity_full('LOCATION', 'Stockholm')),
('ner_organization', ans_entity_full('ORGANIZATION', 'Acme Corporation')),
('ner_misc', ans_entity_full('MISC', 'Soylent')),
('abbrev', ans_abbrev('PCFG')),
('wh_who', ans_match_wh('who', 'Charles Babbage')),
('wh_when', ans_match_wh('when', '2004')),
('wh_where', ans_match_wh('where', 'Stockholm')),
('wh_where', ans_match_wh('how many', '200')),
# Starts with verb
('pos_begin_vb', ans_pos('VB', 'run')),
('pos_end_vbd', ans_pos('VBD', 'ran')),
('pos_end_vbg', ans_pos('VBG', 'running')),
('pos_end_vbp', ans_pos('VBP', 'runs')),
('pos_end_vbz', ans_pos('VBZ', 'runs')),
# Ends with some POS tag
('pos_end_nn', ans_pos('NN', 'apple', end=True, add_dt=True)),
('pos_end_nnp', ans_pos('NNP', 'Sears Tower', end=True, add_dt=True)),
('pos_end_nns', ans_pos('NNS', 'apples', end=True, add_dt=True)),
('pos_end_nnps', ans_pos('NNPS', 'Hobbits', end=True, add_dt=True)),
('pos_end_jj', ans_pos('JJ', 'blue', end=True)),
('pos_end_jjr', ans_pos('JJR', 'bluer', end=True)),
('pos_end_jjs', ans_pos('JJS', 'bluest', end=True)),
('pos_end_rb', ans_pos('RB', 'quickly', end=True)),
('pos_end_vbg', ans_pos('VBG', 'running', end=True)),
('catch_all', ans_catch_all('cosmic rays')),
]
def generate_answers(qas):
corenlp_cache = load_cache()
#nearby_word_dict = load_nearby_words()
#postag_dict = load_postag_dict()
rule_counter = collections.Counter()
unmatched_qas = []
num_matched = 0
for question, answers, context in qas:
parse = corenlp_cache[context]
ind, tokens = get_tokens_for_answers(answers, parse)
determiner = get_determiner_for_answers(answers)
answer = answers[ind]
if not OPTS.quiet:
print(('%s [%s]' % (question, colored(answer['text'], 'cyan'))).encode('utf-8'))
for rule_name, func in ANSWER_RULES:
new_ans = func(answer, tokens, question, determiner=determiner)
if new_ans:
num_matched += 1
rule_counter[rule_name] += 1
if not OPTS.quiet:
print((' Rule %s: %s' % (rule_name, colored(new_ans, 'green'))).encode('utf-8'))
break
else:
unmatched_qas.append((question, answer['text']))
# Print stats
if not OPTS.quiet:
print()
print('=== Summary ===')
print('Matched %d/%d = %.2f%% questions' % (
num_matched, len(qas), 100.0 * num_matched / len(qas)))
print()
for rule_name, func in ANSWER_RULES:
num = rule_counter[rule_name]
print(' Rule "%s" used %d times = %.2f%%' % (
rule_name, num, 100.0 * num / len(qas)))
print()
print('=== Sampled unmatched answers ===')
for q, a in sorted(random.sample(unmatched_qas, min(20, len(unmatched_qas))),
key=lambda x: x[0]):
print(('%s [%s]' % (q, colored(a, 'cyan'))).encode('utf-8'))
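# End-to-end pipeline: fabricate a fake answer, optionally alter the question, then
# convert the altered question plus fake answer into a declarative distractor sentence.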
def run_end2end(qas, alteration_strategy=None):
corenlp_cache = load_cache()
nearby_word_dict = load_nearby_words()
postag_dict = load_postag_dict()
alt_rule_counter = collections.Counter()
conv_rule_counter = collections.Counter()
unmatched_qas = []
num_matched = 0
for question, answers, context in qas:
if not OPTS.quiet:
print(question.encode('utf-8'))
print((' Original Answers: [%s]' % (', '.join(x['text'] for x in answers))).encode('utf-8'))
# Make up answer
p_parse = corenlp_cache[context]
ind, a_toks = get_tokens_for_answers(answers, p_parse)
determiner = get_determiner_for_answers(answers)
answer_obj = answers[ind]
for rule_name, func in ANSWER_RULES:
answer = func(answer_obj, a_toks, question, determiner=determiner)
if answer: break
else:
raise ValueError('Missing answer')
if not OPTS.quiet:
print((' New Answer: %s' % colored(answer, 'red')).encode('utf-8'))
# Alter question
parse = corenlp_cache[question]
tokens = parse['tokens']
const_parse = read_const_parse(parse['parse'])
#const_parse.print_tree()
if alteration_strategy:
new_qs = alter_question(
question, tokens, const_parse, nearby_word_dict, postag_dict,
strategy=alteration_strategy)
else:
new_qs = [(question, tokens, const_parse, 'unaltered')]
matched = False
if new_qs:
used_rules = set([x[3].split('-')[0] for x in new_qs])
for r in used_rules:
alt_rule_counter[r] += 1
for q, q_tokens, q_const_parse, tag in new_qs:
alt_rule_str = tag.split('-')[0]
if not OPTS.quiet:
print((' Alter "%s": %s' % (
alt_rule_str, colorize_alterations(q_tokens))).encode('utf-8'))
# Turn it into a sentence
for rule in CONVERSION_RULES:
sent = rule.convert(q, answer, q_tokens, q_const_parse)
if sent:
matched = True
conv_rule_counter[rule.name] += 1
if not OPTS.quiet:
print((' Convert "%s": %s' % (rule.name, colored(sent, 'green'))).encode('utf-8'))
break
if matched:
num_matched += 1
else:
unmatched_qas.append((question, answer))
# Print stats
if not OPTS.quiet:
print()
print('=== Summary ===')
print('Matched %d/%d = %.2f%% questions' % (
num_matched, len(qas), 100.0 * num_matched / len(qas)))
print('Alteration:')
for rule_name in ALL_ALTER_RULES:
num = alt_rule_counter[rule_name]
print(' Rule "%s" used %d times = %.2f%%' % (
rule_name, num, 100.0 * num / len(qas)))
print('Conversion:')
for rule in CONVERSION_RULES:
num = conv_rule_counter[rule.name]
print(' Rule "%s" used %d times = %.2f%%' % (
rule.name, num, 100.0 * num / len(qas)))
print()
print('=== Sampled unmatched questions ===')
    for q, a in sorted(random.sample(unmatched_qas, min(20, len(unmatched_qas))), key=lambda x: x[0]):
print(('%s [%s]' % (q, colored(a, 'cyan'))).encode('utf-8'))
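# Write out the adversarial dataset: for every question, append the generated distractor
# sentence to (or, with --prepend, place it before) the paragraph context, and also dump
# a TSV of distractor sentences for MTurk verification.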
def dump_data(dataset, prefix, data_dir, use_answer_placeholder=False, alteration_strategy=None):
corenlp_cache = load_cache()
nearby_word_dict = load_nearby_words()
postag_dict = load_postag_dict()
out_data = []
out_obj = {'version': dataset['version'], 'data': out_data}
mturk_data = []
for article in dataset['data']:
out_paragraphs = []
out_article = {'title': article['title'], 'paragraphs': out_paragraphs}
out_data.append(out_article)
for paragraph in article['paragraphs']:
out_paragraphs.append(paragraph)
for qa in paragraph['qas']:
question = qa['question'].strip()
if not OPTS.quiet:
print(('Question: %s' % question).encode('utf-8'))
if use_answer_placeholder:
answer = 'ANSWER'
determiner = ''
else:
p_parse = corenlp_cache[paragraph['context']]
ind, a_toks = get_tokens_for_answers(qa['answers'], p_parse)
determiner = get_determiner_for_answers(qa['answers'])
answer_obj = qa['answers'][ind]
for rule_name, func in ANSWER_RULES:
answer = func(answer_obj, a_toks, question, determiner=determiner)
if answer: break
else:
raise ValueError('Missing answer')
answer_mturk = "<span class='answer'>%s</span>" % answer
q_parse = corenlp_cache[question]
q_tokens = q_parse['tokens']
q_const_parse = read_const_parse(q_parse['parse'])
if alteration_strategy:
# Easiest to alter the question before converting
q_list = alter_question(
question, q_tokens, q_const_parse, nearby_word_dict,
postag_dict, strategy=alteration_strategy)
else:
q_list = [(question, q_tokens, q_const_parse, 'unaltered')]
for q_str, q_tokens, q_const_parse, tag in q_list:
for rule in CONVERSION_RULES:
sent = rule.convert(q_str, answer, q_tokens, q_const_parse)
if sent:
if not OPTS.quiet:
print((' Sent (%s): %s' % (tag, colored(sent, 'cyan'))).encode('utf-8'))
cur_qa = {
'question': qa['question'],
'id': '%s-%s' % (qa['id'], tag),
'answers': qa['answers']
}
if OPTS.prepend:
cur_text = '%s %s' % (sent, paragraph['context'])
new_answers = []
for a in qa['answers']:
new_answers.append({
'text': a['text'],
'answer_start': a['answer_start'] + len(sent) + 1
})
cur_qa['answers'] = new_answers
else:
cur_text = '%s %s' % (paragraph['context'], sent)
cur_paragraph = {'context': cur_text, 'qas': [cur_qa]}
out_paragraphs.append(cur_paragraph)
sent_mturk = rule.convert(q_str, answer_mturk, q_tokens, q_const_parse)
mturk_data.append((qa['id'], sent_mturk))
break
if OPTS.dataset != 'dev':
prefix = '%s-%s' % (OPTS.dataset, prefix)
if OPTS.modified_answers:
prefix = '%s-mod' % prefix
if OPTS.prepend:
prefix = '%s-pre' % prefix
with open(os.path.join(data_dir, prefix + '.json'), 'w') as f:
json.dump(out_obj, f)
with open(os.path.join(data_dir, prefix + '-indented.json'), 'w') as f:
json.dump(out_obj, f, indent=2)
with open(os.path.join(data_dir, prefix + '-mturk.tsv'), 'w') as f:
for qid, sent in mturk_data:
      f.write(('%s\t%s' % (qid, sent)).encode('ascii', 'ignore').decode('ascii') + '\n')
def main():
dataset = read_data()
qas = get_qas(dataset)
data_dir = DATASETS['data_dir']
if OPTS.modified_answers:
global ANSWER_RULES
ANSWER_RULES = MOD_ANSWER_RULES
if OPTS.seed >= 0:
random.seed(OPTS.seed)
random.shuffle(qas)
if OPTS.command == 'print-questions':
print_questions(qas)
elif OPTS.command == 'print-answers':
print_answers(qas)
elif OPTS.command == 'corenlp':
run_corenlp(dataset, qas)
elif OPTS.command == 'convert-q':
run_conversion(qas)
elif OPTS.command == 'inspect-q':
inspect_rule(qas, OPTS.rule)
elif OPTS.command == 'alter-separate':
alter_questions(qas, alteration_strategy='separate')
elif OPTS.command == 'alter-best':
alter_questions(qas, alteration_strategy='best')
elif OPTS.command == 'alter-all':
alter_questions(qas, alteration_strategy='all')
elif OPTS.command == 'gen-a':
generate_answers(qas)
elif OPTS.command == 'e2e-lies':
run_end2end(qas)
elif OPTS.command == 'e2e-highConf':
run_end2end(qas, alteration_strategy='high-conf')
elif OPTS.command == 'e2e-all':
run_end2end(qas, alteration_strategy='all')
elif OPTS.command == 'dump-placeholder':
    dump_data(dataset, 'convPlaceholder', data_dir, use_answer_placeholder=True)
  elif OPTS.command == 'dump-lies':
    dump_data(dataset, 'convLies', data_dir)
  elif OPTS.command == 'dump-highConf':
    dump_data(dataset, 'convHighConf', data_dir, alteration_strategy='high-conf')
  elif OPTS.command == 'dump-hcSeparate':
    dump_data(dataset, 'convHCSeparate', data_dir, alteration_strategy='high-conf-separate')
  elif OPTS.command == 'dump-altAll':
    dump_data(dataset, 'convAltAll', data_dir, alteration_strategy='all')
else:
raise ValueError('Unknown command "%s"' % OPTS.command)
if __name__ == '__main__':
OPTS = parse_args()
main()
| 37.079193 | 122 | 0.617174 |
bbccaed6e885715ba9b092735656d01ffca34414 | 9,779 | py | Python | venv/lib/python3.8/site-packages/ansible_collections/cisco/ise/plugins/action/guest_smtp_notification_settings.py | saeedya/docker-ansible | 6fb0cfc6bc4a5925b21380952a5a4502ec02119a | [
"Apache-2.0"
] | null | null | null | venv/lib/python3.8/site-packages/ansible_collections/cisco/ise/plugins/action/guest_smtp_notification_settings.py | saeedya/docker-ansible | 6fb0cfc6bc4a5925b21380952a5a4502ec02119a | [
"Apache-2.0"
] | null | null | null | venv/lib/python3.8/site-packages/ansible_collections/cisco/ise/plugins/action/guest_smtp_notification_settings.py | saeedya/docker-ansible | 6fb0cfc6bc4a5925b21380952a5a4502ec02119a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2021, Cisco Systems
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action import ActionBase
try:
from ansible_collections.ansible.utils.plugins.module_utils.common.argspec_validate import (
AnsibleArgSpecValidator,
)
except ImportError:
ANSIBLE_UTILS_IS_INSTALLED = False
else:
ANSIBLE_UTILS_IS_INSTALLED = True
from ansible.errors import AnsibleActionFail
from ansible_collections.cisco.ise.plugins.plugin_utils.ise import (
ISESDK,
ise_argument_spec,
ise_compare_equality,
get_dict_result,
)
from ansible_collections.cisco.ise.plugins.plugin_utils.exceptions import (
InconsistentParameters,
)
# Get common arguments specification
argument_spec = ise_argument_spec()
# Add arguments specific for this module
argument_spec.update(dict(
state=dict(type="str", default="present", choices=["present"]),
smtpServer=dict(type="str"),
notificationEnabled=dict(type="bool"),
useDefaultFromAddress=dict(type="bool"),
defaultFromAddress=dict(type="str"),
smtpPort=dict(type="str"),
connectionTimeout=dict(type="str"),
useTLSorSSLEncryption=dict(type="bool"),
usePasswordAuthentication=dict(type="bool"),
userName=dict(type="str"),
password=dict(type="str", no_log=True),
id=dict(type="str"),
))
required_if = [
("state", "present", ["id"], True),
]
required_one_of = []
mutually_exclusive = []
required_together = []
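# Thin wrapper around the ISE SDK: maps the Ansible module's camelCase arguments onto the
# SDK's snake_case parameters and implements exists/requires_update/create/update for the
# guest SMTP notification settings resource.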
class GuestSmtpNotificationSettings(object):
def __init__(self, params, ise):
self.ise = ise
self.new_object = dict(
smtp_server=params.get("smtpServer"),
notification_enabled=params.get("notificationEnabled"),
use_default_from_address=params.get("useDefaultFromAddress"),
default_from_address=params.get("defaultFromAddress"),
smtp_port=params.get("smtpPort"),
connection_timeout=params.get("connectionTimeout"),
use_tlsor_ssl_encryption=params.get("useTLSorSSLEncryption"),
use_password_authentication=params.get("usePasswordAuthentication"),
user_name=params.get("userName"),
password=params.get("password"),
id=params.get("id"),
)
def get_object_by_name(self, name):
# NOTICE: Get does not support/work for filter by name with EQ
result = None
gen_items_responses = self.ise.exec(
family="guest_smtp_notification_configuration",
function="get_guest_smtp_notification_settings_generator"
)
try:
for items_response in gen_items_responses:
items = items_response.response['SearchResult']['resources']
result = get_dict_result(items, 'name', name)
if result:
return result
except (TypeError, AttributeError) as e:
self.ise.fail_json(
msg=(
"An error occured when executing operation."
" Check the configuration of your API Settings and API Gateway settings on your ISE server."
" This collection assumes that the API Gateway, the ERS APIs and OpenAPIs are enabled."
" You may want to enable the (ise_debug: True) argument."
" The error was: {error}"
).format(error=e)
)
except Exception:
result = None
return result
return result
def get_object_by_id(self, id):
try:
result = self.ise.exec(
family="guest_smtp_notification_configuration",
function="get_guest_smtp_notification_settings_by_id",
params={"id": id},
handle_func_exception=False,
).response['ERSGuestSmtpNotificationSettings']
except (TypeError, AttributeError) as e:
self.ise.fail_json(
msg=(
"An error occured when executing operation."
" Check the configuration of your API Settings and API Gateway settings on your ISE server."
" This collection assumes that the API Gateway, the ERS APIs and OpenAPIs are enabled."
" You may want to enable the (ise_debug: True) argument."
" The error was: {error}"
).format(error=e)
)
except Exception:
result = None
return result
def exists(self):
prev_obj = None
id_exists = False
name_exists = False
o_id = self.new_object.get("id")
name = self.new_object.get("name")
if o_id:
prev_obj = self.get_object_by_id(o_id)
id_exists = prev_obj is not None and isinstance(prev_obj, dict)
if not id_exists and name:
prev_obj = self.get_object_by_name(name)
name_exists = prev_obj is not None and isinstance(prev_obj, dict)
if name_exists:
_id = prev_obj.get("id")
if id_exists and name_exists and o_id != _id:
raise InconsistentParameters("The 'id' and 'name' params don't refer to the same object")
if _id:
prev_obj = self.get_object_by_id(_id)
it_exists = prev_obj is not None and isinstance(prev_obj, dict)
return (it_exists, prev_obj)
def requires_update(self, current_obj):
requested_obj = self.new_object
obj_params = [
("smtpServer", "smtp_server"),
("notificationEnabled", "notification_enabled"),
("useDefaultFromAddress", "use_default_from_address"),
("defaultFromAddress", "default_from_address"),
("smtpPort", "smtp_port"),
("connectionTimeout", "connection_timeout"),
("useTLSorSSLEncryption", "use_tlsor_ssl_encryption"),
("usePasswordAuthentication", "use_password_authentication"),
("userName", "user_name"),
("password", "password"),
("id", "id"),
]
# Method 1. Params present in request (Ansible) obj are the same as the current (ISE) params
# If any does not have eq params, it requires update
return any(not ise_compare_equality(current_obj.get(ise_param),
requested_obj.get(ansible_param))
for (ise_param, ansible_param) in obj_params)
def create(self):
result = self.ise.exec(
family="guest_smtp_notification_configuration",
function="create_guest_smtp_notification_settings",
params=self.new_object,
).response
return result
def update(self):
id = self.new_object.get("id")
name = self.new_object.get("name")
result = None
if not id:
id_ = self.get_object_by_name(name).get("id")
self.new_object.update(dict(id=id_))
result = self.ise.exec(
family="guest_smtp_notification_configuration",
function="update_guest_smtp_notification_settings_by_id",
params=self.new_object
).response
return result
class ActionModule(ActionBase):
def __init__(self, *args, **kwargs):
if not ANSIBLE_UTILS_IS_INSTALLED:
raise AnsibleActionFail("ansible.utils is not installed. Execute 'ansible-galaxy collection install ansible.utils'")
super(ActionModule, self).__init__(*args, **kwargs)
self._supports_async = False
self._supports_check_mode = False
self._result = None
# Checks the supplied parameters against the argument spec for this module
def _check_argspec(self):
aav = AnsibleArgSpecValidator(
data=self._task.args,
schema=dict(argument_spec=argument_spec),
schema_format="argspec",
schema_conditionals=dict(
required_if=required_if,
required_one_of=required_one_of,
mutually_exclusive=mutually_exclusive,
required_together=required_together,
),
name=self._task.action,
)
valid, errors, self._task.args = aav.validate()
if not valid:
raise AnsibleActionFail(errors)
def run(self, tmp=None, task_vars=None):
self._task.diff = False
self._result = super(ActionModule, self).run(tmp, task_vars)
self._result["changed"] = False
self._check_argspec()
ise = ISESDK(params=self._task.args)
obj = GuestSmtpNotificationSettings(self._task.args, ise)
state = self._task.args.get("state")
response = None
if state == "present":
(obj_exists, prev_obj) = obj.exists()
if obj_exists:
if obj.requires_update(prev_obj):
ise_update_response = obj.update()
self._result.update(dict(ise_update_response=ise_update_response))
(obj_exists, updated_obj) = obj.exists()
response = updated_obj
ise.object_updated()
else:
response = prev_obj
ise.object_already_present()
else:
ise_create_response = obj.create()
(obj_exists, created_obj) = obj.exists()
response = created_obj
ise.object_created()
self._result.update(dict(ise_response=response))
self._result.update(ise.exit_json())
return self._result
| 39.431452 | 128 | 0.615809 |
e7ebd6d274963cfb3a9fffe8945b32c19bf764ff | 4,598 | py | Python | scripts/portable/Base.py | zero-tomoya/Base | 084de907994679853d47b1e4ab26232f857ca1df | [
"MIT"
] | null | null | null | scripts/portable/Base.py | zero-tomoya/Base | 084de907994679853d47b1e4ab26232f857ca1df | [
"MIT"
] | null | null | null | scripts/portable/Base.py | zero-tomoya/Base | 084de907994679853d47b1e4ab26232f857ca1df | [
"MIT"
] | null | null | null |
import tkinter
import copy
import sys,pickle
from tkinter import messagebox
from tkinter import ttk
import os
import time
import webbrowser
import traceback
import threading
import tkinter.font as tkFont
import configparser
import datetime
# This software performs keyword searches in the system's default browser
s=0
print('')
print(' simple Base browser Ver 1.5')
# Text color settings
cor="Black"
back="grey"
if os.path.isfile("resource/config/default.conf")==True:
config = configparser.ConfigParser()
config.read('resource/config/default.conf')
conf=config['SECTION1'].get('enable')
if conf=="True" or conf==True:
cor=config['SECTION1'].get('color')
back=config['SECTION1'].get('backcolor')
try:
def resourcePath(filename):
if hasattr(sys, "_MEIPASS"):
return os.path.join(sys._MEIPASS, filename)
return os.path.join(filename)
def exitf(event):
global s
ret=messagebox.askyesno("Simple Base browser", "このプログラムを終了しますか?")
if ret==True:
root.destroy()
sys.exit(1)
else:
return "break"
def vf(event):
global txt
global combo
with open("resource/dat/url.DAT", 'rb') as f:
url = pickle.load(f)
if txt.get()=="":
return "break"
if combo.get()=="キーワード検索":
web=url+txt.get()
else:
if "https://" in txt.get() or "http://" in txt.get():
web=copy.copy(txt.get())
else:
web="https://"+txt.get()
webbrowser.open_new(web)
date = datetime.datetime.now()
data=str(date.year)+"/"+str(date.month)+"/"+str(date.day)+" "+str(date.hour)+":"+str(date.minute)
with open('resource/dat/search.log','a') as fjk:
fjk.write("\n"+data+" "+txt.get())
if os.path.isfile("resource/config/config.DAT")==True:
with open("resource/config/config.DAT", 'rb') as f:
intf = pickle.load(f)
if intf==True:
txt.delete(0,tkinter.END)
return "break"
def dcall(event):
time.sleep(0.1)
thread1 = threading.Thread(target=vf,args=(event,))
thread1.start()
return "break"
def check1(event):
global bln1
with open("resource/config/config.DAT","wb") as web:
pickle.dump(bln1.get(),web)
if os.path.isfile("resource/config/config.DAT")==True:
with open("resource/config/config.DAT", 'rb') as f:
intf = pickle.load(f)
else:
intf=False
root = tkinter.Tk()
root.resizable(False, False)
root['bg'] = copy.copy(back)
root.iconbitmap(default="resource/icon/ifs.ico")
Static1 = tkinter.Label(text=u' ', background='grey')
Static4 = tkinter.Label(text=u' ', background='grey')
bln1=tkinter.BooleanVar()
bln1.set(intf)
root.title(u"Simple Base browser (portable)")
root.geometry("400x360")
fontStyle = tkFont.Font(family="Meiryo", size=20)
Static1.pack()
Static1.pack()
lbl1 = tkinter.Label(text='Simple Base browser',foreground=cor, font=fontStyle,background=back)
lbl1.pack()
Static2 = tkinter.Label(text=u' ',background='grey')
Static2.pack()
    # Create the label
lbl = tkinter.Label(text='検索キーワード',foreground=cor,background=back)
lbl.pack()
txt = tkinter.Entry(width=20)
txt.pack()
Static4 = tkinter.Label(text=u' ', background=back)
Static4.pack()
    # Create the search button
Button5 = tkinter.Button(text=u'検索',font=("",10))
Button5.bind("<Button-1>",dcall)
Button5.pack()
root.bind('<Return>', dcall)
Static5 = tkinter.Label(text=u' ', background=back)
Static5.pack()
Static6 = tkinter.Label(text=u'モード切替',foreground=cor,background=back)
Static6.pack()
    # Create the combobox
combo = ttk.Combobox(root, state='readonly')
combo["values"] = ("キーワード検索","指定のURLへ移動")
combo.current(0)
combo.pack()
Static8 = tkinter.Label(text=u' ', background=back)
Static8.pack()
CheckBox2 = tkinter.Checkbutton(root,variable=bln1,text=u"検索後に検索欄をクリアする",foreground=cor,background=back)
CheckBox2.pack()
Button4 = tkinter.Button(text=u'設定を記憶する',font=("",9))
Button4.bind("<Button-1>",check1)
Button4.pack()
Static7 = tkinter.Label(text=u' ', background=back)
Static7.pack()
Button3 = tkinter.Button(text=u'閉じる',font=("",10))
Button3.bind("<Button-1>",exitf)
Button3.pack()
root.mainloop()
except SystemExit:
pass
except:
with open('error.log', 'a') as fs:
traceback.print_exc(file=fs) | 28.208589 | 108 | 0.609613 |
766ab63c5695f797ad56971ebb315218bcfeb1db | 11,866 | py | Python | mozillians/users/migrations/0046_rename_externalaccount_username_identifier.py | caktus/mozillians | 312eb5d993b60092fa4f8eb94548c1db4b21fa01 | [
"BSD-3-Clause"
] | null | null | null | mozillians/users/migrations/0046_rename_externalaccount_username_identifier.py | caktus/mozillians | 312eb5d993b60092fa4f8eb94548c1db4b21fa01 | [
"BSD-3-Clause"
] | null | null | null | mozillians/users/migrations/0046_rename_externalaccount_username_identifier.py | caktus/mozillians | 312eb5d993b60092fa4f8eb94548c1db4b21fa01 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.rename_column('users_externalaccount', 'username', 'identifier')
def backwards(self, orm):
db.rename_column('users_externalaccount', 'identifier', 'username')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'groups.group': {
'Meta': {'ordering': "['name']", 'object_name': 'Group'},
'always_auto_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'auto_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'irc_channel': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '63', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'steward': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['users.UserProfile']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'system': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'url': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'wiki': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'})
},
'groups.language': {
'Meta': {'ordering': "['name']", 'object_name': 'Language'},
'always_auto_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'auto_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'url': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'})
},
'groups.skill': {
'Meta': {'ordering': "['name']", 'object_name': 'Skill'},
'always_auto_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'auto_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'url': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'})
},
'users.externalaccount': {
'Meta': {'ordering': "['type']", 'object_name': 'ExternalAccount'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'privacy': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['users.UserProfile']"})
},
'users.usernameblacklist': {
'Meta': {'ordering': "['value']", 'object_name': 'UsernameBlacklist'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_regex': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'value': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'users.userprofile': {
'Meta': {'ordering': "['full_name']", 'object_name': 'UserProfile', 'db_table': "'profile'"},
'allows_community_sites': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'allows_mozilla_sites': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'basket_token': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'bio': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
'date_mozillian': ('django.db.models.fields.DateField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'date_vouched': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'members'", 'blank': 'True', 'to': "orm['groups.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ircname': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '63', 'blank': 'True'}),
'is_vouched': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'languages': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'members'", 'blank': 'True', 'to': "orm['groups.Language']"}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
'photo': ('sorl.thumbnail.fields.ImageField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'privacy_bio': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_city': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_country': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_date_mozillian': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_email': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_full_name': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_groups': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_ircname': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_languages': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_photo': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_region': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_skills': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_timezone': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_title': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'privacy_tshirt': ('mozillians.users.models.PrivacyField', [], {'default': '1'}),
'privacy_vouched_by': ('mozillians.users.models.PrivacyField', [], {'default': '3'}),
'region': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'skills': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'members'", 'blank': 'True', 'to': "orm['groups.Skill']"}),
'timezone': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '70', 'blank': 'True'}),
'tshirt': ('django.db.models.fields.IntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}),
'vouched_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'vouchees'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['users.UserProfile']", 'blank': 'True', 'null': 'True'})
}
}
complete_apps = ['users']
| 82.979021 | 226 | 0.5627 |
bbf8018bdb718ad770572bce6f5759f71fd37640 | 20,177 | py | Python | Link_prediction_baseline/models/dgi.py | amazon-research/gnn-tail-generalization | 1ff49e62b8a2e2a7273c50dce59167ea9d9161fb | [
"Apache-2.0"
] | 15 | 2021-11-03T22:06:16.000Z | 2022-03-26T06:56:48.000Z | Link_prediction_baseline/models/dgi.py | amazon-research/gnn-tail-generalization | 1ff49e62b8a2e2a7273c50dce59167ea9d9161fb | [
"Apache-2.0"
] | 1 | 2022-02-22T16:11:27.000Z | 2022-03-01T07:09:35.000Z | Link_prediction_baseline/models/dgi.py | amazon-research/gnn-tail-generalization | 1ff49e62b8a2e2a7273c50dce59167ea9d9161fb | [
"Apache-2.0"
] | 3 | 2022-01-08T09:58:15.000Z | 2022-03-21T22:43:16.000Z | import numpy
# NOTE: leftover FLOP-counting benchmark (pypapi + numpy FFT). The bare `raise`
# at its end aborted module import, so the block is commented out here to keep
# the DGI models below importable.
# from pypapi import events, papi_high as high
# high.start_counters([events.PAPI_FP_OPS, ])
# for n in [10, 30, 100, 300, 1000, 10000, 20000]:
#     aa = numpy.mgrid[0:n:1, 0:n:1][0]
#     high.start_counters([events.PAPI_FP_OPS, ])
#     a = numpy.fft.fft(aa)
#     x = high.stop_counters()
#     print(n, x)
# raise
import torch
import torch.nn as nn
import math
import torch.nn.functional as F
from dgl.nn.pytorch import GraphConv, SAGEConv, GINConv
from dgl.nn.pytorch.glob import SumPooling, AvgPooling, MaxPooling
from IPython import embed
import numpy as np
try:
from models.utils import get_positive_expectation, get_negative_expectation
except ModuleNotFoundError:
from baselines.models.utils import get_positive_expectation, get_negative_expectation
class ApplyNodeFunc(nn.Module):
"""Update the node feature hv with MLP, BN and ReLU."""
def __init__(self, mlp):
super(ApplyNodeFunc, self).__init__()
self.mlp = mlp
self.bn = nn.BatchNorm1d(self.mlp.output_dim)
def forward(self, h):
h = self.mlp(h)
h = self.bn(h)
h = F.relu(h)
return h
class FF(nn.Module):
def __init__(self, input_dim):
super().__init__()
self.block = nn.Sequential(
nn.Linear(input_dim * 2, input_dim),
nn.ReLU(),
nn.Linear(input_dim, input_dim),
nn.ReLU(),
nn.Linear(input_dim, input_dim),
nn.ReLU()
)
self.linear_shortcut = nn.Linear(input_dim, input_dim)
def forward(self, x):
return self.block(x) + self.linear_shortcut(x)
class MLP(nn.Module):
"""MLP with linear output"""
def __init__(self, num_layers, input_dim, hidden_dim, output_dim):
"""MLP layers construction
        Parameters
---------
num_layers: int
The number of linear layers
input_dim: int
The dimensionality of input features
hidden_dim: int
The dimensionality of hidden units at ALL layers
output_dim: int
The number of classes for prediction
"""
super(MLP, self).__init__()
self.linear_or_not = True # default is linear model
self.num_layers = num_layers
self.output_dim = output_dim
if num_layers < 1:
raise ValueError("number of layers should be positive!")
elif num_layers == 1:
# Linear model
self.linear = nn.Linear(input_dim, output_dim)
else:
# Multi-layer model
self.linear_or_not = False
self.linears = torch.nn.ModuleList()
self.batch_norms = torch.nn.ModuleList()
self.linears.append(nn.Linear(input_dim, hidden_dim))
for layer in range(num_layers - 2):
self.linears.append(nn.Linear(hidden_dim, hidden_dim))
self.linears.append(nn.Linear(hidden_dim, output_dim))
for layer in range(num_layers - 1):
self.batch_norms.append(nn.BatchNorm1d((hidden_dim)))
def forward(self, x):
if self.linear_or_not:
# If linear model
return self.linear(x)
else:
# If MLP
h = x
for i in range(self.num_layers - 1):
h = F.relu(self.batch_norms[i](self.linears[i](h)))
return self.linears[-1](h)
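# Example (sketch): a 3-layer MLP mapping 16-dim inputs to 8-dim outputs through
# 32-dim hidden layers. The sizes below are illustrative, not taken from this repo:
#     mlp = MLP(num_layers=3, input_dim=16, hidden_dim=32, output_dim=8)
#     out = mlp(torch.randn(4, 16))   # -> shape (4, 8)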
class GIN(nn.Module):
"""GIN model"""
def __init__(self, g, num_layers, num_mlp_layers, input_dim, hidden_dim,
output_dim, final_dropout, learn_eps, graph_pooling_type,
neighbor_pooling_type):
"""model parameters setting
        Parameters
---------
num_layers: int
The number of linear layers in the neural network
num_mlp_layers: int
The number of linear layers in mlps
input_dim: int
The dimensionality of input features
hidden_dim: int
The dimensionality of hidden units at ALL layers
output_dim: int
The number of classes for prediction
final_dropout: float
dropout ratio on the final linear layer
learn_eps: boolean
If True, learn epsilon to distinguish center nodes from neighbors
If False, aggregate neighbors and center nodes altogether.
neighbor_pooling_type: str
how to aggregate neighbors (sum, mean, or max)
graph_pooling_type: str
how to aggregate entire nodes in a graph (sum, mean or max)
"""
super(GIN, self).__init__()
self.num_layers = num_layers
self.learn_eps = learn_eps
self.g = g
# List of MLPs
self.ginlayers = torch.nn.ModuleList()
self.batch_norms = torch.nn.ModuleList()
for layer in range(self.num_layers):
if layer == 0:
mlp = MLP(num_mlp_layers, input_dim, hidden_dim, hidden_dim)
else:
mlp = MLP(num_mlp_layers, hidden_dim, hidden_dim, hidden_dim)
self.ginlayers.append(
GINConv(ApplyNodeFunc(mlp), neighbor_pooling_type, 0, self.learn_eps))
self.batch_norms.append(nn.BatchNorm1d(hidden_dim))
# Linear function for graph poolings of output of each layer
# which maps the output of different layers into a prediction score
self.linears_prediction = torch.nn.ModuleList()
for layer in range(num_layers):
if layer == 0:
self.linears_prediction.append(
nn.Linear(input_dim, output_dim))
else:
self.linears_prediction.append(
nn.Linear(hidden_dim, output_dim))
self.drop = nn.Dropout(final_dropout)
if graph_pooling_type == 'sum':
self.pool = SumPooling()
elif graph_pooling_type == 'mean':
self.pool = AvgPooling()
elif graph_pooling_type == 'max':
self.pool = MaxPooling()
else:
raise NotImplementedError
def forward(self, h):
# list of hidden representation at each layer (including input)
hidden_rep = [h]
for i in range(self.num_layers):
h = self.ginlayers[i](self.g, h)
h = self.batch_norms[i](h)
h = F.relu(h)
hidden_rep.append(h)
# only need node embedding
return h
class GCN(nn.Module):
def __init__(self,
g,
in_feats,
n_hidden,
n_classes,
n_layers,
activation,
dropout):
super(GCN, self).__init__()
self.g = g
self.layers = nn.ModuleList()
# input layer
#if in_feats != n_hidden:
# self.feat_encoder = MLP(in_feats, n_hidden, n_hidden)
#else:
# self.feat_encoder = None
self.feat_encoder = None
self.layers.append(SAGEConv(in_feats, n_hidden, aggregator_type='gcn', activation=activation))
#self.layers.append(SAGEConv(n_hidden, n_hidden, aggregator_type='gcn', activation=activation))
# hidden layers
for i in range(n_layers - 1):
#self.layers.append(GraphConv(n_hidden, n_hidden, activation=activation))
self.layers.append(SAGEConv(n_hidden, n_hidden, aggregator_type='gcn',activation=activation))
# output layer
#self.layers.append(GraphConv(n_hidden, n_classes))
self.layers.append(SAGEConv(n_hidden, n_classes, aggregator_type='gcn',activation=None))
self.dropout = nn.Dropout(p=dropout)
def forward(self, features):
#if self.feat_encoder is not None:
# features = self.feat_encoder(features)
h = features
for i, layer in enumerate(self.layers):
if i != 0:
h = self.dropout(h)
h = layer(self.g, h)
return h
class GRUReduce(nn.Module):
def __init__(self, in_feats, hidden_size):
nn.Module.__init__(self)
self.hidden_size = hidden_size
self.W_z = nn.Linear(in_feats, hidden_size)
self.W_h = nn.Linear(in_feats + hidden_size, hidden_size)
#self.W_1 = nn.Linear(in_feats, hidden_size)
def forward(self, node):
if node.mailbox:
'''
s = node.mailbox['m'].sum(dim=1)
rm = node.mailbox['rm'].sum(dim=1)
z = torch.sigmoid(self.W_z(torch.cat([node.data['x'], s], 1)))
m = torch.tanh(self.W_h(torch.cat([node.data['x'], rm], 1)))
m = (1 - z) * s + z * m
'''
m = F.relu(self.W_z(node.data['x']) + node.mailbox['m'].mean(dim=1))
root = node.mailbox['root'].mean(dim=1)
#embed()
#nroot = node.mailbox['nroot'].mean(dim=1)
#h = torch.cat([node.mailbox['root'], node.data['x'].repeat(1, node.mailbox['root'].shape[1], 1),
# s.repeat(1, node.mailbox['root'].shape[1], 1)], dim=1)
#pred = self.U_s(F.relu(self.linear(h)))
return {'m': m, 'root': root} #, 'out': pred}
else:
'''
z = torch.sigmoid(self.W_z(torch.cat([node.data['x'], torch.zeros_like(node.data['m'])], 1)))
m = torch.tanh(self.W_h(torch.cat([node.data['x'], torch.zeros_like(node.data['m'])], 1)))
m = z * m
'''
m = self.W_z(node.data['x'])
#m =torch.cat([node.data['x'], torch.zeros_like(node.data['m'])], 1)
return {'m': m, 'root':node.data['emb']}
class MsgLayer(nn.Module):
def __init__(self, in_feats, hidden_size):
super(MsgLayer, self).__init__()
self.W_r = nn.Linear(in_feats, hidden_size, bias=False)
self.U_r = nn.Linear(hidden_size, hidden_size)
def forward(self, edges):
# z = torch.cat([edges.src['z'], edges.dst['z']], dim=1)
'''
r_1 = self.W_r(edges.dst['x'])
r_2 = self.U_r(edges.src['m'])
r = torch.sigmoid(r_1 + r_2)
return {'m': edges.src['m'], 'rm': r * edges.src['m'], 'root': edges.src['root']}
'''
return {'m': edges.src['m'], 'root': edges.src['root']}
class Encoder(nn.Module):
def __init__(self, g, in_feats, n_hidden, n_layers, activation, dropout):
super(Encoder, self).__init__()
self.g = g
#self.conv = GCN(g, in_feats, n_hidden, n_hidden, n_layers, activation, dropout)
self.conv = GIN(g, n_layers + 1, 1, in_feats, n_hidden, n_hidden, dropout, True, 'sum', 'sum')
def forward(self, features, corrupt=False):
if corrupt:
perm = torch.randperm(self.g.number_of_nodes())
features = features[perm]
features = self.conv(features)
return features
class Discriminator(nn.Module):
def __init__(self, n_hidden):
super(Discriminator, self).__init__()
self.weight = nn.Parameter(torch.Tensor(n_hidden, n_hidden))
self.reset_parameters()
def uniform(self, size, tensor):
bound = 1.0 / math.sqrt(size)
if tensor is not None:
tensor.data.uniform_(-bound, bound)
def reset_parameters(self):
size = self.weight.size(0)
self.uniform(size, self.weight)
def forward(self, features, summary):
features = torch.matmul(features, torch.matmul(self.weight, summary))
return features
class DGI(nn.Module):
def __init__(self, g, in_feats, n_hidden, n_layers, activation, dropout):
super(DGI, self).__init__()
self.g = g
self.in_feats = in_feats
self.n_hidden = n_hidden
self.n_layers = n_layers
self.activation = activation
self.dropout = dropout
self.encoder = Encoder(g, in_feats, n_hidden, n_layers, activation, dropout)
self.discriminator = Discriminator(n_hidden)
self.loss = nn.BCEWithLogitsLoss()
def reset_parameters(self):
self.encoder = Encoder(self.g, self.in_feats, self.n_hidden, self.n_layers, self.activation, self.dropout)
self.discriminator = Discriminator(self.n_hidden)
self.loss = nn.BCEWithLogitsLoss()
def forward(self, features):
# features = self.g.ndata['features']
positive = self.encoder(features, corrupt=False)
negative = self.encoder(features, corrupt=True)
summary = torch.sigmoid(positive.mean(dim=0))
positive = self.discriminator(positive, summary)
negative = self.discriminator(negative, summary)
l1 = self.loss(positive, torch.ones_like(positive))
l2 = self.loss(negative, torch.zeros_like(negative))
return l1 + l2
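# Example (sketch): a minimal unsupervised training loop for the DGI objective
# above. `g` is the DGLGraph passed to DGI and `feats` its node-feature matrix;
# the hidden size, learning rate and epoch count are illustrative only.
#     dgi = DGI(g, in_feats=feats.shape[1], n_hidden=512, n_layers=1,
#               activation=F.relu, dropout=0.0)
#     optimizer = torch.optim.Adam(dgi.parameters(), lr=1e-3)
#     for epoch in range(300):
#         dgi.train()
#         optimizer.zero_grad()
#         loss = dgi(feats)   # BCE between real and corrupted graph summaries
#         loss.backward()
#         optimizer.step()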
class SubGDiscriminator(nn.Module):
def __init__(self, g, in_feats, n_hidden):
super(SubGDiscriminator, self).__init__()
self.g = g
# in_feats = in_feats
self.dc_layers = nn.ModuleList([
MsgLayer(in_feats, n_hidden),
GRUReduce(in_feats, n_hidden),
# DecodeLayer(hidden_dim2 * 2, hidden_dim2)
])
self.linear = nn.Linear(n_hidden + n_hidden, n_hidden, bias = True)
self.U_s = nn.Linear(n_hidden, 1)
self.in_feats = in_feats
#self.weight = nn.Parameter(torch.Tensor(n_hidden, n_hidden))
#self.reset_parameters()
def edge_output(self, edges):
#return {'h': torch.cat([edges.src['root'], edges.dst['emb']], dim=1)}
return {'h': torch.cat([edges.src['root'], edges.dst['x']], dim=1)}
def forward(self, nf, emb, features):
#features = torch.matmul(features, torch.matmul(self.weight, summary))
#self.g.edata['score'] = torch.zeros((self.g.number_of_edges())).cuda()
#sub_graph
reverse_edges = []
for i in range(nf.num_blocks):
# print(i)
u,v = self.g.find_edges(nf.block_parent_eid(i))
reverse_edges += self.g.edge_ids(v,u).numpy().tolist()
small_g = self.g.edge_subgraph( reverse_edges)
small_g.ndata['emb'] = emb[small_g.ndata['_ID']]
small_g.ndata['x'] = features[small_g.ndata['_ID']]
small_g.ndata['m']= torch.zeros_like(emb[small_g.ndata['_ID']])
#small_g.ndata['m']= torch.zeros_like(emb[small_g.ndata['_ID']])
#self.g.ndata['x'] = features.detach()
edge_embs = []
for i in range(nf.num_blocks)[::-1]:
#nf.layers[i].data['z'] = z[nf.layer_parent_nid(i)]
#nf.layers[i].data['nz'] = nz[nf.layer_parent_nid(i)]
#u,v = self.g.find_edges(nf.block_parent_eid(i))
#reverse_edges = self.g.edge_ids(v,u)
v = small_g.map_to_subgraph_nid(nf.layer_parent_nid(i+1))
if i+1 == nf.num_blocks:
# print('YES')
small_g.apply_nodes(self.dc_layers[1], v)
#self.g.apply_edges(self.dc_layers[1], v)
# embed()
#embed()
uid = small_g.out_edges(v, 'eid')
small_g.apply_edges(self.edge_output, uid)
small_g.push(v, self.dc_layers[0], self.dc_layers[1])
#small_g.copy_to_parent()
#self.g.apply_edges(self.edge_output, reverse_edges)
#self.g.send_and_recv(reverse_edges, self.dc_layers[0],
# self.dc_layers[1])
#h_tmp = torch.zeros( (self.g.number_of_edges(), emb.shape[1])).cuda()
#embed()
#
#small_g.copy_to_parent()
#h = self.g.edata.pop('h')
h = small_g.edata.pop('h')[uid]
edge_embs.append(self.U_s(F.relu(self.linear(h))))
#edge_embs += h.sum(dim=0)
#edge_embs.append(self.U_s(F.relu(self.linear(h[reverse_edges]))))
# embed()
return edge_embs
#return features
class SubGI(nn.Module):
def __init__(self, g, in_feats, n_hidden, n_layers, activation, dropout, pretrain=None):
super(SubGI, self).__init__()
self.encoder = Encoder(g, in_feats, n_hidden, n_layers, activation, dropout)
self.g = g
self.subg_disc = SubGDiscriminator(g, in_feats, n_hidden)
self.loss = nn.BCEWithLogitsLoss()
if pretrain is not None:
print("Loaded pre-train model: {}".format(pretrain) )
#self.load_state_dict(torch.load(project_path + '/' + pretrain))
self.load_state_dict(torch.load(pretrain))
def forward(self, features, nf):
        positive = self.encoder(features, corrupt=False)  # Encoder.forward returns a single tensor
# negative, x = self.encoder(features, corrupt=True)
perm = torch.randperm(self.g.number_of_nodes())
#
negative = positive[perm]
#negative = torch.zeros_like(positive)
# summary = torch.sigmoid(positive.mean(dim=0))
#self.g.ndata['emb'] = positive
#self.g.ndata['m'] = torch.zeros_like(positive)
#perm = torch.randperm(self.g.number_of_nodes())
#features = features[perm]
#self.g.ndata['x'] = features.detach()
positive_batch = self.subg_disc(nf, positive, features)
#self.g.ndata['emb'] = negative
# perm = torch.randperm(self.g.number_of_nodes())
#embed()
#negative = positive
negative_batch = self.subg_disc(nf, negative, features)
#embed()
E_pos, E_neg, l = 0.0, 0.0, 0.0
pos_num, neg_num = 0, 0
for positive_edge, negative_edge in zip(positive_batch, negative_batch):
# embed()
E_pos += get_positive_expectation(positive_edge, 'JSD', average=False).sum()
pos_num += positive_edge.shape[0]
#E_pos = E_pos / positive_edge.shape[0]
E_neg += get_negative_expectation(negative_edge, 'JSD', average=False).sum()
neg_num += negative_edge.shape[0]
#E_neg = E_neg / negative_edge.shape[0]
#l1 = self.loss(positive_edge, torch.ones_like(positive_edge))
#l2 = self.loss(negative_edge, torch.zeros_like(negative_edge))
l += E_neg - E_pos
#l += (l1 + l2)
return E_neg / neg_num - E_pos / pos_num
#return l
def train_model(self):
self.train()
cur_loss = []
for nf in self.train_sampler:
# neighbor_ids, target_ids = extract_nodeflow(nf)
#print(idx)
self.optimizer.zero_grad()
l = self.forward(self.features, nf)
l.backward()
cur_loss.append(l.item())
# continue
self.optimizer.step()
#print("Train NLL:{}".format(np.sum(cur_nll)))
# embed()
return np.mean(cur_loss)
class Classifier(nn.Module):
def __init__(self, n_hidden, n_classes):
super(Classifier, self).__init__()
self.fc = nn.Linear(n_hidden, n_classes)
self.reset_parameters()
def reset_parameters(self):
self.fc.reset_parameters()
def forward(self, features):
features = self.fc(features)
return features
#return features.squeeze()
#return torch.log_softmax(features, dim=-1)
class MultiClassifier(nn.Module):
def __init__(self, n_hidden, n_classes):
super(MultiClassifier, self).__init__()
self.fc = nn.Linear(n_hidden, n_classes)
self.reset_parameters()
def reset_parameters(self):
self.fc.reset_parameters()
def forward(self, features):
features = self.fc(features)
#return features.squeeze()
return torch.log_softmax(features, dim=-1)
| 37.643657 | 115 | 0.571839 |
f9aa2a42949bbee001ce3a776d918cc2565fa6cf | 2,526 | py | Python | azureml/data.py | ajansenn/BRUVNet | 68d905929902787c9986ecc41adcd46f07a4551e | [
"MIT"
] | 4 | 2021-10-07T00:29:58.000Z | 2022-01-14T00:26:17.000Z | azureml/data.py | ajansenn/BRUVNet | 68d905929902787c9986ecc41adcd46f07a4551e | [
"MIT"
] | null | null | null | azureml/data.py | ajansenn/BRUVNet | 68d905929902787c9986ecc41adcd46f07a4551e | [
"MIT"
] | null | null | null | import numpy as np
import os
import torch.utils.data
from azureml.core import Run
from PIL import Image
class KakaduFishDataset(torch.utils.data.Dataset):
def __init__(self, root, transforms=None):
self.root = root
self.transforms = transforms
# load all image files, sorting them to ensure that they are aligned
self.img_dir = os.path.join(root, "Images")
self.mask_dir = os.path.join(root, "Masks")
self.imgs = list(sorted(os.listdir(self.img_dir)))
self.masks = list(sorted(os.listdir(self.mask_dir)))
def __getitem__(self, idx):
        # load images and masks
img_path = os.path.join(self.img_dir, self.imgs[idx])
mask_path = os.path.join(self.mask_dir, self.masks[idx])
img = Image.open(img_path).convert("RGB")
# note that we haven't converted the mask to RGB,
# because each color corresponds to a different instance
# with 0 being background
mask = Image.open(mask_path)
mask = np.array(mask)
# instances are encoded as different colors
obj_ids = np.unique(mask)
# first id is the background, so remove it
obj_ids = obj_ids[1:]
# split the color-encoded mask into a set
# of binary masks
masks = mask == obj_ids[:, None, None]
# get bounding box coordinates for each mask
num_objs = len(obj_ids)
boxes = []
for i in range(num_objs):
pos = np.where(masks[i])
xmin = np.min(pos[1])
xmax = np.max(pos[1])
ymin = np.min(pos[0])
ymax = np.max(pos[0])
boxes.append([xmin, ymin, xmax, ymax])
boxes = torch.as_tensor(boxes, dtype=torch.float32)
# there is only one class
labels = torch.ones((num_objs,), dtype=torch.int64)
masks = torch.as_tensor(masks, dtype=torch.uint8)
image_id = torch.tensor([idx])
area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
# suppose all instances are not crowd
iscrowd = torch.zeros((num_objs,), dtype=torch.int64)
target = {}
target["boxes"] = boxes
target["labels"] = labels
target["masks"] = masks
target["image_id"] = image_id
target["area"] = area
target["iscrowd"] = iscrowd
if self.transforms is not None:
img, target = self.transforms(img, target)
return img, target
def __len__(self):
return len(self.imgs)
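# Example (sketch): wiring the dataset into a detection-style DataLoader. The
# targets are variable-size dicts (Mask R-CNN convention), so a custom
# collate_fn is needed; the root path below is illustrative and must contain
# the "Images" and "Masks" folders expected by the class.
#     dataset = KakaduFishDataset(root="data", transforms=None)
#     loader = torch.utils.data.DataLoader(
#         dataset, batch_size=2, shuffle=True,
#         collate_fn=lambda batch: tuple(zip(*batch)))
#     images, targets = next(iter(loader))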
| 32.384615 | 76 | 0.593428 |
ff6e8f84ed3e2778b91a1466c357a12931d0410c | 1,022 | py | Python | returns/_generated/pipeline/pipe.py | seancrwhite/returns | 1350beb3ece388d1b7e3a2a529ddd7c984fe74ca | [
"BSD-2-Clause"
] | null | null | null | returns/_generated/pipeline/pipe.py | seancrwhite/returns | 1350beb3ece388d1b7e3a2a529ddd7c984fe74ca | [
"BSD-2-Clause"
] | 1 | 2020-07-13T23:12:58.000Z | 2020-07-13T23:12:58.000Z | returns/_generated/pipeline/pipe.py | seancrwhite/returns | 1350beb3ece388d1b7e3a2a529ddd7c984fe74ca | [
"BSD-2-Clause"
] | null | null | null | from returns._generated.pipeline.flow import _flow
def _pipe(*functions):
"""
    Allows composing a value with up to 7 functions that use this value.
We use a custom ``mypy`` plugin to make sure types are correct.
Otherwise, it is currently impossible to properly type this function.
Each next function uses the previous result as an input parameter.
Here's how it should be used:
.. code:: python
>>> from returns.pipeline import pipe
>>> # => executes: str(float(int('1')))
>>> assert pipe(int, float, str)('1') == '1.0'
This function is closely related
    to :func:`flow <returns._generated.pipeline.flow._flow>`:
.. code:: python
>>> from returns.pipeline import flow
>>> assert pipe(int, float, str)('1') == flow('1', int, float, str)
See also:
- https://stackoverflow.com/a/41585450/4842742
- https://github.com/gcanti/fp-ts/blob/master/src/pipeable.ts
"""
return lambda instance: _flow(instance, *functions)
| 29.2 | 73 | 0.64775 |
38e48ebd111edf21f197b04e49512a59689feb65 | 1,765 | py | Python | python/oneflow/test/modules/test_matmul.py | LiPengze97/oneflow | 1c1d2d3faa1c02d20e009046a290cf1095ee12e0 | [
"Apache-2.0"
] | null | null | null | python/oneflow/test/modules/test_matmul.py | LiPengze97/oneflow | 1c1d2d3faa1c02d20e009046a290cf1095ee12e0 | [
"Apache-2.0"
] | null | null | null | python/oneflow/test/modules/test_matmul.py | LiPengze97/oneflow | 1c1d2d3faa1c02d20e009046a290cf1095ee12e0 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
@flow.unittest.skip_unless_1n1d()
class TestModule(flow.unittest.TestCase):
@autotest(check_graph=False)
def test_flow_matmul_with_random_data(test_case):
device = random_device()
k = random(1, 6)
x = random_pytorch_tensor(ndim=2, dim1=k).to(device)
y = random_pytorch_tensor(ndim=2, dim0=k).to(device)
z = torch.matmul(x, y)
return z
@autotest(check_graph=False)
def test_flow_tensor_matmul_with_random_data(test_case):
device = random_device()
k = random(1, 6)
x = random_pytorch_tensor(ndim=2, dim1=k).to(device)
y = random_pytorch_tensor(ndim=2, dim0=k).to(device)
return x.matmul(y)
@autotest(check_graph=False)
def test_flow_tensor_broadcast_matmul_with_random_data(test_case):
device = random_device()
k = random(1, 6)
x = random_pytorch_tensor(ndim=4, dim3=k).to(device)
y = random_pytorch_tensor(ndim=2, dim0=k).to(device)
return x.matmul(y)
if __name__ == "__main__":
unittest.main()
| 32.090909 | 72 | 0.713881 |
9b8db524584369acb50eadbf2c9e09b08a4273a5 | 1,373 | py | Python | python/107_Binary_Tree_Level_Order_Traversal_II.py | dvlpsh/leetcode-1 | f965328af72113ac8a5a9d6624868c1502be937b | [
"MIT"
] | 4,416 | 2016-03-30T15:02:26.000Z | 2022-03-31T16:31:03.000Z | python/107_Binary_Tree_Level_Order_Traversal_II.py | YinpuLi/leetcode-6 | 1371de2631d745efba39de41b51c3424e35da434 | [
"MIT"
] | 20 | 2018-11-17T13:46:25.000Z | 2022-03-13T05:37:06.000Z | python/107_Binary_Tree_Level_Order_Traversal_II.py | YinpuLi/leetcode-6 | 1371de2631d745efba39de41b51c3424e35da434 | [
"MIT"
] | 1,374 | 2017-05-26T15:44:30.000Z | 2022-03-30T19:21:02.000Z | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
# def levelOrderBottom(self, root):
# """
# :type root: TreeNode
# :rtype: List[List[int]]
# """
# res = []
# if root is None:
# return []
# self.get_level(res, root, 0)
# # reverse result
# res.reverse()
# return res
#
# def get_level(self, res, root, depth):
# if root is None:
# return
# if depth == len(res):
# res.append([])
# res[depth].append(root.val)
# self.get_level(res, root.left, depth + 1)
# self.get_level(res, root.right, depth + 1)
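    # Example: for the tree [3, 9, 20, null, null, 15, 7]
    #       3
    #      / \
    #     9  20
    #        / \
    #       15  7
    # levelOrderBottom returns [[15, 7], [9, 20], [3]] (deepest level first).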
def levelOrderBottom(self, root):
if root is None:
return []
# use stack
stack = [[root]]
res = []
while len(stack) > 0:
top = stack.pop()
res.insert(0, [t.val for t in top])
temp = []
for node in top:
if node.left is not None:
temp.append(node.left)
if node.right is not None:
temp.append(node.right)
if len(temp) > 0:
stack.append(temp)
return res | 29.212766 | 52 | 0.463948 |
f0e7007553a4ea303e2d2886a38f6caa01475fe1 | 4,697 | py | Python | airflow/dags/training.py | msstoci/airflow-mastoci | 8195f25ae235dfff832fc84cee6a2f0f862bfa66 | [
"Apache-2.0"
] | null | null | null | airflow/dags/training.py | msstoci/airflow-mastoci | 8195f25ae235dfff832fc84cee6a2f0f862bfa66 | [
"Apache-2.0"
] | null | null | null | airflow/dags/training.py | msstoci/airflow-mastoci | 8195f25ae235dfff832fc84cee6a2f0f862bfa66 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DAG definition for recserv model training."""
import airflow
from airflow import DAG
from airflow.contrib.operators.bigquery_operator import BigQueryOperator
from airflow.contrib.operators.bigquery_to_gcs import BigQueryToCloudStorageOperator
from airflow.hooks.base_hook import BaseHook
from airflow.operators.app_engine_admin_plugin import AppEngineVersionOperator
from airflow.operators.ml_engine_plugin import MLEngineTrainingOperator
import datetime
def _get_project_id():
"""Get project ID from default GCP connection."""
extras = BaseHook.get_connection('google_cloud_default').extra_dejson
key = 'extra__google_cloud_platform__project'
if key in extras:
project_id = extras[key]
else:
        raise ValueError('Must configure project_id in google_cloud_default '
                         'connection from Airflow Console')
return project_id
PROJECT_ID = _get_project_id()
# Data set constants, used in BigQuery tasks. You can change these
# to conform to your data.
DATASET = 'GA360_test'
TABLE_NAME = 'ga_sessions_sample'
ARTICLE_CUSTOM_DIMENSION = '10'
# GCS bucket names and region, can also be changed.
BUCKET = 'gs://recserve_' + PROJECT_ID
REGION = 'asia-southeast1-b'
# The code package name comes from the model code in the wals_ml_engine
# directory of the solution code base.
PACKAGE_URI = BUCKET + '/code/wals_ml_engine-0.1.tar.gz'
JOB_DIR = BUCKET + '/jobs'
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': airflow.utils.dates.days_ago(2),
'email': ['airflow@example.com'],
'email_on_failure': True,
'email_on_retry': False,
'retries': 5,
'retry_delay': datetime.timedelta(minutes=5)
}
# Default schedule interval using cronjob syntax - can be customized here
# or in the Airflow console.
schedule_interval = '00 21 * * *'
dag = DAG('recommendations_training_v1', default_args=default_args,
schedule_interval=schedule_interval)
dag.doc_md = __doc__
#
#
# Task Definition
#
#
# BigQuery training data query
bql='''
#legacySql
SELECT
fullVisitorId as clientId,
ArticleID as contentId,
(nextTime - hits.time) as timeOnPage,
FROM(
SELECT
fullVisitorId,
hits.time,
MAX(IF(hits.customDimensions.index={0},
hits.customDimensions.value,NULL)) WITHIN hits AS ArticleID,
LEAD(hits.time, 1) OVER (PARTITION BY fullVisitorId, visitNumber
ORDER BY hits.time ASC) as nextTime
FROM [{1}.{2}.{3}]
WHERE hits.type = "PAGE"
) HAVING timeOnPage is not null and contentId is not null;
'''
bql = bql.format(ARTICLE_CUSTOM_DIMENSION, PROJECT_ID, DATASET, TABLE_NAME)
t1 = BigQueryOperator(
task_id='bq_rec_training_data',
bql=bql,
destination_dataset_table='%s.recommendation_events' % DATASET,
write_disposition='WRITE_TRUNCATE',
dag=dag)
# BigQuery training data export to GCS
training_file = BUCKET + '/data/recommendation_events.csv'
t2 = BigQueryToCloudStorageOperator(
task_id='bq_export_op',
source_project_dataset_table='%s.recommendation_events' % DATASET,
destination_cloud_storage_uris=[training_file],
export_format='CSV',
dag=dag
)
# ML Engine training job
job_id = 'recserve_{0}'.format(datetime.datetime.now().strftime('%Y%m%d%H%M'))
job_dir = BUCKET + '/jobs/' + job_id
output_dir = BUCKET
training_args = ['--job-dir', job_dir,
'--train-file', training_file,
'--output-dir', output_dir,
'--data-type', 'web_views',
'--use-optimized']
t3 = MLEngineTrainingOperator(
task_id='ml_engine_training_op',
project_id=PROJECT_ID,
job_id=job_id,
package_uris=[PACKAGE_URI],
training_python_module='trainer.task',
training_args=training_args,
region=REGION,
scale_tier='CUSTOM',
master_type='complex_model_m_gpu',
dag=dag
)
# App Engine deploy new version
t4 = AppEngineVersionOperator(
task_id='app_engine_deploy_version',
project_id=PROJECT_ID,
service_id='default',
region=REGION,
service_spec=None,
dag=dag
)
t2.set_upstream(t1)
t3.set_upstream(t2)
t4.set_upstream(t3)
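# The three set_upstream calls above define the linear dependency chain
# t1 -> t2 -> t3 -> t4; the same wiring can be written with Airflow's
# bit-shift syntax: t1 >> t2 >> t3 >> t4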
| 28.640244 | 84 | 0.729615 |
c84c20d6622cefbcf41b7dfcd5b6f6c7eea7a232 | 1,933 | py | Python | test/pytest_t_tools_densedm.py | Tomographer/tomographer | 0a64927e639454175803c1746141bd9288af8b29 | [
"MIT"
] | 12 | 2015-09-24T02:25:11.000Z | 2020-02-13T02:26:00.000Z | test/pytest_t_tools_densedm.py | Tomographer/tomographer | 0a64927e639454175803c1746141bd9288af8b29 | [
"MIT"
] | 4 | 2015-10-12T15:48:55.000Z | 2021-07-21T15:14:59.000Z | test/pytest_t_tools_densedm.py | Tomographer/tomographer | 0a64927e639454175803c1746141bd9288af8b29 | [
"MIT"
] | 2 | 2015-10-12T15:32:29.000Z | 2018-05-08T11:39:49.000Z | #!/usr/bin/env python
from __future__ import print_function
import re
import numpy as np
import numpy.testing as npt
import logging
logging.basicConfig(level=logging.DEBUG)
import unittest
# import the module
import tomographer.tools.densedm
import tomographer.tools.densedm.mle
import tomographer
import tomographer.densedm
class SimulateMeasurements(unittest.TestCase):
def test_sim(self):
rho_sim = np.array([[0.9, 0], [0, 0.1]])
Mk = tomographer.tools.densedm.PauliMeasEffectsQubit
n = 1000
d = tomographer.tools.densedm.simulate_measurements(rho_sim, Mk, n)
self.assertEqual(d.Nm[0] + d.Nm[1], n)
self.assertEqual(d.Nm[2] + d.Nm[3], n)
self.assertEqual(d.Nm[4] + d.Nm[5], n)
# Hoeffding's inequality: Prob( |N(+) - p*n| > eps*n ) < 2*exp(-2*eps^2*n)
#
# --> so the probability to deviate by more than 0.1 fraction is bounded by
# 2*exp(-2 * 0.1**2 * n) ~ 4e-9 (for n=1000)
self.assertLessEqual( (d.Nm[0] - 0.5*n) , 0.1*n )
self.assertLessEqual( (d.Nm[2] - 0.5*n) , 0.1*n )
self.assertLessEqual( (d.Nm[4] - 0.9*n) , 0.1*n )
class Mle(unittest.TestCase):
def test_mle(self):
Emn = sum(tomographer.tools.densedm.PauliMeasEffectsQubit, [])
Nm = np.array([250, 250, 250, 250, 500, 0]) # really extreme example
llh = tomographer.densedm.IndepMeasLLH(tomographer.densedm.DMTypes(2))
llh.setMeas(Emn, Nm)
(rho_MLE, d) = tomographer.tools.densedm.mle.find_mle(llh)
# we know the exact solution, rho_MLE = |0><0|
npt.assert_array_almost_equal(rho_MLE,
np.array([[1, 0], [0, 0]]))
# normally, this is not needed as we are being run via pyruntest.py, but it might be
# useful if we want to run individually picked tests
if __name__ == '__main__':
unittest.main()
| 28.014493 | 84 | 0.619762 |
b90220e8421213508fee9d1331569e5e8893d9c6 | 8,515 | py | Python | Tests/Methods/Slot/test_SlotM15_meth.py | ajpina/pyleecan | f8d1fce7d108cf443f5767e35d59ff15905fb49f | [
"Apache-2.0"
] | null | null | null | Tests/Methods/Slot/test_SlotM15_meth.py | ajpina/pyleecan | f8d1fce7d108cf443f5767e35d59ff15905fb49f | [
"Apache-2.0"
] | null | null | null | Tests/Methods/Slot/test_SlotM15_meth.py | ajpina/pyleecan | f8d1fce7d108cf443f5767e35d59ff15905fb49f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import pytest
from pyleecan.Classes.LamSlotMag import LamSlotMag
from pyleecan.Classes.SlotM15 import SlotM15
from numpy import pi, exp, sqrt, arcsin, angle
from pyleecan.Methods.Slot.Slot.comp_height import comp_height
from pyleecan.Methods.Slot.Slot.comp_surface import comp_surface
from pyleecan.Methods.Slot.Slot.comp_angle_opening import comp_angle_opening
from pyleecan.Methods.Slot.Slot.comp_height_active import comp_height_active
from pyleecan.Methods.Slot.Slot.comp_surface_active import comp_surface_active
from pyleecan.Methods import ParentMissingError
mm = 1e-3
Mag15_test = list()
# Internal Slot inset magnet with same top and bottom radius
lam = LamSlotMag(Rint=40 * mm, Rext=110 * mm, is_internal=True)
lam.slot = SlotM15(
Zs=4, W0=80 * pi / 180, H0=10 * mm, Hmag=20 * mm, Wmag=100 * mm, Rtopm=100 * mm
)
Mag15_test.append(
{
"test_obj": lam,
"Rmec": 120 * mm,
"S_exp": 1.46607e-3,
"SA_exp": 2e-3,
"HA_exp": 0.02,
"Ao": 1.39626,
"H_exp": 0.01,
}
)
# Internal Slot inset magnet with same top and bottom radius
lam = LamSlotMag(Rint=40 * mm, Rext=110 * mm, is_internal=True)
lam.slot = SlotM15(
Zs=4, W0=80 * pi / 180, H0=20 * mm, Hmag=20 * mm, Wmag=100 * mm, Rtopm=100 * mm
)
Mag15_test.append(
{
"test_obj": lam,
"Rmec": 110 * mm,
"S_exp": 2.7925e-3,
"SA_exp": 2.0533e-3,
"HA_exp": 0.02,
"Ao": 1.39626,
"H_exp": 20 * mm,
}
)
# Internal slot surface magnet with same top and bottom radius
lam = LamSlotMag(Rint=40 * mm, Rext=100 * mm, is_internal=True)
lam.slot = SlotM15(
Zs=4, W0=80 * pi / 180, H0=0 * mm, Hmag=20 * mm, Wmag=100 * mm, Rtopm=100 * mm
)
Mag15_test.append(
{
"test_obj": lam,
"Rmec": 120e-3,
"S_exp": 0,
"SA_exp": 2e-3,
"HA_exp": 0.02,
"Ao": 1.39626,
"H_exp": 0,
}
)
# Internal slot surface magnet with different top and bottom radius
lam = LamSlotMag(Rint=40 * mm, Rext=100 * mm, is_internal=True)
lam.slot = SlotM15(
Zs=4, W0=80 * pi / 180, H0=0 * mm, Hmag=20 * mm, Wmag=100 * mm, Rtopm=65 * mm
)
Mag15_test.append(
{
"test_obj": lam,
"Rmec": 120e-3,
"S_exp": 0,
"SA_exp": 1.7185e-3,
"HA_exp": 0.02,
"Ao": 1.39626,
"H_exp": 0,
}
)
# For AlmostEqual
DELTA = 1e-4
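# Example (sketch): running only the surface-computation cases of this
# parametrized suite with pytest (the -k expression is illustrative):
#   pytest Tests/Methods/Slot/test_SlotM15_meth.py -k "comp_surface" -v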
@pytest.mark.METHODS
class Test_Magnet_Type_15_meth(object):
"""unittest for MagnetType15 methods"""
@pytest.mark.parametrize("test_dict", Mag15_test)
def test_comp_surface(self, test_dict):
"""Check that the computation of the surface is correct"""
test_obj = test_dict["test_obj"]
result = test_obj.slot.comp_surface()
a = result
b = test_dict["S_exp"]
msg = "Return " + str(a) + " expected " + str(b)
assert a == pytest.approx(b, rel=DELTA), msg
# Check that the analytical method returns the same result as the numerical one
b = comp_surface(test_obj.slot)
msg = "Return " + str(a) + " expected " + str(b)
assert a == pytest.approx(b, rel=DELTA), msg
@pytest.mark.parametrize("test_dict", Mag15_test)
def test_comp_surface_active(self, test_dict):
"""Check that the computation of the active surface is correct"""
test_obj = test_dict["test_obj"]
result = test_obj.slot.comp_surface_active()
a = result
b = test_dict["SA_exp"]
msg = "Return " + str(a) + " expected " + str(b)
assert a == pytest.approx(b, rel=DELTA), msg
# Check that the analytical method returns the same result as the numerical one
b = comp_surface_active(test_obj.slot)
msg = "Return " + str(a) + " expected " + str(b)
assert a == pytest.approx(b, rel=DELTA), msg
@pytest.mark.parametrize("test_dict", Mag15_test)
def test_comp_height(self, test_dict):
"""Check that the computation of the height is correct"""
test_obj = test_dict["test_obj"]
result = test_obj.slot.comp_height()
a = result
b = test_dict["H_exp"]
msg = "Return " + str(a) + " expected " + str(b)
assert a == pytest.approx(b, rel=DELTA), msg
# Check that the analytical method returns the same result as the numerical one
b = comp_height(test_obj.slot)
msg = "Return " + str(a) + " expected " + str(b)
assert a == pytest.approx(b, rel=DELTA), msg
@pytest.mark.parametrize("test_dict", Mag15_test)
def test_comp_height_active(self, test_dict):
"""Check that the computation of the active height is correct"""
test_obj = test_dict["test_obj"]
result = test_obj.slot.comp_height_active()
a = result
b = test_dict["HA_exp"]
msg = "Return " + str(a) + " expected " + str(b)
assert a == pytest.approx(b, rel=DELTA), msg
# Check that the analytical method returns the same result as the numerical one
b = comp_height_active(test_obj.slot)
msg = "Return " + str(a) + " expected " + str(b)
assert a == pytest.approx(b, rel=DELTA), msg
@pytest.mark.parametrize("test_dict", Mag15_test)
def test_comp_angle_opening(self, test_dict):
"""Check that the computation of the average opening angle is correct"""
test_obj = test_dict["test_obj"]
a = test_obj.slot.comp_angle_opening()
assert a == pytest.approx(test_dict["Ao"], rel=DELTA)
# Check that the analytical method returns the same result as the numerical one
b = comp_angle_opening(test_obj.slot)
msg = "Return " + str(a) + " expected " + str(b)
assert a == pytest.approx(b, rel=DELTA)
@pytest.mark.parametrize("test_dict", Mag15_test)
def test_comp_width_opening(self, test_dict):
"""Check that the computation of the average opening width is correct"""
test_obj = test_dict["test_obj"]
a = test_obj.slot.comp_width_opening()
point_dict = test_obj.slot._comp_point_coordinate()
assert a == pytest.approx(abs(point_dict["Z1"] - point_dict["Z4"]), rel=DELTA)
@pytest.mark.parametrize("test_dict", Mag15_test)
def test_comp_mec_radius(self, test_dict):
"""Check that the computation of the mechanical radius is correct"""
test_obj = test_dict["test_obj"]
a = test_obj.comp_radius_mec()
assert a == pytest.approx(test_dict["Rmec"], rel=DELTA)
@pytest.mark.parametrize("test_dict", Mag15_test)
def test_comp_point_coordinate(self, test_dict):
"""Check that the point coordinates are correct"""
test_obj = test_dict["test_obj"]
point_dict = test_obj.slot._comp_point_coordinate()
Z1 = point_dict["Z1"]
Z2 = point_dict["Z2"]
Z3 = point_dict["Z3"]
Z4 = point_dict["Z4"]
ZM0 = point_dict["ZM0"]
ZM1 = point_dict["ZM1"]
ZM2 = point_dict["ZM2"]
ZM3 = point_dict["ZM3"]
ZM4 = point_dict["ZM4"]
W0 = test_obj.slot.W0
H0 = test_obj.slot.H0
Wmag = test_obj.slot.Wmag
Hmag = test_obj.slot.Hmag
Rbo = test_obj.get_Rbo()
# Polar Slot
assert abs(Z1) == pytest.approx(Rbo, rel=DELTA)
assert angle(Z1) == pytest.approx(-W0 / 2, rel=DELTA)
assert abs(Z4) == pytest.approx(Rbo, rel=DELTA)
assert angle(Z4) == pytest.approx(W0 / 2, rel=DELTA)
if test_obj.is_internal:
assert abs(Z2) == pytest.approx(Rbo - H0, rel=DELTA)
assert abs(Z3) == pytest.approx(Rbo - H0, rel=DELTA)
else:
assert abs(Z3) == pytest.approx(Rbo + H0, rel=DELTA)
assert abs(Z2) == pytest.approx(Rbo + H0, rel=DELTA)
assert angle(Z2) == pytest.approx(-W0 / 2, rel=DELTA)
assert angle(Z3) == pytest.approx(W0 / 2, rel=DELTA)
# Polar bottom for magnet
assert abs(Z2) == pytest.approx(abs(ZM1), rel=DELTA)
assert abs(Z2) == pytest.approx(abs(ZM4), rel=DELTA)
# Parallel side
assert ZM1.imag == pytest.approx(ZM2.imag, rel=DELTA)
assert ZM3.imag == pytest.approx(ZM4.imag, rel=DELTA)
assert ZM1.imag == pytest.approx(-Wmag / 2, rel=DELTA)
assert ZM3.imag == pytest.approx(Wmag / 2, rel=DELTA)
# Hmag def
if test_obj.is_internal:
assert ZM0 == pytest.approx(abs(Z2) + Hmag, rel=DELTA)
else:
assert ZM0 == pytest.approx(abs(Z2) - Hmag, rel=DELTA)
| 36.861472 | 87 | 0.617733 |
f2abb85e214cf39ae68448ddeb4063154ffcf547 | 467 | py | Python | data/scripts/templates/object/building/poi/shared_dathomir_imperialprisonpatrol_large2.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | [
"MIT"
] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/building/poi/shared_dathomir_imperialprisonpatrol_large2.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | null | null | null | data/scripts/templates/object/building/poi/shared_dathomir_imperialprisonpatrol_large2.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Building()
result.template = "object/building/poi/shared_dathomir_imperialprisonpatrol_large2.iff"
result.attribute_template_id = -1
result.stfName("poi_n","base_poi_building")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | 27.470588 | 88 | 0.738758 |
287e216efc28d7dd79afb1b5b4eaf04f26f984b8 | 6,121 | py | Python | ajax_filtered_fields/forms/widgets.py | bisio/django-ajax-filtered-fields | a80f25c79a8564b942690408493d245fd84c2782 | [
"MIT"
] | null | null | null | ajax_filtered_fields/forms/widgets.py | bisio/django-ajax-filtered-fields | a80f25c79a8564b942690408493d245fd84c2782 | [
"MIT"
] | null | null | null | ajax_filtered_fields/forms/widgets.py | bisio/django-ajax-filtered-fields | a80f25c79a8564b942690408493d245fd84c2782 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import operator
from django.conf import settings
from django import forms
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from ajax_filtered_fields import utils
def _renderFilter(js_method_name, element_id, model, lookup_list,
select_related):
"""Return the html output of a filter link."""
label, lookup_dict = lookup_list
script = "ajax_filtered_fields.%s('%s', '%s', '%s', '%s', '%s')" % (
js_method_name,
element_id,
model._meta.app_label,
model._meta.object_name,
utils.lookupToString(lookup_dict),
select_related)
return u"""
<a class="ajax_filter_choice"
href="javascript:void(0)"
onclick="%s">%s</a>
""" % (script, label)
class FilteredSelectMultiple(forms.SelectMultiple):
def render(self, name, value, attrs=None, choices=()):
self._element_id = attrs['id']
# choices links
# if there is only one choice, then nothing will be rendered
lookups_output = ""
lookups = utils.getLookups(self.lookups)
if len(lookups) > 1:
js_method_name = "getManyToManyJSON"
lookups_output = "\n".join(
_renderFilter(js_method_name, self._element_id,
self.model, i, self.select_related)
for i in lookups)
        # normal widget output from the ancestor
self.choices = self._getAllChoices(value)
parent_output = super(FilteredSelectMultiple, self
).render(name, value, attrs, choices)
# create the output including the django admin's Javascript code that
# mutates the select widget into a selectfilter one
# this assumes that /admin/jsi18n/, core.js, SelectBox.js and
# SelectFilter2.js are loaded from the page
verbose_name = self.model._meta.verbose_name_plural.replace('"', '\\"')
output = u"""
<div>
%s
</div>
%s
<script type="text/javascript">
$(document).ready(function(){
SelectFilter.init("id_%s", "%s", 0, "%s");
});
</script>
""" % (lookups_output, parent_output, name,
verbose_name, settings.ADMIN_MEDIA_PREFIX)
return mark_safe(output)
def _getAllChoices(self, value):
value = value or []
choices = list(self.choices)
        # convert to unicode for safe comparison during a ValidationError
choices_keys = [unicode(i[0]) for i in choices]
for i in value:
if not unicode(i) in choices_keys:
obj = utils.getObject(self.model, {"pk": i}, self.select_related)
choices.append((i, unicode(obj)))
choices.sort(key=operator.itemgetter(1))
return choices
class FilteredSelect(forms.Select):
def render(self, name, value, attrs=None, choices=()):
self._element_id = attrs['id']
# choices links
# if there is only one choice, then nothing will be rendered
lookups_output = ""
lookups = utils.getLookups(self.lookups)
if len(lookups) > 1:
js_method_name = "getForeignKeyJSON"
lookups_output = "\n".join(
_renderFilter(js_method_name, self._element_id,
self.model, i, self.select_related)
for i in lookups)
# get the selected object name
selection = "-" * 9
if value:
selection = utils.getObject(self.model, {"pk": value},
self.select_related)
# filter selectbox input
filter_id = "%s_input" % self._element_id
# give a style to the final select widget
_attrs = {"size": 2, "style": "width:270px;"}
try:
attrs.update(_attrs)
except AttributeError:
attrs = _attrs
        # normal widget output from the ancestor
        # create a field with a dummy name, the real value
# will be retrieved from a hidden field
parent_output = super(FilteredSelect, self
).render("dummy-%s" % name, value, attrs, choices)
# output
mapping = {
"lookups_output": lookups_output,
"selection": selection,
"filter_id": filter_id,
"parent_output": parent_output,
"name": name,
"element_id": self._element_id,
"value": "" if value is None else value,
}
output = u"""
<div class="selector">
%(lookups_output)s
</div>
<div class="selector">
<div class="selector-available">
<h2>%(selection)s</h2>
<p class="selector-filter">
<img src="/media/img/admin/selector-search.gif">
<input id="%(filter_id)s" type="text">
</p>
%(parent_output)s
</div>
</div>
<input type="hidden" name="%(name)s" id="hidden-%(element_id)s" value="%(value)s" />
<script type="text/javascript" charset="utf-8">
$(document).ready(function(){
SelectBox.init('%(element_id)s');
$("#%(filter_id)s").bind("keyup", function(e) {
SelectBox.filter("%(element_id)s", $("#%(filter_id)s").val())
});
$(".ajax_letter").click(function(e) {
$("#%(filter_id)s").val("");
});
ajax_filtered_fields.bindForeignKeyOptions("%(element_id)s");
});
</script>
""" % mapping
return mark_safe(output)
| 36.652695 | 96 | 0.523117 |
16f63f7f538d9931cf963117243553de6d78c183 | 2,508 | py | Python | run_worker.py | SaneBow/AttentionAgentCarRacing | 944dc18b99b2c51a25c206f722a0bbc43cb7bbb0 | [
"Apache-2.0"
] | 1 | 2020-12-05T13:42:51.000Z | 2020-12-05T13:42:51.000Z | run_worker.py | SaneBow/AttentionAgentCarRacing | 944dc18b99b2c51a25c206f722a0bbc43cb7bbb0 | [
"Apache-2.0"
] | null | null | null | run_worker.py | SaneBow/AttentionAgentCarRacing | 944dc18b99b2c51a25c206f722a0bbc43cb7bbb0 | [
"Apache-2.0"
] | null | null | null | import argparse
import protobuf.roll_out_service_pb2_grpc
import gin
import grpc
import time
import misc.utility
from concurrent import futures
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
_MAX_MSG_LEN = 40 * 1024 * 1024
def main(config):
"""Start the worker."""
gin.parse_config_file(config.config)
logger = misc.utility.create_logger(
name='es_worker{}'.format(config.worker_id), log_dir=config.log_dir)
if config.master_address is not None:
logger.info('master_address: {}'.format(config.master_address))
channel = grpc.insecure_channel(
config.master_address,
[("grpc.max_receive_message_length", _MAX_MSG_LEN)])
stub = protobuf.roll_out_service_pb2_grpc.ParameterSyncServiceStub(
channel)
worker = misc.utility.get_es_worker(logger=logger, load_model_path=config.restore_model, master=stub)
else:
worker = misc.utility.get_es_worker(logger=logger, load_model_path=config.restore_model)
if config.run_on_gke:
port = config.port
else:
port = config.port + config.worker_id
server = grpc.server(
futures.ThreadPoolExecutor(max_workers=1),
options=[("grpc.max_send_message_length", _MAX_MSG_LEN),
("grpc.max_receive_message_length", _MAX_MSG_LEN)])
# Start the RPC server.
protobuf.roll_out_service_pb2_grpc.add_RollOutServiceServicer_to_server(
worker, server)
server.add_insecure_port('[::]:{}'.format(port))
server.start()
logger.info('Listening to port {} ...'.format(port))
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
logger.info('Worker quit.')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--port', help='Port to start the service.', type=int, default=20000)
parser.add_argument(
'--config', help='Path to the config file.')
parser.add_argument(
'--restore-model', help='Path to existing model file.', default=None)
parser.add_argument(
'--log-dir', help='Path to the log directory.', default='./log')
parser.add_argument(
'--worker-id', help='Worker ID.', type=int, default=0)
parser.add_argument(
'--master-address', help='Master address.')
parser.add_argument(
'--run-on-gke', help='Whether run this on GKE.', default=False,
action='store_true')
args, _ = parser.parse_known_args()
main(args)
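    # Example invocation (sketch; the gin config path and master address are
    # illustrative, the flags are those defined above):
    #   python run_worker.py --config=configs/carracing.gin --worker-id=0 \
    #       --port=20000 --master-address=localhost:10000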
| 33.44 | 109 | 0.67185 |
5e293d993be0b87c2547741c4d0d9ae7155e1670 | 502 | py | Python | scripts/eval.py | milebril/Temporal-SBMC-extension | 57c56b73786e49d233facffde4ba80f212a00fa8 | [
"Apache-2.0"
] | null | null | null | scripts/eval.py | milebril/Temporal-SBMC-extension | 57c56b73786e49d233facffde4ba80f212a00fa8 | [
"Apache-2.0"
] | null | null | null | scripts/eval.py | milebril/Temporal-SBMC-extension | 57c56b73786e49d233facffde4ba80f212a00fa8 | [
"Apache-2.0"
] | null | null | null | import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('output/emil/dataviz_sequence/denoised/peters/-peters-scene-0_frame-0.png',0)
f = np.fft.fft2(img)
fshift = np.fft.fftshift(f)
magnitude_spectrum = 20*np.log(np.abs(fshift))
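# Note: np.log of a zero-magnitude coefficient yields -inf, which can break the
# imshow color scaling. A common guard (an assumption, not in the original script):
#     magnitude_spectrum = 20 * np.log(np.abs(fshift) + 1)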
plt.subplot(121),plt.imshow(img, cmap = 'gray')
plt.title('Input Image'), plt.xticks([]), plt.yticks([])
plt.subplot(122),plt.imshow(magnitude_spectrum, cmap = 'gray')
plt.title('Magnitude Spectrum'), plt.xticks([]), plt.yticks([])
plt.show() | 35.857143 | 94 | 0.729084 |
b989fa204cc2773a1e7e84de34da2336805f6da6 | 1,523 | py | Python | gameoflife/modal/screen.py | hnrkcode/game-of-life | 7fa948bc788bcb3f1d21a5720ebf1a84fd0b512e | [
"MIT"
] | 5 | 2020-10-10T13:53:36.000Z | 2021-05-26T13:01:24.000Z | gameoflife/modal/screen.py | hnrkcode/game-of-life | 7fa948bc788bcb3f1d21a5720ebf1a84fd0b512e | [
"MIT"
] | null | null | null | gameoflife/modal/screen.py | hnrkcode/game-of-life | 7fa948bc788bcb3f1d21a5720ebf1a84fd0b512e | [
"MIT"
] | null | null | null | import math
import pygame
from gameoflife import settings
from gameoflife.util.text import InfoText
class ScreenText(pygame.sprite.Sprite):
def __init__(self, text):
super().__init__()
self.text = self.set_text(text)
self.color = settings.OVERLAY_COLOR
self.image = pygame.Surface(
[settings.BOARD_WIDTH_SIZE, settings.BOARD_HEIGHT_SIZE]
)
self.image.set_alpha(150)
self.image.fill(self.color)
self.rect = self.image.get_rect()
self.rect.topleft = [settings.BOARD_X_POS, settings.BOARD_Y_POS]
# Determines how much the letter is being moved.
self.y = 0
def set_text(self, text):
x, y = 0, 0
letters = []
for letter in list(text):
letters.append(InfoText(letter, size=settings.H1, pos=[x, y], alpha=True))
x += 50
# Center text.
text_width = sum(map(lambda letter: letter.image.get_width(), letters))
center = int(settings.BOARD_WIDTH_SIZE / 2) - (text_width / 2)
for letter in letters:
letter.rect[0] += center
return letters
def update(self):
self.image.fill(self.color)
for letter in self.text:
# Creates a wave effect for the letters horizontal position.
movement = int(10 * math.sin(self.y)) + 10
self.y += 1
letter.rect[1] = int(settings.BOARD_WIDTH_SIZE * 0.25) + movement
self.image.blit(letter.image, letter.rect) | 29.288462 | 86 | 0.607354 |
f385c8cb906b59d3a34c21aa210c6511c9be268b | 11,064 | bzl | Python | third_party/py/python_configure.bzl | efc-robot/cyberRT | c0e63539cf97030c64545d87c203d4e457d481ca | [
"Apache-2.0"
] | 41 | 2020-12-07T07:32:02.000Z | 2022-01-18T09:14:59.000Z | third_party/py/python_configure.bzl | efc-robot/cyberRT | c0e63539cf97030c64545d87c203d4e457d481ca | [
"Apache-2.0"
] | 4 | 2021-01-05T02:53:46.000Z | 2021-11-05T08:35:46.000Z | third_party/py/python_configure.bzl | efc-robot/cyberRT | c0e63539cf97030c64545d87c203d4e457d481ca | [
"Apache-2.0"
] | 28 | 2020-12-06T08:01:38.000Z | 2022-03-10T08:14:01.000Z | # Adapted with modifications from tensorflow/third_party/py/
"""Repository rule for Python autoconfiguration.
`python_configure` depends on the following environment variables:
* `PYTHON_BIN_PATH`: location of python binary.
* `PYTHON_LIB_PATH`: Location of python libraries.
"""
load("//tools/bazel:common.bzl", "basename")
_BAZEL_SH = "BAZEL_SH"
_PYTHON3_BIN_PATH = "PYTHON_BIN_PATH"
_PYTHON3_LIB_PATH = "PYTHON_LIB_PATH"
_HEADERS_HELP = (
"Are Python3 headers installed? Try installing python3-dev on " +
"Debian-based systems. Try python3-devel on Redhat-based systems."
)
def _tpl(repository_ctx, tpl, substitutions = {}, out = None):
if not out:
out = tpl
repository_ctx.template(
out,
Label("//third_party/py:%s.tpl" % tpl),
substitutions,
)
def _fail(msg):
"""Output failure message when auto configuration fails."""
red = "\033[0;31m"
no_color = "\033[0m"
fail("%sPython Configuration Error:%s %s\n" % (red, no_color, msg))
def _execute(
repository_ctx,
cmdline,
error_msg = None,
error_details = None,
empty_stdout_fine = False):
"""Executes an arbitrary shell command.
Args:
repository_ctx: the repository_ctx object
cmdline: list of strings, the command to execute
error_msg: string, a summary of the error if the command fails
error_details: string, details about the error or steps to fix it
empty_stdout_fine: bool, if True, an empty stdout result is fine, otherwise
it's an error
Return:
the result of repository_ctx.execute(cmdline)
"""
result = repository_ctx.execute(cmdline)
if result.stderr or not (empty_stdout_fine or result.stdout):
_fail("\n".join([
error_msg.strip() if error_msg else "Repository command failed",
result.stderr.strip(),
error_details if error_details else "",
]))
else:
return result
def _read_dir(repository_ctx, src_dir):
"""Returns a string with all files in a directory.
Finds all files inside a directory, traversing subfolders and following
symlinks. The returned string contains the full path of all files
separated by line breaks.
"""
find_result = _execute(
repository_ctx,
["find", src_dir, "-follow", "-type", "f"],
empty_stdout_fine = True,
)
return find_result.stdout
def _genrule(src_dir, genrule_name, command, outs):
"""Returns a string with a genrule.
Genrule executes the given command and produces the given outputs.
"""
return ("genrule(\n" + ' name = "' + genrule_name + '",\n' +
" outs = [\n" + outs + "\n ],\n" + ' cmd = """\n' +
command + '\n """,\n' + ")\n")
def _normalize_path(path):
"""Returns a path with '/' and remove the trailing slash."""
path = path.replace("\\", "/")
if path[-1] == "/":
path = path[:-1]
return path
def _symlink_genrule_for_dir(
repository_ctx,
src_dir,
dest_dir,
genrule_name,
src_files = [],
dest_files = []):
"""Returns a genrule to symlink a set of files.
If src_dir is passed, files will be read from the given directory; otherwise
we assume files are in src_files and dest_files
"""
if src_dir != None:
src_dir = _normalize_path(src_dir)
dest_dir = _normalize_path(dest_dir)
files = "\n".join(
sorted(_read_dir(repository_ctx, src_dir).splitlines()),
)
# Create a list with the src_dir stripped to use for outputs.
dest_files = files.replace(src_dir, "").splitlines()
src_files = files.splitlines()
command = []
outs = []
for i in range(len(dest_files)):
if dest_files[i] != "":
# If we have only one file to link we do not want to use the dest_dir, as
# $(@D) will include the full path to the file.
dest = "$(@D)/" + dest_dir + dest_files[i] if len(
dest_files,
) != 1 else "$(@D)/" + dest_files[i]
cmd = "ln -s"
command.append(cmd + ' "%s" "%s"' % (src_files[i], dest))
outs.append(' "' + dest_dir + dest_files[i] + '",')
return _genrule(
src_dir,
genrule_name,
" && ".join(command),
"\n".join(outs),
)
def _get_python_bin(repository_ctx, bin_path_key, default_bin_path):
"""Gets the python bin path."""
python_bin = repository_ctx.os.environ.get(bin_path_key, default_bin_path)
if not repository_ctx.path(python_bin).exists:
# It's a command, use 'which' to find its path.
python_bin_path = repository_ctx.which(python_bin)
else:
# It's a path, use it as it is.
python_bin_path = python_bin
if python_bin_path != None:
return str(python_bin_path)
_fail("Cannot find python in PATH, please make sure " +
"python is installed and add its directory in PATH, or --define " +
"%s='/something/else'.\nPATH=%s" %
(bin_path_key, repository_ctx.os.environ.get("PATH", "")))
def _get_bash_bin(repository_ctx):
"""Gets the bash bin path."""
bash_bin = repository_ctx.os.environ.get(_BAZEL_SH)
if bash_bin != None:
return bash_bin
else:
bash_bin_path = repository_ctx.which("bash")
if bash_bin_path != None:
return str(bash_bin_path)
else:
_fail(
"Cannot find bash in PATH, please make sure " +
"bash is installed and add its directory in PATH, or --define " +
"%s='/path/to/bash'.\nPATH=%s" %
(_BAZEL_SH, repository_ctx.os.environ.get("PATH", "")),
)
def _get_python_lib(repository_ctx, python_bin, lib_path_key):
"""Gets the python lib path."""
python_lib = repository_ctx.os.environ.get(lib_path_key)
if python_lib != None:
return python_lib
print_lib = (
"<<END\n" + "from __future__ import print_function\n" +
"import site\n" + "import os\n" + "\n" + "try:\n" +
" input = raw_input\n" + "except NameError:\n" + " pass\n" + "\n" +
"python_paths = []\n" + "if os.getenv('PYTHONPATH') is not None:\n" +
" python_paths = os.getenv('PYTHONPATH').split(':')\n" + "try:\n" +
" library_paths = site.getsitepackages()\n" +
"except AttributeError:\n" +
" from distutils.sysconfig import get_python_lib\n" +
" library_paths = [get_python_lib()]\n" +
"all_paths = set(python_paths + library_paths)\n" + "paths = []\n" +
"for path in all_paths:\n" + " if os.path.isdir(path):\n" +
" paths.append(path)\n" + "if len(paths) >=1:\n" +
" print(paths[0])\n" + "END"
)
cmd = "%s - %s" % (python_bin, print_lib)
result = repository_ctx.execute([_get_bash_bin(repository_ctx), "-c", cmd])
return result.stdout.strip("\n")
def _check_python_lib(repository_ctx, python_lib):
"""Checks the python lib path."""
cmd = 'test -d "%s" -a -x "%s"' % (python_lib, python_lib)
result = repository_ctx.execute([_get_bash_bin(repository_ctx), "-c", cmd])
if result.return_code == 1:
_fail("Invalid python library path: %s" % python_lib)
def _check_python_bin(repository_ctx, python_bin, bin_path_key):
"""Checks the python bin path."""
cmd = '[[ -x "%s" ]] && [[ ! -d "%s" ]]' % (python_bin, python_bin)
result = repository_ctx.execute([_get_bash_bin(repository_ctx), "-c", cmd])
if result.return_code == 1:
_fail("--define %s='%s' is not executable. Is it the python binary?" %
(bin_path_key, python_bin))
def _get_python_include(repository_ctx, python_bin):
"""Gets the python include path."""
result = _execute(
repository_ctx,
[
python_bin,
"-c",
"from __future__ import print_function;" +
"from distutils import sysconfig;" +
"print(sysconfig.get_python_inc())",
],
error_msg = "Problem getting python include path for {}.".format(python_bin),
error_details = (
"Is the Python binary path set up right? " + "(See " +
python_bin + ".) " + "Is distutils installed? " +
_HEADERS_HELP
),
)
include_path = result.stdout.splitlines()[0]
_execute(
repository_ctx,
[
python_bin,
"-c",
"import os;" +
"main_header = os.path.join('{}', 'Python.h');".format(include_path) +
"assert os.path.exists(main_header), main_header + ' does not exist.'",
],
error_msg = "Unable to find Python headers for {}".format(python_bin),
error_details = _HEADERS_HELP,
empty_stdout_fine = True,
)
return include_path
def _create_single_version_package(
repository_ctx,
variety_name,
bin_path_key,
default_bin_path,
lib_path_key):
"""Creates the repository containing files set up to build with Python."""
python_bin = _get_python_bin(repository_ctx, bin_path_key, default_bin_path)
_check_python_bin(repository_ctx, python_bin, bin_path_key)
python_lib = _get_python_lib(repository_ctx, python_bin, lib_path_key)
_check_python_lib(repository_ctx, python_lib)
python_include = _get_python_include(repository_ctx, python_bin)
python_include_rule = _symlink_genrule_for_dir(
repository_ctx,
python_include,
"{}_include".format(variety_name),
"{}_include".format(variety_name),
)
python_solib = basename(python_include)
_tpl(
repository_ctx,
"variety",
{
"%{PYTHON_INCLUDE_GENRULE}": python_include_rule,
"%{VARIETY_NAME}": variety_name,
"%{PYTHON_SO_NAME}": python_solib,
},
out = "{}/BUILD".format(variety_name),
)
def _python_autoconf_impl(repository_ctx):
"""Implementation of the python_autoconf repository rule."""
_create_single_version_package(
repository_ctx,
"_python3",
_PYTHON3_BIN_PATH,
"python3",
_PYTHON3_LIB_PATH,
)
_tpl(repository_ctx, "BUILD")
python_configure = repository_rule(
implementation = _python_autoconf_impl,
environ = [
_BAZEL_SH,
_PYTHON3_BIN_PATH,
_PYTHON3_LIB_PATH,
],
attrs = {
"_build_tpl": attr.label(
default = Label("//third_party/py:BUILD.tpl"),
allow_single_file = True,
),
"_variety_tpl": attr.label(
default = Label("//third_party/py:variety.tpl"),
allow_single_file = True,
),
},
)
"""Detects and configures the local Python.
It is expected that the system has a working Python 3 installation.
Add the following to your WORKSPACE file:
```python
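# The rule must be loaded before it is called; the load path below is assumed
# from this repository's layout:
load("//third_party/py:python_configure.bzl", "python_configure")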
python_configure(name = "local_config_python")
```
Args:
name: A unique name for this workspace rule.
"""
| 35.12381 | 85 | 0.611714 |
d0faf977fe1fad080be5eedd70ebc62247064411 | 4,541 | py | Python | xtlib/cache_client.py | microsoft/ExperimentTools | ea9c15899dc787d8fbaf87dc4e9849ec4b28aadc | [
"MIT"
] | 5 | 2020-06-13T17:44:51.000Z | 2021-12-21T21:02:36.000Z | xtlib/cache_client.py | microsoft/ExperimentTools | ea9c15899dc787d8fbaf87dc4e9849ec4b28aadc | [
"MIT"
] | 9 | 2020-06-11T20:56:52.000Z | 2022-03-12T00:34:45.000Z | xtlib/cache_client.py | microsoft/ExperimentTools | ea9c15899dc787d8fbaf87dc4e9849ec4b28aadc | [
"MIT"
] | 7 | 2020-06-13T17:44:54.000Z | 2021-12-21T21:02:52.000Z | #
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
#
# cache_client.py: handles the caching of credentials for the XT client
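# Commands are exchanged with a small local cache server as JSON dictionaries
# over a TCP socket on localhost (CACHE_SERVER_PORT below); for example,
# get_creds() sends {"get_creds": True, "team_name": <team>} and returns the
# server's reply as a string, while store_creds() will also start the server
# on demand if it is not yet running.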
import os
import ssl
import sys
import time
import json
import socket
import logging
from xtlib import utils
from xtlib import console
from xtlib import pc_utils
from xtlib import constants
from xtlib import file_utils
logger = logging.getLogger(__name__)
HOST = '127.0.0.1' # localhost
CACHE_SERVER_PORT = 65421
#FN_CERT = os.path.expanduser(constants.FN_XT_CERT)
class CacheClient():
def __init__(self):
self.use_ssl = False
def get_creds(self, team_name):
cmd_dict = {"get_creds": True, "team_name": team_name}
response = self._send_cmd_to_cache_server(cmd_dict, max_retries=1, can_start_server=False)
return response
def store_creds(self, team_name, creds):
cmd_dict = {"set_creds": creds, "team_name": team_name}
response =self._send_cmd_to_cache_server(cmd_dict, max_retries=5, can_start_server=True)
return response
def terminate_server(self):
cmd_dict = {"terminate": True}
response =self._send_cmd_to_cache_server(cmd_dict, max_retries=1, can_start_server=False)
return response
def _send_cmd_to_cache_server(self, cmd_dict, max_retries, can_start_server):
# retry up to 5 secs (to handle case where XT cache server is being started)
if True: # os.path.exists(FN_CERT):
# context = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, capath=FN_CERT)
# context.set_ciphers('EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH')
for i in range(max_retries):
try:
byte_buffer = json.dumps(cmd_dict).encode()
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as normal_sock:
if self.use_ssl:
sock = context.wrap_socket(normal_sock, server_hostname=HOST, ca_certs="server.crt",
cert_reqs=ssl.CERT_REQUIRED)
else:
sock = normal_sock
sock.connect((HOST, CACHE_SERVER_PORT))
# send cmd_dict as bytes
sock.sendall(byte_buffer)
# read response
data = sock.recv(16000)
response = data.decode()
return response
except BaseException as ex:
if i == 0 and can_start_server:
# first try failed; try starting the server
self._start_xt_cache_server()
if i > 0:
# we are retrying some error after trying to start the server
console.print(".", end="", flush=True)
#console.print(ex)
time.sleep(1)
# don't log this since it shows up to user as a confusing message
# if i == max_retries-1:
# logger.exception("Error retry exceeded sending cmd to XT cache server. Last ex={}".format(ex))
return None
def _start_xt_cache_server(self):
import subprocess
DETACHED_PROCESS = 0x00000008
CREATE_NO_WINDOW = 0x08000000
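        # Windows process-creation flags: DETACHED_PROCESS starts the child without
        # attaching it to the parent's console, and CREATE_NO_WINDOW suppresses the
        # console window, so the cache server keeps running hidden in the background.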
# launch in visible window for debugging
MAKE_SERVER_VISIBLE = False
xtlib_dir = os.path.dirname(__file__)
fn_script = "{}/cache_server.py".format(xtlib_dir)
fn_log = os.path.expanduser("~/.xt/tmp/cache_server.log")
file_utils.ensure_dir_exists(file=fn_log)
if MAKE_SERVER_VISIBLE:
#subprocess.Popen(parts, cwd=".", creationflags=DETACHED_PROCESS)
cmd = "start python " + fn_script
os.system(cmd)
elif pc_utils.is_windows():
# run detached, hidden for WINDOWS
parts = ["cmd", "/c", "python", fn_script]
flags = CREATE_NO_WINDOW
with open(fn_log, 'w') as output:
subprocess.Popen(parts, cwd=".", creationflags=flags, stdout=output, stderr=subprocess.STDOUT)
else:
# run detached, hidden for LINUX
parts = ["python", fn_script]
with open(fn_log, 'w') as output:
subprocess.Popen(parts, cwd=".", stdout=output, stderr=subprocess.STDOUT)
# give it time to start-up and receive commands
time.sleep(2)
| 35.476563 | 121 | 0.595464 |
581fba0b855e09ef3d8d6350d1f2abf810ed3f0e | 457 | py | Python | drf_email_project/users/migrations/0009_emailuser_avatar.py | liquanhui01/drf_email_user | 188ecc3e36edb190cc1cd8f908d3b72c42d49301 | [
"MIT"
] | null | null | null | drf_email_project/users/migrations/0009_emailuser_avatar.py | liquanhui01/drf_email_user | 188ecc3e36edb190cc1cd8f908d3b72c42d49301 | [
"MIT"
] | null | null | null | drf_email_project/users/migrations/0009_emailuser_avatar.py | liquanhui01/drf_email_user | 188ecc3e36edb190cc1cd8f908d3b72c42d49301 | [
"MIT"
] | 1 | 2021-11-15T08:23:13.000Z | 2021-11-15T08:23:13.000Z | # Generated by Django 3.1.4 on 2020-12-04 09:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0008_auto_20201204_1704'),
]
operations = [
migrations.AddField(
model_name='emailuser',
name='avatar',
            field=models.ImageField(blank=True, default='', null=True, upload_to='avatar/', verbose_name="user's avatar"),
),
]
| 24.052632 | 122 | 0.61488 |
ce4007f22385e52ddce2407a51d63c1871e88bcc | 6,340 | py | Python | tests/profiler_test.py | sudhakarsingh27/jax | 5afb5a6e5b1ddf29b55bdd30442022190e25529c | [
"Apache-2.0"
] | null | null | null | tests/profiler_test.py | sudhakarsingh27/jax | 5afb5a6e5b1ddf29b55bdd30442022190e25529c | [
"Apache-2.0"
] | 6 | 2022-01-03T23:12:33.000Z | 2022-02-14T23:13:52.000Z | tests/profiler_test.py | sudhakarsingh27/jax | 5afb5a6e5b1ddf29b55bdd30442022190e25529c | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import glob
import os
import shutil
import tempfile
import threading
import unittest
from absl.testing import absltest
import jax
import jax.numpy as jnp
import jax.profiler
from jax.config import config
import jax._src.test_util as jtu
try:
import portpicker
except ImportError:
portpicker = None
try:
from tensorflow.python.profiler import profiler_client
from tensorflow.python.profiler import profiler_v2 as tf_profiler
except ImportError:
profiler_client = None
tf_profiler = None
config.parse_flags_with_absl()
class ProfilerTest(unittest.TestCase):
# These tests simply test that the profiler API does not crash; they do not
# check functional correctness.
def setUp(self):
super().setUp()
self.worker_start = threading.Event()
self.profile_done = False
@unittest.skipIf(not portpicker, "Test requires portpicker")
def testStartServer(self):
port = portpicker.pick_unused_port()
jax.profiler.start_server(port=port)
del port
def testProgrammaticProfiling(self):
with tempfile.TemporaryDirectory() as tmpdir:
try:
jax.profiler.start_trace(tmpdir)
jax.pmap(lambda x: jax.lax.psum(x + 1, 'i'), axis_name='i')(
jnp.ones(jax.local_device_count()))
finally:
jax.profiler.stop_trace()
proto_path = glob.glob(os.path.join(tmpdir, "**/*.xplane.pb"),
recursive=True)
self.assertEqual(len(proto_path), 1)
with open(proto_path[0], "rb") as f:
proto = f.read()
# Sanity check that serialized proto contains host, device, and
# Python traces without deserializing.
self.assertIn(b"/host:CPU", proto)
if jtu.device_under_test() == "tpu":
self.assertIn(b"/device:TPU", proto)
self.assertIn(b"pxla.py", proto)
def testProgrammaticProfilingErrors(self):
with self.assertRaisesRegex(RuntimeError, "No profile started"):
jax.profiler.stop_trace()
try:
with tempfile.TemporaryDirectory() as tmpdir:
jax.profiler.start_trace(tmpdir)
with self.assertRaisesRegex(
RuntimeError,
"Profile has already been started. Only one profile may be run at a "
"time."):
jax.profiler.start_trace(tmpdir)
finally:
jax.profiler.stop_trace()
def testProgrammaticProfilingContextManager(self):
with tempfile.TemporaryDirectory() as tmpdir:
with jax.profiler.trace(tmpdir):
jax.pmap(lambda x: jax.lax.psum(x + 1, 'i'), axis_name='i')(
jnp.ones(jax.local_device_count()))
proto_path = glob.glob(os.path.join(tmpdir, "**/*.xplane.pb"),
recursive=True)
self.assertEqual(len(proto_path), 1)
with open(proto_path[0], "rb") as f:
proto = f.read()
# Sanity check that serialized proto contains host and device traces
# without deserializing.
self.assertIn(b"/host:CPU", proto)
if jtu.device_under_test() == "tpu":
self.assertIn(b"/device:TPU", proto)
def testTraceAnnotation(self):
x = 3
with jax.profiler.TraceAnnotation("mycontext"):
x = x + 2
def testTraceFunction(self):
@jax.profiler.annotate_function
def f(x, *, y):
return x + 2 * y
self.assertEqual(f(7, y=3), 13)
@jax.profiler.annotate_function
def f(x, *, name):
return x + 2 * len(name)
self.assertEqual(f(7, name="abc"), 13)
@partial(jax.profiler.annotate_function, name="aname")
def g(x):
return x + 2
self.assertEqual(g(7), 9)
@partial(jax.profiler.annotate_function, name="aname", akwarg="hello")
def h(x):
return x + 2
self.assertEqual(h(7), 9)
def testDeviceMemoryProfile(self):
x = jnp.ones((20,)) + 7.
self.assertIsInstance(jax.profiler.device_memory_profile(), bytes)
del x
def _check_xspace_pb_exist(self, logdir):
path = os.path.join(logdir, 'plugins', 'profile', '*', '*.xplane.pb')
self.assertEqual(1, len(glob.glob(path)),
'Expected one path match: ' + path)
@unittest.skip("Test causes OOMs")
@unittest.skipIf(not (portpicker and profiler_client and tf_profiler),
"Test requires tensorflow.profiler and portpicker")
def testSingleWorkerSamplingMode(self, delay_ms=None):
def on_worker(port, worker_start):
# Must keep return value `server` around.
server = jax.profiler.start_server(port) # noqa: F841
worker_start.set()
x = jnp.ones((1000, 1000))
while True:
with jax.profiler.TraceAnnotation("atraceannotation"):
jnp.dot(x, x.T).block_until_ready()
if self.profile_done:
break
def on_profile(port, logdir, worker_start):
worker_start.wait()
options = tf_profiler.ProfilerOptions(
host_tracer_level=2,
python_tracer_level=2,
device_tracer_level=1,
delay_ms=delay_ms,
)
# Request for 1000 milliseconds of profile.
duration_ms = 1000
profiler_client.trace(f'localhost:{port}', logdir, duration_ms,
'', 1000, options)
self.profile_done = True
logdir = absltest.get_default_test_tmpdir()
# Remove any existing log files.
shutil.rmtree(logdir, ignore_errors=True)
port = portpicker.pick_unused_port()
thread_profiler = threading.Thread(
target=on_profile, args=(port, logdir, self.worker_start))
thread_worker = threading.Thread(
target=on_worker, args=(port, self.worker_start))
thread_worker.start()
thread_profiler.start()
thread_profiler.join()
thread_worker.join(120)
self._check_xspace_pb_exist(logdir)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| 32.512821 | 79 | 0.673817 |
4c967f73676f63ec6a0992961018f7aa503fe8ca | 905 | py | Python | tests/test_http_input.py | giannisdoukas/cwltool | 5a29b0742b8387f1ce8fc11e9b408a3b636432ee | [
"Apache-2.0"
] | null | null | null | tests/test_http_input.py | giannisdoukas/cwltool | 5a29b0742b8387f1ce8fc11e9b408a3b636432ee | [
"Apache-2.0"
] | null | null | null | tests/test_http_input.py | giannisdoukas/cwltool | 5a29b0742b8387f1ce8fc11e9b408a3b636432ee | [
"Apache-2.0"
] | null | null | null | import os
import tempfile
from typing import List
from cwltool.pathmapper import PathMapper
from cwltool.utils import CWLObjectType
def test_http_path_mapping() -> None:
input_file_path = "https://raw.githubusercontent.com/common-workflow-language/cwltool/main/tests/2.fasta"
tempdir = tempfile.mkdtemp()
base_file = [
        {
            "class": "File",
            "location": input_file_path,
"basename": "chr20.fa",
}
] # type: List[CWLObjectType]
pathmap = PathMapper(base_file, os.getcwd(), tempdir)._pathmap
assert input_file_path in pathmap
assert os.path.exists(pathmap[input_file_path].resolved)
with open(pathmap[input_file_path].resolved) as file:
contents = file.read()
assert ">Sequence 561 BP; 135 A; 106 C; 98 G; 222 T; 0 other;" in contents
| 31.206897 | 112 | 0.688398 |
d03bbe026817cef36499c0a77050247c7de5fcd6 | 2,770 | py | Python | tests/test_boxes.py | lycantropos/hypothesis_geometry | 23e1638144ffba089eee21eb623b0499713e0b1c | [
"MIT"
] | 9 | 2020-01-16T13:52:16.000Z | 2022-03-16T00:01:26.000Z | tests/test_boxes.py | lycantropos/hypothesis_geometry | 23e1638144ffba089eee21eb623b0499713e0b1c | [
"MIT"
] | 38 | 2020-01-16T12:08:51.000Z | 2021-01-11T11:06:32.000Z | tests/test_boxes.py | lycantropos/hypothesis_geometry | 23e1638144ffba089eee21eb623b0499713e0b1c | [
"MIT"
] | 1 | 2020-03-12T10:29:44.000Z | 2020-03-12T10:29:44.000Z | from typing import Tuple
from ground.hints import Scalar
from hypothesis import given
from hypothesis.strategies import DataObject
from hypothesis_geometry.hints import Strategy
from hypothesis_geometry.planar import boxes
from tests import strategies
from tests.utils import (ScalarsLimitsType,
box_has_coordinates_in_range,
box_has_coordinates_types,
is_box)
@given(strategies.scalars_strategies)
def test_basic(coordinates: Strategy[Scalar]) -> None:
result = boxes(coordinates)
assert isinstance(result, Strategy)
@given(strategies.data,
strategies.scalars_strategy_with_limit_and_type_pairs)
def test_properties(data: DataObject,
coordinates_limits_type_pair: Tuple[ScalarsLimitsType,
ScalarsLimitsType]
) -> None:
(x_coordinates_limits_type,
y_coordinates_limits_type) = coordinates_limits_type_pair
((x_coordinates, (min_x_value, max_x_value)),
x_type) = x_coordinates_limits_type
((y_coordinates, (min_y_value, max_y_value)),
y_type) = y_coordinates_limits_type
strategy = boxes(x_coordinates, y_coordinates)
result = data.draw(strategy)
assert is_box(result)
assert box_has_coordinates_types(result,
x_type=x_type,
y_type=y_type)
assert box_has_coordinates_in_range(result,
min_x_value=min_x_value,
max_x_value=max_x_value,
min_y_value=min_y_value,
max_y_value=max_y_value)
assert result.min_x < result.max_x
assert result.min_y < result.max_y
@given(strategies.data,
strategies.scalars_strategies_with_limits_and_types)
def test_same_coordinates(data: DataObject,
coordinates_limits_type: ScalarsLimitsType
) -> None:
(coordinates, (min_value, max_value)), type_ = coordinates_limits_type
strategy = boxes(coordinates)
result = data.draw(strategy)
assert is_box(result)
assert box_has_coordinates_types(result,
x_type=type_,
y_type=type_)
assert box_has_coordinates_in_range(result,
min_x_value=min_value,
max_x_value=max_value,
min_y_value=min_value,
max_y_value=max_value)
assert result.min_x < result.max_x
assert result.min_y < result.max_y
| 36.933333 | 74 | 0.598556 |
d984984f11834466ffad31177ad657fbbf1f3ca6 | 8,012 | py | Python | test/functional/wallet_backup.py | barrystyle/nyc3 | 43a15d192e23602d2d5d97d458efbc1cb7a4da7d | [
"MIT"
] | 1 | 2019-06-06T22:44:39.000Z | 2019-06-06T22:44:39.000Z | test/functional/wallet_backup.py | barrystyle/nyc3 | 43a15d192e23602d2d5d97d458efbc1cb7a4da7d | [
"MIT"
] | null | null | null | test/functional/wallet_backup.py | barrystyle/nyc3 | 43a15d192e23602d2d5d97d458efbc1cb7a4da7d | [
"MIT"
] | 3 | 2019-06-05T22:50:07.000Z | 2021-04-19T22:59:55.000Z | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The NYC3 Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet backup features.
Test case is:
4 nodes. 1 2 and 3 send transactions between each other,
fourth node is a miner.
1 2 3 each mine a block to start, then
Miner creates 100 blocks so 1 2 3 each have 50 mature
coins to spend.
Then 5 iterations of 1/2/3 sending coins amongst
themselves to get transactions in the wallets,
and the miner mining one block.
Wallets are backed up using dumpwallet/backupwallet.
Then 5 more iterations of transactions and mining a block.
Miner then generates 101 more blocks, so any
transaction fees paid mature.
Sanity check:
Sum(1,2,3,4 balances) == 114*50
1/2/3 are shutdown, and their wallets erased.
Then restore using wallet.dat backup. And
confirm 1/2/3/4 balances are same as before.
Shutdown again, restore using importwallet,
and confirm again balances are correct.
"""
from decimal import Decimal
import os
from random import randint
import shutil
from test_framework.test_framework import NYC3TestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, connect_nodes, sync_blocks, sync_mempools
class WalletBackupTest(NYC3TestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
# nodes 1, 2,3 are spenders, let's give them a keypool=100
self.extra_args = [["-keypool=100"], ["-keypool=100"], ["-keypool=100"], []]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.setup_nodes()
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[1], 3)
connect_nodes(self.nodes[2], 3)
connect_nodes(self.nodes[2], 0)
self.sync_all()
def one_send(self, from_node, to_address):
if (randint(1,2) == 1):
amount = Decimal(randint(1,10)) / Decimal(10)
self.nodes[from_node].sendtoaddress(to_address, amount)
def do_one_round(self):
a0 = self.nodes[0].getnewaddress()
a1 = self.nodes[1].getnewaddress()
a2 = self.nodes[2].getnewaddress()
self.one_send(0, a1)
self.one_send(0, a2)
self.one_send(1, a0)
self.one_send(1, a2)
self.one_send(2, a0)
self.one_send(2, a1)
# Have the miner (node3) mine a block.
# Must sync mempools before mining.
sync_mempools(self.nodes)
self.nodes[3].generate(1)
sync_blocks(self.nodes)
# As above, this mirrors the original bash test.
def start_three(self):
self.start_node(0)
self.start_node(1)
self.start_node(2)
connect_nodes(self.nodes[0], 3)
connect_nodes(self.nodes[1], 3)
connect_nodes(self.nodes[2], 3)
connect_nodes(self.nodes[2], 0)
def stop_three(self):
self.stop_node(0)
self.stop_node(1)
self.stop_node(2)
def erase_three(self):
os.remove(os.path.join(self.nodes[0].datadir, 'regtest', 'wallets', 'wallet.dat'))
os.remove(os.path.join(self.nodes[1].datadir, 'regtest', 'wallets', 'wallet.dat'))
os.remove(os.path.join(self.nodes[2].datadir, 'regtest', 'wallets', 'wallet.dat'))
def run_test(self):
self.log.info("Generating initial blockchain")
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.nodes[1].generate(1)
sync_blocks(self.nodes)
self.nodes[2].generate(1)
sync_blocks(self.nodes)
self.nodes[3].generate(100)
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbalance(), 50)
assert_equal(self.nodes[1].getbalance(), 50)
assert_equal(self.nodes[2].getbalance(), 50)
assert_equal(self.nodes[3].getbalance(), 0)
self.log.info("Creating transactions")
# Five rounds of sending each other transactions.
for i in range(5):
self.do_one_round()
self.log.info("Backing up")
self.nodes[0].backupwallet(os.path.join(self.nodes[0].datadir, 'wallet.bak'))
self.nodes[0].dumpwallet(os.path.join(self.nodes[0].datadir, 'wallet.dump'))
self.nodes[1].backupwallet(os.path.join(self.nodes[1].datadir, 'wallet.bak'))
self.nodes[1].dumpwallet(os.path.join(self.nodes[1].datadir, 'wallet.dump'))
self.nodes[2].backupwallet(os.path.join(self.nodes[2].datadir, 'wallet.bak'))
self.nodes[2].dumpwallet(os.path.join(self.nodes[2].datadir, 'wallet.dump'))
self.log.info("More transactions")
for i in range(5):
self.do_one_round()
# Generate 101 more blocks, so any fees paid mature
self.nodes[3].generate(101)
self.sync_all()
balance0 = self.nodes[0].getbalance()
balance1 = self.nodes[1].getbalance()
balance2 = self.nodes[2].getbalance()
balance3 = self.nodes[3].getbalance()
total = balance0 + balance1 + balance2 + balance3
# At this point, there are 214 blocks (103 for setup, then 10 rounds, then 101.)
# 114 are mature, so the sum of all wallets should be 114 * 50 = 5700.
assert_equal(total, 5700)
##
# Test restoring spender wallets from backups
##
self.log.info("Restoring using wallet.dat")
self.stop_three()
self.erase_three()
# Start node2 with no chain
shutil.rmtree(os.path.join(self.nodes[2].datadir, 'regtest', 'blocks'))
shutil.rmtree(os.path.join(self.nodes[2].datadir, 'regtest', 'chainstate'))
# Restore wallets from backup
shutil.copyfile(os.path.join(self.nodes[0].datadir, 'wallet.bak'), os.path.join(self.nodes[0].datadir, 'regtest', 'wallets', 'wallet.dat'))
shutil.copyfile(os.path.join(self.nodes[1].datadir, 'wallet.bak'), os.path.join(self.nodes[1].datadir, 'regtest', 'wallets', 'wallet.dat'))
shutil.copyfile(os.path.join(self.nodes[2].datadir, 'wallet.bak'), os.path.join(self.nodes[2].datadir, 'regtest', 'wallets', 'wallet.dat'))
self.log.info("Re-starting nodes")
self.start_three()
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbalance(), balance0)
assert_equal(self.nodes[1].getbalance(), balance1)
assert_equal(self.nodes[2].getbalance(), balance2)
self.log.info("Restoring using dumped wallet")
self.stop_three()
self.erase_three()
#start node2 with no chain
shutil.rmtree(os.path.join(self.nodes[2].datadir, 'regtest', 'blocks'))
shutil.rmtree(os.path.join(self.nodes[2].datadir, 'regtest', 'chainstate'))
self.start_three()
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[1].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 0)
self.nodes[0].importwallet(os.path.join(self.nodes[0].datadir, 'wallet.dump'))
self.nodes[1].importwallet(os.path.join(self.nodes[1].datadir, 'wallet.dump'))
self.nodes[2].importwallet(os.path.join(self.nodes[2].datadir, 'wallet.dump'))
sync_blocks(self.nodes)
assert_equal(self.nodes[0].getbalance(), balance0)
assert_equal(self.nodes[1].getbalance(), balance1)
assert_equal(self.nodes[2].getbalance(), balance2)
# Backup to source wallet file must fail
sourcePaths = [
os.path.join(self.nodes[0].datadir, 'regtest', 'wallets', 'wallet.dat'),
os.path.join(self.nodes[0].datadir, 'regtest', '.', 'wallets', 'wallet.dat'),
os.path.join(self.nodes[0].datadir, 'regtest', 'wallets', ''),
os.path.join(self.nodes[0].datadir, 'regtest', 'wallets')]
for sourcePath in sourcePaths:
assert_raises_rpc_error(-4, "backup failed", self.nodes[0].backupwallet, sourcePath)
if __name__ == '__main__':
WalletBackupTest().main()
| 37.971564 | 147 | 0.651023 |
f7522d2ca321ae026aceeb862d016255fdc67539 | 684 | py | Python | integrations/airflow/marquez_airflow/__init__.py | ravwojdyla/marquez | 8636604e5dcffe1832ea39d9b72e9bbab7b272db | [
"Apache-2.0"
] | null | null | null | integrations/airflow/marquez_airflow/__init__.py | ravwojdyla/marquez | 8636604e5dcffe1832ea39d9b72e9bbab7b272db | [
"Apache-2.0"
] | null | null | null | integrations/airflow/marquez_airflow/__init__.py | ravwojdyla/marquez | 8636604e5dcffe1832ea39d9b72e9bbab7b272db | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
__author__ = """Marquez Project"""
__version__ = "0.13.1"
__all__ = ["DAG"]
from marquez_airflow.dag import DAG
| 34.2 | 74 | 0.739766 |
d77a8995e54efd87653ce25836b10892a2015f2b | 674 | py | Python | restaurant_project/manage.py | lukart80/restaurant | 419786cd87a7bd15c82b2fda8ad7c5e3e1f6c9cd | [
"MIT"
] | null | null | null | restaurant_project/manage.py | lukart80/restaurant | 419786cd87a7bd15c82b2fda8ad7c5e3e1f6c9cd | [
"MIT"
] | null | null | null | restaurant_project/manage.py | lukart80/restaurant | 419786cd87a7bd15c82b2fda8ad7c5e3e1f6c9cd | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'restaurant_project.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 29.304348 | 82 | 0.683976 |
7719d55eab9700c1a48e9c68b7305e7ea92336b5 | 12,492 | py | Python | HDPython/ast/AST_member_function_converter.py | HardwareDesignWithPython/HDPython | aade03aaa092b1684fa12bffd17674cf1c45f5ac | [
"MIT"
] | null | null | null | HDPython/ast/AST_member_function_converter.py | HardwareDesignWithPython/HDPython | aade03aaa092b1684fa12bffd17674cf1c45f5ac | [
"MIT"
] | null | null | null | HDPython/ast/AST_member_function_converter.py | HardwareDesignWithPython/HDPython | aade03aaa092b1684fa12bffd17674cf1c45f5ac | [
"MIT"
] | 1 | 2021-10-20T20:08:16.000Z | 2021-10-20T20:08:16.000Z | import functools
import copy
import HDPython.hdl_converter as hdl
from HDPython.base import print_cnvt, gTemplateIndent, v_deepcopy
from HDPython.lib_enums import getDefaultVarSig ,setDefaultVarSig, varSig, InOut_t
from HDPython.base_helpers import join_str
from HDPython.v_function import v_procedure, v_function ,v_Arch
from HDPython.global_settings import sort_archetecture
from HDPython.ast.AST_MemFunctionCalls import memFunctionCall, call_func, checkIfFunctionexists, hasMissingSymbol, get_function_varSig_suffix, GetNewArgList
from HDPython.ast.ast_hdl_error import HDPython_error,Hanlde_errors
def get_function_definition(b_list, name):
ret = []
for x in b_list:
if x.name == name:
ret.append(x)
return ret
class AST_member_function_converter:
def __init__(self,ClassInstance, astParser ,parent ):
self.ClassInstance = ClassInstance
self.ClassName = type(ClassInstance).__name__
self.astParser = astParser
self.class_Node = astParser.getClassByName(self.ClassName )
self.parent = parent
def sourceFileName(self):
return self.astParser.sourceFileName
def lineno(self):
return self.class_Node.lineno
def col_offset(self):
return self.class_Node.col_offset
def Name(self):
return self.ClassName
def get_architecture_definition(self):
for f in self.class_Node.body:
if f.name in self.astParser.functionNameVetoList:
continue
if f.name in self.ClassInstance.__hdl_converter__.functionNameVetoList:
continue
if not (f.decorator_list and f.decorator_list[0].id == 'architecture') :
continue
if not (f.name not in [x["name"] for x in self.ClassInstance.__hdl_converter__.archetecture_list ]):
continue
yield f
@Hanlde_errors("error while processing class architecture")
def Extract_architecture(self):
for arch in self.get_architecture_definition():
self.extractArchetectureForClass(arch)
def get_architecture_vars(self):
if sort_archetecture():
return sorted(self.ClassInstance.__local_symbols__, key=lambda element_: element_["type_name"])
return self.ClassInstance.__local_symbols__
def unfold_architecture(self,Arc):
try:
body = self.astParser.Unfold_body(Arc) ## get local vars
return body
except Exception as inst:
err_msg = HDPython_error(
self.astParser.sourceFileName,
Arc.lineno,
Arc.col_offset,
type(self.ClassInstance).__name__,
"FileName: " + Arc.name +", Unable to Unfold AST, Error In extractArchetectureForClass: body = self.Unfold_body(Arc)"
)
raise Exception(err_msg,HDPython_error,inst)
def extractArchetectureForClass(self,Arc):
ClassInstance = self.ClassInstance
ret = None
primary = ClassInstance.__hdl_converter__.get_primary_object(ClassInstance)
ClassInstance.__hdl_converter__ = primary.__hdl_converter__
ClassInstance = copy.deepcopy(ClassInstance)
self.astParser.reset_buffers()
self.astParser.FuncArgs.append({
"name":"self",
"symbol": ClassInstance,
"ScopeType": InOut_t.InOut_tt
})
self.astParser.local_function = ClassInstance.__init__.__globals__
ClassInstance.__hdl_name__ = "!!SELF!!"
self.astParser.Archetecture_vars = self.get_architecture_vars()
body=self.unfold_architecture(Arc)
if self.astParser.Missing_template:
hdl.FlagFor_TemplateMissing( ClassInstance )
else:
ret = v_Arch(
body=body,
Symbols=self.astParser.LocalVar,
Arch_vars=self.astParser.Archetecture_vars,
ports=ClassInstance.getMember()
)
self.ClassInstance.__hdl_converter__.archetecture_list.append({
"name" : Arc.name,
"symbol" : ret
})
self.astParser.reset_buffers()
def get_function_definitions(self):
for f in self.class_Node.body:
if f.name in self.astParser.functionNameVetoList:
continue
if f.name in self.ClassInstance.__hdl_converter__.functionNameVetoList:
continue
if f.decorator_list and f.decorator_list[0].id == 'architecture' :
continue
yield f
@Hanlde_errors("error while processing templates")
def __request_function_with_default_arguments__(self):
for function_node in self.get_function_definitions():
self.ClassInstance.set_vhdl_name ( "self",True)
Arglist = []
Arglist.append({
"name":"self",
"symbol": v_deepcopy(self.ClassInstance),
"ScopeType": InOut_t.InOut_tt
})
Arglist[-1]["symbol"]._Inout = InOut_t.InOut_tt
Arglist += list(self.astParser.get_func_args_list(function_node))
exist = checkIfFunctionexists(self.ClassInstance,function_node.name , Arglist)
if exist:
continue
print_cnvt(str(gTemplateIndent) +'<request_new_template name="'+ str(function_node.name)+'"/>' )
self.ClassInstance.__hdl_converter__.MemfunctionCalls.append(
memFunctionCall(
name= function_node.name,
args= [x["symbol"] for x in Arglist],
obj= self.ClassInstance,
call_func = None,
func_args = None,
setDefault = True,
varSigIndependent = False
))
@Hanlde_errors("error while creating function from template")
def extractFunctionsForClass2(self):
fun_ret = []
for temp in self.ClassInstance.__hdl_converter__.MemfunctionCalls:
if temp.call_func is not None:
continue
f,newArglist = self.get_arglistlocal_extractFunctionsForClass2(temp)
if newArglist is None:
continue
ArglistLocal_length = len(newArglist)
self.astParser.Missing_template = False
ret = self.extractFunctionsForClass_impl(
f[0],
newArglist ,
temp.setDefault ,
temp
)
if self.astParser.Missing_template:
self.ClassInstance.__hdl_converter__.MissingTemplate = True
continue
temp.call_func = call_func
temp.func_args = newArglist[0: ArglistLocal_length] #deepcopy
if ret:
fun_ret.append( ret )
return fun_ret
def get_arglistlocal_extractFunctionsForClass2(self, temp):
ArglistLocal = []
ArglistLocal.append({
"name":"self",
"symbol": v_deepcopy(self.ClassInstance),
"ScopeType": InOut_t.InOut_tt
})
f = get_function_definition(self.class_Node.body, temp.name)
if len(f) == 0:
raise Exception(
"unable to find function template: ",
                temp.name,
self.ClassInstance
)
ArglistLocal += list(self.astParser.get_func_args_list(f[0]))
newArglist = GetNewArgList(
f[0].name,
ArglistLocal,
temp
)
return f,newArglist
def extractFunctionsForClass_impl(self, funcDef, FuncArgs , setDefault = False , MemFunction_template= None ):
if hasMissingSymbol(FuncArgs):
return None
self.astParser.push_scope("function")
self.astParser.reset_buffers()
self.astParser.parent = self.parent
self.astParser.FuncArgs = FuncArgs
FuncArgsLocal = copy.copy(FuncArgs)
varSigSuffix = get_function_varSig_suffix(self.astParser.FuncArgs)
self.astParser.local_function = self.ClassInstance.__init__.__globals__
body = self.unfold_body(funcDef)
        bodystr = self.convert_to_string(body, funcDef)
argList = [
hdl.to_arglist(
x["symbol"],
x['name'],
type(self.ClassInstance).__name__,
withDefault = setDefault and (x["name"] != "self"),
astParser=self.astParser
)
for x in FuncArgsLocal
]
ArglistProcedure = join_str(argList,Delimeter="; ")
ret = self.make_function_or_procedure(funcDef.name, body.get_type(), bodystr,FuncArgsLocal, ArglistProcedure, varSigSuffix)
if body.get_type() is not None:
MemFunction_template.varSigIndependent = True
self.astParser.pop_scope()
return ret
def unfold_body(self,Function_node):
dummy_DefaultVarSig = getDefaultVarSig()
setDefaultVarSig(varSig.variable_t)
try:
body = self.astParser.Unfold_body(Function_node)
return body
except Exception as inst:
err_msg = HDPython_error(
self.astParser.sourceFileName,
Function_node.lineno,
Function_node.col_offset,
type(self.ClassInstance).__name__,
"Function Name: " + Function_node.name +", Unable to Unfold AST. Error In extractFunctionsForClass_impl: body = self.Unfold_body(funcDef)"
)
            raise Exception(err_msg, self.ClassInstance, inst)
finally:
setDefaultVarSig(dummy_DefaultVarSig)
    def convert_to_string(self, body, funcDef):
dummy_DefaultVarSig = getDefaultVarSig()
setDefaultVarSig(varSig.variable_t)
try:
bodystr= str(body)
return bodystr
except Exception as inst:
err_msg = HDPython_error(
self.astParser.sourceFileName,
funcDef.lineno,
funcDef.col_offset,
                type(self.ClassInstance).__name__,
"Function Name: " + funcDef.name +", Unable to Convert AST to String, Error In extractFunctionsForClass_impl: bodystr= str(body)"
)
            raise Exception(err_msg, self.ClassInstance, inst)
finally:
setDefaultVarSig(dummy_DefaultVarSig)
def make_function_or_procedure(self,functionName, returnType , bodystr, FuncArgsLocal,ArglistProcedure, varSigSuffix):
actual_function_name = hdl.function_name_modifier(self.ClassInstance, functionName, varSigSuffix)
if returnType is not None:
ArglistProcedure = ArglistProcedure.replace(" in "," ").replace(" out "," ").replace(" inout "," ")
ret = v_function(
name=actual_function_name,
body=bodystr,
VariableList=self.astParser.get_local_var_def(),
returnType=returnType,
argumentList=ArglistProcedure,
isFreeFunction=True
)
return ret
ret = v_procedure(
name=actual_function_name,
body=bodystr,
VariableList=self.astParser.get_local_var_def(),
argumentList=ArglistProcedure,
isFreeFunction=True
)
return ret
def get_functions(self):
primary = self.ClassInstance.__hdl_converter__.get_primary_object(self.ClassInstance)
self.ClassInstance.__hdl_converter__ = primary.__hdl_converter__
self.ClassInstance.__hdl_converter__.MissingTemplate = False
print_cnvt(str(gTemplateIndent) +'<processing name="' + str(self.ClassName ) +'" MemfunctionCalls="' +str(len(self.ClassInstance.__hdl_converter__.MemfunctionCalls)) +'">')
self.Extract_architecture()
self.__request_function_with_default_arguments__()
fun_ret = self.extractFunctionsForClass2()
print_cnvt(str(gTemplateIndent)+'</processing>')
return fun_ret
| 34.991597 | 181 | 0.603026 |
8b711a4790de04eee948de5630d49dc0f23e306b | 33,077 | py | Python | test/functional/rpc_fundrawtransaction.py | CoinBitCore/temp | a2ebefc861aa979b441286a203d574197459b0d6 | [
"MIT"
] | null | null | null | test/functional/rpc_fundrawtransaction.py | CoinBitCore/temp | a2ebefc861aa979b441286a203d574197459b0d6 | [
"MIT"
] | null | null | null | test/functional/rpc_fundrawtransaction.py | CoinBitCore/temp | a2ebefc861aa979b441286a203d574197459b0d6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Coinbit Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the fundrawtransaction RPC."""
from test_framework.test_framework import CoinbitTestFramework
from test_framework.util import *
def get_unspent(listunspent, amount):
for utx in listunspent:
if utx['amount'] == amount:
return utx
raise AssertionError('Could not find unspent with amount={}'.format(amount))
class RawTransactionsTest(CoinbitTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
def setup_network(self, split=False):
self.setup_nodes()
connect_nodes_bi(self.nodes, 0, 1)
connect_nodes_bi(self.nodes, 1, 2)
connect_nodes_bi(self.nodes, 0, 2)
connect_nodes_bi(self.nodes, 0, 3)
def run_test(self):
min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
        # If the fee's positive delta is higher than this value the tests will fail;
        # a negative delta always fails the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
feeTolerance = 2 * min_relay_tx_fee/1000
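        # min_relay_tx_fee is a feerate per 1000 bytes, so min_relay_tx_fee/1000 is the
        # per-byte rate and feeTolerance is the fee attributable to 2 extra bytes.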
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
# ensure that setting changePosition in fundraw with an exact match is handled properly
rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():50})
rawmatch = self.nodes[2].fundrawtransaction(rawmatch, {"changePosition":1, "subtractFeeFromOutputs":[0]})
assert_equal(rawmatch["changepos"], -1)
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].validateaddress(watchonly_address)["pubkey"]
watchonly_amount = Decimal(200)
self.nodes[3].importpubkey(watchonly_pubkey, "", True)
watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, watchonly_amount)
self.nodes[0].sendtoaddress(self.nodes[3].getnewaddress(), watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0)
self.nodes[0].generate(1)
self.sync_all()
###############
# simple test #
###############
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test that we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.2 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0) #test if we have enough inputs
##############################
# simple test with two coins #
##############################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
################################
# simple test with two outputs #
################################
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert(len(dec_tx['vin']) > 0)
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
#########################################################################
# test a fundrawtransaction with a VIN greater than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
#####################################################################
# test a fundrawtransaction with which will not get a change output #
#####################################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : Decimal(5.0) - fee - feeTolerance }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
####################################################
# test a fundrawtransaction with an invalid option #
####################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_rpc_error(-3, "Unexpected key foo", self.nodes[2].fundrawtransaction, rawtx, {'foo':'bar'})
############################################################
# test a fundrawtransaction with an invalid change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_rpc_error(-5, "changeAddress must be a valid coinbit address", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':'foobar'})
############################################################
# test a fundrawtransaction with a provided change address #
############################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
change = self.nodes[2].getnewaddress()
assert_raises_rpc_error(-8, "changePosition out of bounds", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':change, 'changePosition':2})
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0})
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
out = dec_tx['vout'][0]
assert_equal(change, out['scriptPubKey']['addresses'][0])
#########################################################
# test a fundrawtransaction with a provided change type #
#########################################################
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[2].fundrawtransaction, rawtx, {'change_type': None})
assert_raises_rpc_error(-5, "Unknown change type", self.nodes[2].fundrawtransaction, rawtx, {'change_type': ''})
rawtx = self.nodes[2].fundrawtransaction(rawtx, {'change_type': 'bech32'})
tx = self.nodes[2].decoderawtransaction(rawtx['hex'])
assert_equal('witness_v0_keyhash', tx['vout'][rawtx['changepos']]['scriptPubKey']['type'])
#########################################################################
# test a fundrawtransaction with a VIN smaller than the required amount #
#########################################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:82] + "0100" + rawtx[84:]
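        # The first 82 hex chars cover 4 + 1 + 36 = 41 bytes (version, vin count,
        # prevout txid+index), so rawtx[82:84] is the 1-byte scriptSig length;
        # "0100" replaces it with a declared length of 1 and a single 0x00 byte.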
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
###########################################
# test a fundrawtransaction with two VINs #
###########################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) #we now must see two vins identical to vins given as params
#########################################################
# test a fundrawtransaction with two VINs and two vOUTs #
#########################################################
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0, self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['addresses'][0] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
##############################################
# test a fundrawtransaction with invalid vin #
##############################################
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 1.0}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx)
############################################################
#compare fee of a standard pubkeyhash transaction
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction with multiple outputs
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1,self.nodes[1].getnewaddress():1.2,self.nodes[1].getnewaddress():0.1,self.nodes[1].getnewaddress():1.3,self.nodes[1].getnewaddress():0.2,self.nodes[1].getnewaddress():0.3}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a 2of2 multisig p2sh transaction
# create 2of2 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
mSigObj = self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
#compare fee of a standard pubkeyhash transaction
# create 4of5 addr
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].validateaddress(addr1)
addr2Obj = self.nodes[1].validateaddress(addr2)
addr3Obj = self.nodes[1].validateaddress(addr3)
addr4Obj = self.nodes[1].validateaddress(addr4)
addr5Obj = self.nodes[1].validateaddress(addr5)
mSigObj = self.nodes[1].addmultisigaddress(4, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey'], addr4Obj['pubkey'], addr5Obj['pubkey']])['address']
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance)
############################################################
############################################################
# spend a 2of2 multisig transaction over fundraw
# create 2of2 addr
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].validateaddress(addr1)
addr2Obj = self.nodes[2].validateaddress(addr2)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
# send 1.2 CTB to msig addr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[2].fundrawtransaction(rawtx)
signedTx = self.nodes[2].signrawtransaction(fundedTx['hex'])
txId = self.nodes[2].sendrawtransaction(signedTx['hex'])
self.sync_all()
self.nodes[1].generate(1)
self.sync_all()
# make sure funds are received at node1
assert_equal(oldBalance+Decimal('1.10000000'), self.nodes[1].getbalance())
############################################################
# locked wallet test
self.stop_node(0)
self.nodes[1].node_encrypt_wallet("test")
self.stop_node(2)
self.stop_node(3)
self.start_nodes()
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(min_relay_tx_fee)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
connect_nodes_bi(self.nodes,0,3)
self.sync_all()
# drain the keypool
self.nodes[1].getnewaddress()
self.nodes[1].getrawchangeaddress()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
# fund a transaction that requires a new key for the change output
# creating the key must be impossible because the wallet is locked
assert_raises_rpc_error(-4, "Keypool ran out, please call keypoolrefill first", self.nodes[1].fundrawtransaction, rawtx)
#refill the keypool
self.nodes[1].walletpassphrase("test", 100)
self.nodes[1].keypoolrefill(8) #need to refill the keypool to get an internal change address
self.nodes[1].walletlock()
assert_raises_rpc_error(-13, "walletpassphrase", self.nodes[1].sendtoaddress, self.nodes[0].getnewaddress(), 1.2)
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
#now we need to unlock
self.nodes[1].walletpassphrase("test", 600)
signedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(signedTx['hex'])
self.nodes[1].generate(1)
self.sync_all()
        # make sure funds are received at node0
assert_equal(oldBalance+Decimal('51.10000000'), self.nodes[0].getbalance())
###############################################
# multiple (~19) inputs tx test | Compare fee #
###############################################
#empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
#create same transaction over sendtoaddress
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
#compare fee
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert(feeDelta >= 0 and feeDelta <= feeTolerance*19) #~19 inputs
#############################################
# multiple (~19) inputs tx test | sign/send #
#############################################
#again, empty node1, send some small coins from node0 to node1
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
for i in range(0,20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
#fund a tx with ~20 small inputs
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
fundedAndSignedTx = self.nodes[1].signrawtransaction(fundedTx['hex'])
txId = self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('50.19000000'), self.nodes[0].getbalance()) #0.19+block reward
#####################################################
# test fundrawtransaction with OP_RETURN and no vin #
#####################################################
rawtx = "0100000000010000000000000000066a047465737400000000"
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
##################################################
# test a fundrawtransaction using only watchonly #
##################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx, {'includeWatching': True })
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], watchonly_txid)
assert("fee" in result.keys())
assert_greater_than(result["changepos"], -1)
###############################################################
# test fundrawtransaction using the entirety of watched funds #
###############################################################
inputs = []
outputs = {self.nodes[2].getnewaddress() : watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
# Backward compatibility test (2nd param is includeWatching)
result = self.nodes[3].fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 2)
assert(res_dec["vin"][0]["txid"] == watchonly_txid or res_dec["vin"][1]["txid"] == watchonly_txid)
assert_greater_than(result["fee"], 0)
assert_greater_than(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][result["changepos"]]["value"], watchonly_amount / 10)
signedtx = self.nodes[3].signrawtransaction(result["hex"])
assert(not signedtx["complete"])
signedtx = self.nodes[0].signrawtransaction(signedtx["hex"])
assert(signedtx["complete"])
self.nodes[0].sendrawtransaction(signedtx["hex"])
self.nodes[0].generate(1)
self.sync_all()
#######################
# Test feeRate option #
#######################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[3].getnewaddress() : 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = self.nodes[3].fundrawtransaction(rawtx) # uses min_relay_tx_fee (set by settxfee)
result2 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee})
result3 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 10*min_relay_tx_fee})
result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex'])
assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate)
assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate)
################################
# Test no address reuse occurs #
################################
result3 = self.nodes[3].fundrawtransaction(rawtx)
res_dec = self.nodes[0].decoderawtransaction(result3["hex"])
changeaddress = ""
for out in res_dec['vout']:
if out['value'] > 1.0:
changeaddress += out['scriptPubKey']['addresses'][0]
assert(changeaddress != "")
nextaddr = self.nodes[3].getnewaddress()
# Now the change address key should be removed from the keypool
assert(changeaddress != nextaddr)
######################################
# Test subtractFeeFromOutputs option #
######################################
# Make sure there is exactly one input so coin selection can't skew the result
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[2].getnewaddress(): 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": []}), # empty subtraction list
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0]}), # uses min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee}),
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2*min_relay_tx_fee, "subtractFeeFromOutputs": [0]})]
dec_tx = [self.nodes[3].decoderawtransaction(tx['hex']) for tx in result]
output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)]
change = [d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)]
assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee'])
assert_equal(result[3]['fee'], result[4]['fee'])
assert_equal(change[0], change[1])
assert_equal(output[0], output[1])
assert_equal(output[0], output[2] + result[2]['fee'])
assert_equal(change[0] + result[0]['fee'], change[2])
assert_equal(output[3], output[4] + result[4]['fee'])
assert_equal(change[3] + result[3]['fee'], change[4])
inputs = []
outputs = {self.nodes[2].getnewaddress(): value for value in (1.0, 1.1, 1.2, 1.3)}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx),
# split the fee between outputs 0, 2, and 3, but not output 1
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0, 2, 3]})]
dec_tx = [self.nodes[3].decoderawtransaction(result[0]['hex']),
self.nodes[3].decoderawtransaction(result[1]['hex'])]
# Nested list of non-change output amounts for each transaction
output = [[out['value'] for i, out in enumerate(d['vout']) if i != r['changepos']]
for d, r in zip(dec_tx, result)]
# List of differences in output amounts between normal and subtractFee transactions
share = [o0 - o1 for o0, o1 in zip(output[0], output[1])]
# output 1 is the same in both transactions
assert_equal(share[1], 0)
# the other 3 outputs are smaller as a result of subtractFeeFromOutputs
assert_greater_than(share[0], 0)
assert_greater_than(share[2], 0)
assert_greater_than(share[3], 0)
# outputs 2 and 3 take the same share of the fee
assert_equal(share[2], share[3])
# output 0 takes at least as much share of the fee, and no more than 2 satoshis more, than outputs 2 and 3
assert_greater_than_or_equal(share[0], share[2])
assert_greater_than_or_equal(share[2] + Decimal(2e-8), share[0])
# the fee is the same in both transactions
assert_equal(result[0]['fee'], result[1]['fee'])
# the total subtracted from the outputs is equal to the fee
assert_equal(share[0] + share[2] + share[3], result[0]['fee'])
if __name__ == '__main__':
RawTransactionsTest().main()
| 45.125512 | 223 | 0.569429 |
b161f44162e000d2c8cab324757d9f5fc2cda976 | 4,100 | py | Python | setup.py | cisagov/assessment-data-import | 767141c276494879db0f73f9fe2465f0250c38cc | [
"CC0-1.0"
] | 2 | 2021-03-18T23:20:34.000Z | 2022-02-03T04:36:29.000Z | setup.py | cisagov/assessment-data-import-lambda | 767141c276494879db0f73f9fe2465f0250c38cc | [
"CC0-1.0"
] | 7 | 2019-12-05T14:18:34.000Z | 2021-08-10T17:14:39.000Z | setup.py | cisagov/assessment-data-import | 767141c276494879db0f73f9fe2465f0250c38cc | [
"CC0-1.0"
] | 1 | 2021-06-04T23:39:05.000Z | 2021-06-04T23:39:05.000Z | """
This is the setup module for assessment data import.
Based on:
- https://packaging.python.org/distributing/
- https://github.com/pypa/sampleproject/blob/master/setup.py
- https://blog.ionelmc.ro/2014/05/25/python-packaging/#the-structure
"""
# Standard Python Libraries
import codecs
from glob import glob
from os.path import abspath, basename, dirname, join, splitext
# Third-Party Libraries
from setuptools import find_packages, setup
def readme():
"""Read in and return the contents of the project's README.md file."""
with open("README.md", encoding="utf-8") as f:
return f.read()
# Below two methods were pulled from:
# https://packaging.python.org/guides/single-sourcing-package-version/
def read(rel_path):
"""Open a file for reading from a given relative path."""
here = abspath(dirname(__file__))
with codecs.open(join(here, rel_path), "r") as fp:
return fp.read()
def get_version(version_file):
"""Extract a version number from the given file path."""
for line in read(version_file).splitlines():
if line.startswith("__version__"):
delim = '"' if '"' in line else "'"
return line.split(delim)[1]
raise RuntimeError("Unable to find version string.")
setup(
name="adi",
# Versions should comply with PEP440
version=get_version("adi/_version.py"),
description="Imports assessment data to a Mongo database",
long_description=readme(),
long_description_content_type="text/markdown",
# Landing page for CISA's cybersecurity mission
url="https://www.cisa.gov/cybersecurity",
# Additional URLs for this project per
# https://packaging.python.org/guides/distributing-packages-using-setuptools/#project-urls
project_urls={
"Source": "https://github.com/cisagov/assessment-data-import-lambda",
"Tracker": "https://github.com/cisagov/assessment-data-import-lambda/issues",
},
# Author details
author="Cybersecurity and Infrastructure Security Agency",
author_email="github@cisa.dhs.gov",
license="License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication",
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
"Development Status :: 4 - Beta",
# Indicate who your project is intended for
"Intended Audience :: Developers",
# Pick your license as you wish (should match "license" above)
"License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication",
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
python_requires=">=3.6",
# What does your project relate to?
keywords="adi assessment import",
packages=find_packages(where="."),
py_modules=[splitext(basename(path))[0] for path in glob("adi/*.py")],
install_requires=["boto3", "docopt", "pymongo", "pytz", "setuptools >= 24.2.0"],
extras_require={
"test": [
"coverage",
# coveralls 1.11.0 added a service number for calls from
# GitHub Actions. This caused a regression which resulted in a 422
# response from the coveralls API with the message:
# Unprocessable Entity for url: https://coveralls.io/api/v1/jobs
# 1.11.1 fixed this issue, but to ensure expected behavior we'll pin
# to never grab the regression version.
"coveralls != 1.11.0",
"pre-commit",
"pytest-cov",
"pytest",
]
},
# Conveniently allows one to run the CLI tool as `adi`
entry_points={"console_scripts": ["adi = adi.assessment_data_import:main"]},
)
| 38.679245 | 94 | 0.653659 |
fd2c919be04a6b400863f7e01031223eeca9f7fb | 1,162 | py | Python | src/pretalx/common/forms/validators.py | td00/pretalx | aff450de9420fca167e04345fa24ee7140fae819 | [
"Apache-2.0"
] | null | null | null | src/pretalx/common/forms/validators.py | td00/pretalx | aff450de9420fca167e04345fa24ee7140fae819 | [
"Apache-2.0"
] | null | null | null | src/pretalx/common/forms/validators.py | td00/pretalx | aff450de9420fca167e04345fa24ee7140fae819 | [
"Apache-2.0"
] | null | null | null | from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from zxcvbn import zxcvbn
class ZXCVBNValidator:
code = 'password_too_weak'
DEFAULT_USER_ATTRIBUTES = ('username', 'first_name', 'last_name', 'email')
def __init__(self, min_score=3, user_attributes=DEFAULT_USER_ATTRIBUTES):
if not (0 <= min_score <= 4):
raise Exception('min_score must be between 0 and 4!')
self.min_score = min_score
self.user_attributes = user_attributes
def __call__(self, value):
return self.validate(value)
def validate(self, password, user=None):
user_inputs = [getattr(user, attribute, None) for attribute in self.user_attributes]
user_inputs = [attr for attr in user_inputs if attr is not None]
results = zxcvbn(password, user_inputs=user_inputs)
if results.get('score', 0) < self.min_score:
feedback = ', '.join(results.get('feedback', {}).get('suggestions', []))
raise ValidationError(_(feedback), code=self.code, params={})
def get_help_text(self):
return _('Your password is too weak.')
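# Added usage note (not part of the original module): the validator is a plain
# callable, so it can be used directly or attached to a Django form/model field,
# for example (a sketch, not pretalx's actual wiring):
#   validator = ZXCVBNValidator(min_score=3)
#   validator(candidate_password)  # raises ValidationError if zxcvbn scores it below min_score
#   forms.CharField(validators=[ZXCVBNValidator()])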
| 40.068966 | 92 | 0.681583 |
72a62a082a2b6d6c71d7fdbe2c17a641627a74b5 | 1,706 | py | Python | setup.py | Relrin/sanic-base-extension | e5e6be3572ad52a8fab2565da6e64422d556ade3 | [
"BSD-3-Clause"
] | 4 | 2018-12-06T08:59:26.000Z | 2021-08-25T11:24:56.000Z | setup.py | Relrin/sanic-base-extension | e5e6be3572ad52a8fab2565da6e64422d556ade3 | [
"BSD-3-Clause"
] | null | null | null | setup.py | Relrin/sanic-base-extension | e5e6be3572ad52a8fab2565da6e64422d556ade3 | [
"BSD-3-Clause"
] | 1 | 2020-09-28T01:21:59.000Z | 2020-09-28T01:21:59.000Z | # -*- coding: utf-8 -*-
import os
import re
import ast
from setuptools import setup
_version_re = re.compile(r'__version__\s+=\s+(.*)')
requirements = []
with open('sanic_base_ext/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
def read(f):
return open(os.path.join(os.path.dirname(__file__), f)).read().strip()
def get_packages(package):
"""
Return root package and all sub-packages.
"""
return [
dirpath
for dirpath, dirnames, filenames in os.walk(package)
if os.path.exists(os.path.join(dirpath, '__init__.py'))
]
setup(
name='sanic-base-extension',
version=version,
url='https://github.com/Relrin/sanic-base-extension',
license='BSD',
author='Valeryi Savich',
author_email='relrin78@gmail.com',
description='Flask-like extension support for Sanic framework',
long_description=read('README.rst'),
packages=get_packages('sanic_base_ext'),
include_package_data=True,
zip_safe=False,
platforms='any',
install_requires=requirements,
classifiers=[
'Environment :: Web Environment',
'License :: OSI Approved :: BSD License',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Software Development :: Libraries :: Python Modules'
],
)
| 28.433333 | 74 | 0.639508 |
af888a7a508f7aa401a3a5870ebd1db9e34ce605 | 538 | py | Python | dbsp_drp/gui_helpers.py | finagle29/DBSP_DRP | d2f869f85e1425507dbc84e4e76fa44a6784f9d1 | [
"BSD-3-Clause"
] | 1 | 2021-05-17T23:28:25.000Z | 2021-05-17T23:28:25.000Z | dbsp_drp/gui_helpers.py | finagle29/DBSP_DRP | d2f869f85e1425507dbc84e4e76fa44a6784f9d1 | [
"BSD-3-Clause"
] | 23 | 2020-12-14T17:25:13.000Z | 2022-02-23T09:30:10.000Z | dbsp_drp/gui_helpers.py | finagle29/DBSP_DRP | d2f869f85e1425507dbc84e4e76fa44a6784f9d1 | [
"BSD-3-Clause"
] | null | null | null | """
Module for classes and functions shared between GUIs.
"""
from matplotlib.backend_tools import ToolBase
from PySide2 import QtWidgets
class HelpTool(ToolBase):
"""
Print help text.
"""
# keyboard shortcut
default_keymap = 'h'
description = 'Help'
image = 'help'
def __init__(self, *args, helptext="", **kwargs):
self.helptext = helptext
super().__init__(*args, **kwargs)
def trigger(self, *args, **kwargs):
QtWidgets.QMessageBox.information(None, "Help", self.helptext)
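# Added usage note (not part of the original module): with matplotlib's "toolmanager"
# toolbar enabled, a tool like this is typically registered on a figure roughly as
# follows (a sketch; the GUIs in this package may wire it up differently):
#   plt.rcParams["toolbar"] = "toolmanager"
#   fig = plt.figure()
#   fig.canvas.manager.toolmanager.add_tool("Help", HelpTool, helptext="How to use this window")
#   fig.canvas.manager.toolbar.add_tool("Help", "help")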
| 23.391304 | 70 | 0.654275 |
89037ce17a7bb60912a30f33436ba1596d396f18 | 6,339 | py | Python | modules/steps/base_pipeline_step.py | KTH/aspen | 3be9b55d21dfd950d1a82b2cf4f464cd1f1e9757 | [
"MIT"
] | null | null | null | modules/steps/base_pipeline_step.py | KTH/aspen | 3be9b55d21dfd950d1a82b2cf4f464cd1f1e9757 | [
"MIT"
] | 8 | 2019-10-10T08:03:02.000Z | 2022-01-11T11:28:58.000Z | modules/steps/base_pipeline_step.py | KTH/aspen | 3be9b55d21dfd950d1a82b2cf4f464cd1f1e9757 | [
"MIT"
] | null | null | null | """BasePipelineStep
Base class for all other pipeline steps. Wraps step running,
environment and step data verification, step chaining and
handles logging and exceptions"""
__author__ = 'tinglev'
#from memory_profiler import profile
from abc import ABCMeta, abstractmethod
import time
import os
import logging
from logging import LoggerAdapter
import subprocess
from modules.util import exceptions, data_defs, reporter_service, thread
class StepLogAdapter(LoggerAdapter):
def process(self, msg, kwargs):
return f'[{self.extra["step_name"]}] {msg}', kwargs
class BasePipelineStep:
__metaclass__ = ABCMeta
def __init__(self):
self.application_name = None
self.cluster_name = None
self.next_step = None
self.logger = logging.getLogger('BasePipelineStep')
self.configure_logger()
@abstractmethod
def run_step(self, pipeline_data): #pragma: no cover
""" Should return pipeline_data """
pass
@abstractmethod
def get_required_env_variables(self): #pragma: no cover
""" Should return a string array with the names of the environment
variables required by the current step """
pass
@abstractmethod
def get_required_data_keys(self): #pragma: no cover
""" Should return a string array with the names of the keys
that has to exist and have values in the data-object that
is passed between build steps """
pass
def get_step_name(self):
step_name = self.__class__.__name__
if self.application_name and self.cluster_name:
return f'{self.cluster_name}.{self.application_name}.{step_name}'
return step_name
def configure_logger(self):
self.log = StepLogAdapter(self.logger, {'step_name': self.get_step_name()})
def has_missing_step_data(self, data):
for key in self.get_required_data_keys():
if not data or not key in data:
return key
return None
def has_missing_environment_data(self):
for env in self.get_required_env_variables():
if not env in os.environ:
return env
if not os.environ.get(env):
self.log.warning('Environment variable "%s" exists but is empty', env)
return None
def set_app_and_cluster_name(self, pipeline_data):
if data_defs.APPLICATION_NAME in pipeline_data:
self.application_name = pipeline_data[data_defs.APPLICATION_NAME]
if data_defs.APPLICATION_CLUSTER in pipeline_data:
self.cluster_name = pipeline_data[data_defs.APPLICATION_CLUSTER]
def run_pipeline_step(self, pipeline_data):
self.set_app_and_cluster_name(pipeline_data)
# Update logger in case we now have app and cluster
self.configure_logger()
step_data_missing = self.has_missing_step_data(pipeline_data)
environment_missing = self.has_missing_environment_data()
self.check_environment_missing(pipeline_data, environment_missing)
self.check_step_data_missing(pipeline_data, step_data_missing)
self.log.debug('Running "%s"', self.get_step_name())
try:
pipeline_data = self.run_step(pipeline_data)
except Exception as ex: # pylint: disable=W0703
self.handle_pipeline_error(ex, pipeline_data)
if thread.thread_is_stoppped():
self.log.info('Sync thread has been stopped. Stopping pipeline.')
self.stop_pipeline()
if self.next_step:
self.next_step.run_pipeline_step(pipeline_data)
return pipeline_data
def check_environment_missing(self, pipeline_data, environment_missing):
if environment_missing:
self.log.error('Step environment missing "%s" for step "%s", and pipeline_data "%s"',
environment_missing, self.get_step_name(), pipeline_data)
raise exceptions.DeploymentError(f'Mandatory env {environment_missing} missing',
pipeline_data=pipeline_data,
step_name=self.get_step_name())
def check_step_data_missing(self, pipeline_data, step_data_missing):
if step_data_missing:
self.log.error('Step data "%s" missing for step "%s", and pipeline_data "%s"',
step_data_missing, self.get_step_name(), pipeline_data)
raise exceptions.DeploymentError('Step pipeline_data not ok',
pipeline_data=pipeline_data,
step_name=self.get_step_name())
def handle_pipeline_error(self, error, pipeline_data):
msg = str(error)
if isinstance(error, exceptions.AspenError):
msg = str(error)
error = exceptions.DeploymentError(msg, fatal=True, expected=False)
if isinstance(error, subprocess.CalledProcessError):
msg = str(error.output) # pylint: disable=E1101
if not isinstance(error, exceptions.DeploymentError):
# Convert all exceptions to deployment errors
error = exceptions.DeploymentError(msg)
# Mark them as unexpected
error.expected = False
# Complement error with step data
error = self.add_error_data(error, pipeline_data)
self.log.error('An error occured: "%s"', str(error), exc_info=True)
if error.fatal:
reporter_service.handle_fatal_error(error)
else:
reporter_service.handle_deployment_error(error)
# If the error was an AspenError, the Aspen pipeline will
# be stopped. This is a more friendly solution than
# using sys.exit()
self.stop_pipeline()
def add_error_data(self, deployment_error, pipeline_data):
deployment_error.pipeline_data = pipeline_data
deployment_error.step_name = self.get_step_name()
if deployment_error.retryable:
deployment_error.timestamp = time.time()
return deployment_error
def set_next_step(self, next_step):
self.next_step = next_step
return next_step
def stop_pipeline(self):
self.log.debug('Stopped pipeline at step "%s"', self.get_step_name())
self.next_step = None
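# Added illustration (not part of the original module): a minimal concrete step showing
# how the abstract methods above are typically filled in. The environment variable and
# data key names below are made up for the example.
class ExampleEchoStep(BasePipelineStep):
    def get_required_env_variables(self):
        # Environment variables that must be set before this step may run
        return ['EXAMPLE_TOKEN']
    def get_required_data_keys(self):
        # Keys that must already be present in pipeline_data
        return ['example_key']
    def run_step(self, pipeline_data):
        # Do the actual work for this step and hand the data on to the next step
        self.log.debug('example_key is "%s"', pipeline_data['example_key'])
        return pipeline_data
# Steps are chained with set_next_step() and executed through run_pipeline_step(), which
# performs the environment and data checks above before calling run_step().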
| 41.431373 | 97 | 0.661619 |
988f57cf6ea7f1065eea4e27336cbc5040c5ae17 | 523 | py | Python | test_patient_gui.py | adamkrekorian/Patient-Monitoring-Station-BME547 | 8bab434464b106fe2ae1751bcdcbc93b6da4995f | [
"Unlicense"
] | null | null | null | test_patient_gui.py | adamkrekorian/Patient-Monitoring-Station-BME547 | 8bab434464b106fe2ae1751bcdcbc93b6da4995f | [
"Unlicense"
] | null | null | null | test_patient_gui.py | adamkrekorian/Patient-Monitoring-Station-BME547 | 8bab434464b106fe2ae1751bcdcbc93b6da4995f | [
"Unlicense"
] | null | null | null | import base64
import io
from matplotlib import pyplot as plt
import matplotlib.image as mpimg
def test_file_to_b64_string():
""" Tests image encoder function
The test_file_to_b64_string() function tests the ability of the image
encoder function to encode an input image file into a base64 byte string.
Args:
Returns:
"""
from patient_gui import read_file_as_b64
b64str = read_file_as_b64("/Users/kaansahingur/Desktop/encode_test.jpg")
assert b64str[0:20] == "/9j/4AAQSkZJRgABAQAA"
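# Added illustration (not part of the original test module): a plausible sketch of the
# read_file_as_b64 helper exercised above, assuming it simply base64-encodes the raw
# bytes of the image file; the real implementation in patient_gui.py may differ.
def read_file_as_b64_sketch(image_path):
    """Encode an image file as a base64 string (illustrative only)."""
    import base64
    with open(image_path, "rb") as image_file:
        b64_bytes = base64.b64encode(image_file.read())
    return str(b64_bytes, encoding="utf-8")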
| 24.904762 | 77 | 0.745698 |
ca38b25aef297a478abca4abd095db88c88a3bf2 | 11,035 | py | Python | instabot/bot/bot_get.py | bundle-marketing/instabot | d261e5edc475ece685f4184ca715842e34b512f4 | [
"Apache-2.0"
] | null | null | null | instabot/bot/bot_get.py | bundle-marketing/instabot | d261e5edc475ece685f4184ca715842e34b512f4 | [
"Apache-2.0"
] | null | null | null | instabot/bot/bot_get.py | bundle-marketing/instabot | d261e5edc475ece685f4184ca715842e34b512f4 | [
"Apache-2.0"
] | null | null | null | """
All methods must return media_ids that can be
passed into e.g. like() or comment() functions.
"""
from tqdm import tqdm
def get_media_owner(self, media_id):
self.api.media_info(media_id)
try:
return str(self.api.last_json["items"][0]["user"]["pk"])
except Exception as ex:
self.logger.error("Error: get_media_owner(%s)\n%s", media_id, ex)
return False
def get_user_tags_medias(self, user_id):
self.api.get_user_tags(user_id)
return [str(media['pk']) for media in self.api.last_json['items']]
def get_popular_medias(self):
self.api.get_popular_feed()
return [str(media['pk']) for media in self.api.last_json['items']]
def get_your_medias(self, as_dict=False):
self.api.get_self_user_feed()
if as_dict:
return self.api.last_json["items"]
return self.filter_medias(self.api.last_json["items"], False)
def get_archived_medias(self, as_dict=False):
self.api.get_archive_feed()
if as_dict:
return self.api.last_json["items"]
return self.filter_medias(self.api.last_json["items"], False)
def get_timeline_medias(self, filtration=True):
if not self.api.get_timeline_feed():
self.logger.warning("Error while getting timeline feed.")
return []
return self.filter_medias(self.api.last_json["feed_items"], filtration)
def get_user_medias(self, user_id, filtration=False, is_comment=False):
user_id = self.convert_to_user_id(user_id)
self.api.get_user_feed(user_id)
if self.api.last_json["status"] == 'fail':
self.logger.warning("This is a closed account.")
return []
return self.filter_medias(self.api.last_json["items"], False, is_comment=is_comment)
def get_total_user_medias(self, user_id):
user_id = self.convert_to_user_id(user_id)
medias = self.api.get_total_user_feed(user_id)
if self.api.last_json["status"] == 'fail':
self.logger.warning("This is a closed account.")
return []
return self.filter_medias(medias, filtration=False)
def get_last_user_medias(self, user_id, amount):
user_id = self.convert_to_user_id(user_id)
medias = self.api.get_last_user_feed(user_id, amount)
if self.api.last_json["status"] == 'fail':
self.logger.warning("This is a closed account.")
return []
return self.filter_medias(medias, filtration=False)
def get_user_likers(self, user_id, media_count=10):
your_likers = set()
media_items = self.get_user_medias(user_id, filtration=False)
if not media_items:
self.logger.warning("Can't get %s medias." % user_id)
return []
for media_id in tqdm(media_items[:media_count],
desc="Getting %s media likers" % user_id):
media_likers = self.get_media_likers(media_id)
your_likers |= set(media_likers)
return list(your_likers)
def get_hashtag_medias(self, hashtag, filtration=True):
if not self.api.get_hashtag_feed(hashtag):
self.logger.warning("Error while getting hashtag feed.")
return []
return self.filter_medias(self.api.last_json["items"], filtration)
def get_total_hashtag_medias(self, hashtag, amount=100, filtration=False):
medias = self.api.get_total_hashtag_feed(hashtag, amount)
return self.filter_medias(medias, filtration=filtration)
def get_geotag_medias(self, geotag, filtration=True):
# TODO: returns list of medias from geotag
pass
def get_locations_from_coordinates(self, latitude, longitude):
self.api.search_location(lat=latitude, lng=longitude)
all_locations = self.api.last_json["items"]
filtered_locations = []
for location in all_locations:
location_lat = location["location"]["lat"]
location_lng = location["location"]["lng"]
if int(location_lat) == int(latitude) and int(location_lng) == longitude:
filtered_locations.append(location)
return filtered_locations
def get_media_info(self, media_id):
if isinstance(media_id, dict):
return media_id
self.api.media_info(media_id)
if "items" not in self.api.last_json:
self.logger.info("Media with %s not found." % media_id)
return []
return self.api.last_json["items"]
def get_timeline_users(self):
if not self.api.get_timeline_feed():
self.logger.warning("Error while getting timeline feed.")
return []
return [str(i['user']['pk']) for i in self.api.last_json['items'] if i.get('user')]
def get_hashtag_users(self, hashtag):
if not self.api.get_hashtag_feed(hashtag):
self.logger.warning("Error while getting hastag feed.")
return []
return [str(i['user']['pk']) for i in self.api.last_json['items']]
def get_geotag_users(self, geotag):
# TODO: returns list user_ids who just posted on this geotag
pass
def get_user_id_from_username(self, username):
if username not in self._usernames:
self.api.search_username(username)
self.very_small_delay()
if "user" in self.api.last_json:
self._usernames[username] = str(self.api.last_json["user"]["pk"])
else:
return None
return self._usernames[username]
def get_username_from_user_id(self, user_id):
user_info = self.get_user_info(user_id)
if user_info and "username" in user_info:
return str(user_info["username"])
return None # Not found
def get_user_info(self, user_id, use_cache=True):
user_id = self.convert_to_user_id(user_id)
if not use_cache or user_id not in self._user_infos:
self.api.get_username_info(user_id)
last_json = self.api.last_json
if last_json is None or 'user' not in last_json:
return False
user_info = last_json['user']
self._user_infos[user_id] = user_info
return self._user_infos[user_id]
def get_user_followers(self, user_id, nfollows):
user_id = self.convert_to_user_id(user_id)
followers = self.api.get_total_followers(user_id, nfollows)
return [str(item['pk']) for item in followers][::-1] if followers else []
def get_user_following(self, user_id, nfollows=None):
user_id = self.convert_to_user_id(user_id)
following = self.api.get_total_followings(user_id, nfollows)
return [str(item['pk']) for item in following][::-1] if following else []
def get_comment_likers(self, comment_id):
self.api.get_comment_likers(comment_id)
if "users" not in self.api.last_json:
self.logger.info("Comment with %s not found." % comment_id)
return []
return list(map(lambda user: str(user['pk']), self.api.last_json["users"]))
def get_media_likers(self, media_id):
self.api.get_media_likers(media_id)
if "users" not in self.api.last_json:
self.logger.info("Media with %s not found." % media_id)
return []
return list(map(lambda user: str(user['pk']), self.api.last_json["users"]))
def get_media_comments(self, media_id, only_text=False):
self.api.get_media_comments(media_id)
if 'comments' not in self.api.last_json:
return []
if only_text:
return [str(item["text"]) for item in self.api.last_json['comments']]
return self.api.last_json['comments']
def get_media_comments_all(self, media_id, only_text=False, count=False):
has_more_comments = True
max_id = ''
comments = []
while has_more_comments:
self.api.get_media_comments(media_id, max_id=max_id)
for comment in self.api.last_json['comments']:
comments.append(comment)
has_more_comments = self.api.last_json['has_more_comments']
if count and len(comments) >= count:
comments = comments[:count]
has_more_comments = False
self.logger.info("Getting comments stopped by count (%s)." % count)
if has_more_comments:
max_id = self.api.last_json['next_max_id']
if only_text:
return [str(item["text"]) for item in sorted(
comments, key=lambda k: k['created_at_utc'], reverse=False)]
return sorted(comments, key=lambda k: k['created_at_utc'], reverse=False)
def get_media_commenters(self, media_id):
self.get_media_comments(media_id)
if 'comments' not in self.api.last_json:
return []
return [str(item["user"]["pk"]) for item in self.api.last_json['comments']]
def search_users(self, query):
self.api.search_users(query)
if "users" not in self.api.last_json:
self.logger.info("Users with %s not found." % query)
return []
return [str(user['pk']) for user in self.api.last_json['users']]
def get_comment(self):
try:
return self.comments_file.random().strip()
except IndexError:
return "Wow!"
def get_media_id_from_link(self, link):
if 'instagram.com/p/' not in link:
self.logger.error('Unexpected link')
return False
link = link.split('/')
code = link[link.index('p') + 1]
alphabet = {
'-': 62, '1': 53, '0': 52, '3': 55, '2': 54, '5': 57, '4': 56,
'7': 59, '6': 58, '9': 61, '8': 60, 'A': 0, 'C': 2, 'B': 1,
'E': 4, 'D': 3, 'G': 6, 'F': 5, 'I': 8, 'H': 7, 'K': 10, 'J': 9,
'M': 12, 'L': 11, 'O': 14, 'N': 13, 'Q': 16, 'P': 15, 'S': 18,
'R': 17, 'U': 20, 'T': 19, 'W': 22, 'V': 21, 'Y': 24, 'X': 23,
'Z': 25, '_': 63, 'a': 26, 'c': 28, 'b': 27, 'e': 30, 'd': 29,
'g': 32, 'f': 31, 'i': 34, 'h': 33, 'k': 36, 'j': 35, 'm': 38,
'l': 37, 'o': 40, 'n': 39, 'q': 42, 'p': 41, 's': 44, 'r': 43,
'u': 46, 't': 45, 'w': 48, 'v': 47, 'y': 50, 'x': 49, 'z': 51,
}
result = 0
for char in code:
result = result * 64 + alphabet[char]
return result
def get_link_from_media_id(self, media_id):
alphabet = {
'-': 62, '1': 53, '0': 52, '3': 55, '2': 54, '5': 57, '4': 56,
'7': 59, '6': 58, '9': 61, '8': 60, 'A': 0, 'C': 2, 'B': 1,
'E': 4, 'D': 3, 'G': 6, 'F': 5, 'I': 8, 'H': 7, 'K': 10, 'J': 9,
'M': 12, 'L': 11, 'O': 14, 'N': 13, 'Q': 16, 'P': 15, 'S': 18,
'R': 17, 'U': 20, 'T': 19, 'W': 22, 'V': 21, 'Y': 24, 'X': 23,
'Z': 25, '_': 63, 'a': 26, 'c': 28, 'b': 27, 'e': 30, 'd': 29,
'g': 32, 'f': 31, 'i': 34, 'h': 33, 'k': 36, 'j': 35, 'm': 38,
'l': 37, 'o': 40, 'n': 39, 'q': 42, 'p': 41, 's': 44, 'r': 43,
'u': 46, 't': 45, 'w': 48, 'v': 47, 'y': 50, 'x': 49, 'z': 51,
}
result = ''
while media_id:
media_id, char = media_id // 64, media_id % 64
result += list(alphabet.keys())[list(alphabet.values()).index(char)]
return 'https://instagram.com/p/' + result[::-1] + '/'
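# Added note (not part of the original file): the two helpers above treat the shortcode
# in an instagram.com/p/<code>/ URL as a base-64 number over Instagram's custom alphabet,
# so bot.get_media_id_from_link(link) and bot.get_link_from_media_id(media_id) are
# inverses of each other (link -> numeric media id -> link).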
def get_messages(self):
if self.api.getv2Inbox():
return self.api.last_json
else:
self.logger.info("Messages were not found, something went wrong.")
return None
def convert_to_user_id(self, x):
x = str(x)
if not x.isdigit():
x = x.lstrip('@')
x = self.get_user_id_from_username(x)
    # if type is not str then it is an int, so a user_id was passed
return x
| 34.270186 | 88 | 0.63072 |
c6b8f0f12a77ad0f5338da3b7b09775841958249 | 12,012 | py | Python | forte/data/readers/html_reader.py | swapnull7/forte | 737a72afd440d40c3826c3a7c5e4e44235c0f701 | [
"Apache-2.0"
] | null | null | null | forte/data/readers/html_reader.py | swapnull7/forte | 737a72afd440d40c3826c3a7c5e4e44235c0f701 | [
"Apache-2.0"
] | null | null | null | forte/data/readers/html_reader.py | swapnull7/forte | 737a72afd440d40c3826c3a7c5e4e44235c0f701 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The Forte Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The reader that reads html data into Datapacks.
"""
from html.parser import HTMLParser
from html import unescape
import os
import re
from typing import Iterator
from forte.data.span import Span
from forte.data.data_pack import DataPack
from forte.data.data_utils_io import dataset_path_iterator
from forte.data.readers.base_reader import PackReader
from ft.onto.base_ontology import Document
# Regular expressions used for parsing. Borrowed from
# https://github.com/python/cpython/blob/3.6/Lib/html/parser.py
interesting_normal = re.compile('[&<]')
incomplete = re.compile('&[a-zA-Z#]')
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')
starttagopen = re.compile('<[a-zA-Z]')
piclose = re.compile('>')
commentclose = re.compile(r'--\s*>')
# Note:
# 1) if you change tagfind/attrfind remember to update locatestarttagend too;
# 2) if you change tagfind/attrfind and/or locatestarttagend the parser will
# explode, so don't do it.
# see http://www.w3.org/TR/html5/tokenization.html#tag-open-state
# and http://www.w3.org/TR/html5/tokenization.html#tag-name-state
tagfind_tolerant = re.compile(r'([a-zA-Z][^\t\n\r\f />\x00]*)(?:\s|/(?!>))*')
attrfind_tolerant = re.compile(
r'((?<=[\'"\s/])[^\s/>][^\s/=>]*)(\s*=+\s*'
r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*')
locatestarttagend_tolerant = re.compile(r"""
<[a-zA-Z][^\t\n\r\f />\x00]* # tag name
(?:[\s/]* # optional whitespace before attribute name
(?:(?<=['"\s/])[^\s/>][^\s/=>]* # attribute name
(?:\s*=+\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|"[^"]*" # LIT-enclosed value
|(?!['"])[^>\s]* # bare value
)
(?:\s*,)* # possibly followed by a comma
)?(?:\s|/(?!>))*
)*
)?
\s* # trailing whitespace
""", re.VERBOSE)
endendtag = re.compile('>')
# the HTML 5 spec, section 8.1.2.2, doesn't allow spaces between
# </ and the tag name, so maybe this should be fixed
endtagfind = re.compile(r'</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
__all__ = [
"HTMLReader",
]
class ForteHTMLParser(HTMLParser):
r"""Parser that stores spans that HTMLReader can use.
"""
def __init__(self):
super().__init__()
self.spans = []
def collect_span(self, begin, end):
self.spans.append((Span(begin, end), ''))
# We override the original goahead method and collect the information
# we need to successfully remove tag information and retrieve the original
# html document without any loss.
def goahead(self, end):
rawdata = self.rawdata
i = 0
n = len(rawdata)
while i < n:
if self.convert_charrefs and not self.cdata_elem:
j = rawdata.find('<', i)
if j < 0:
# if we can't find the next <, either we are at the end
# or there's more text incoming. If the latter is True,
# we can't pass the text to handle_data in case we have
# a charref cut in half at end. Try to determine if
# this is the case before proceeding by looking for an
# & near the end and see if it's followed by a space or ;.
amppos = rawdata.rfind('&', max(i, n - 34))
if (amppos >= 0 and
not re.compile(r'[\s;]').search(rawdata, amppos)):
break # wait till we get all the text
j = n
else:
match = self.interesting.search(rawdata, i) # < or &
if match:
j = match.start()
else:
if self.cdata_elem:
break
j = n
if i < j:
if self.convert_charrefs and not self.cdata_elem:
self.handle_data(unescape(rawdata[i:j]))
else:
self.handle_data(rawdata[i:j])
i = self.updatepos(i, j)
if i == n:
break
startswith = rawdata.startswith
if startswith('<', i):
if starttagopen.match(rawdata, i): # < + letter
k = self.parse_starttag(i)
self.collect_span(i, k)
elif startswith("</", i):
k = self.parse_endtag(i)
self.collect_span(i, k)
elif startswith("<!--", i):
k = self.parse_comment(i)
self.collect_span(i, k)
elif startswith("<?", i):
k = self.parse_pi(i)
self.collect_span(i, k)
elif startswith("<!", i):
k = self.parse_html_declaration(i)
self.collect_span(i, k)
elif (i + 1) < n:
self.handle_data("<")
k = i + 1
else:
break
if k < 0:
if not end:
break
k = rawdata.find('>', i + 1)
if k < 0:
k = rawdata.find('<', i + 1)
if k < 0:
k = i + 1
else:
k += 1
if self.convert_charrefs and not self.cdata_elem:
self.handle_data(unescape(rawdata[i:k]))
else:
self.handle_data(rawdata[i:k])
i = self.updatepos(i, k)
elif startswith("&#", i):
match = charref.match(rawdata, i)
if match:
name = match.group()[2:-1]
self.handle_charref(name)
k = match.end()
if not startswith(';', k - 1):
k = k - 1
i = self.updatepos(i, k)
else:
if ";" in rawdata[i:]: # bail by consuming &#
self.handle_data(rawdata[i:i + 2])
i = self.updatepos(i, i + 2)
break
elif startswith('&', i):
match = entityref.match(rawdata, i)
if match:
name = match.group(1)
self.handle_entityref(name)
k = match.end()
if not startswith(';', k - 1):
k = k - 1
i = self.updatepos(i, k)
continue
match = incomplete.match(rawdata, i)
if match:
# match.group() will contain at least 2 chars
if end and match.group() == rawdata[i:]:
k = match.end()
if k <= i:
k = n
i = self.updatepos(i, i + 1)
# incomplete
elif (i + 1) < n:
# not the end of the buffer, and can't be confused
# with some other construct
self.handle_data("&")
i = self.updatepos(i, i + 1)
else:
break
else:
assert 0, "interesting.search() lied"
# end while
if end and i < n and not self.cdata_elem:
if self.convert_charrefs and not self.cdata_elem:
self.handle_data(unescape(rawdata[i:n]))
else:
self.handle_data(rawdata[i:n])
i = self.updatepos(i, n)
# pylint: disable=attribute-defined-outside-init
self.rawdata = rawdata[i:]
class HTMLReader(PackReader):
r""":class:`HTMLReader` is designed to read in list of html strings.
It takes in list of html strings, cleans the HTML tags and stores the
cleaned text in pack.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.init_with_fileloc = False
self.init_with_html = False
def _collect(self, content) -> Iterator[str]: # type: ignore
r"""Could be called with a directory, a particular file location or a
list of strings. If the string is an HTML string, it will be cleaned.
Args:
content: either a string, or list of string
Returns: Iterator over the content based on type of input
"""
if isinstance(content, str):
# Check if directory
if os.path.isdir(content):
self.init_with_fileloc = True
# TODO: maybe extend it to .txt also if need be?
return dataset_path_iterator(content, ".html")
# If file path to a single file, just return the filepath
elif os.path.isfile(content):
def data_yielder(data):
yield data
self.init_with_fileloc = True
return data_yielder(content)
else: # Treat it as a string
content = [content]
if isinstance(content, list): # Must be a list of strings now
self.init_with_html = True
def data_iterator(data):
for html_string in data:
yield html_string
return data_iterator(content)
else:
raise TypeError(f"HTMLReader supports only strings and list of"
f" strings, Please make sure your inputs are"
f" correct!"
f"Found {type(content)} instead!")
def _parse_pack(self, data_source: str) -> Iterator[DataPack]:
r"""Takes a string which could be either a filepath or html_content and
converts into a DataPack.
Args:
data_source: str that contains text of a document or a filepath
Returns: DataPack containing Document.
"""
pack = DataPack()
# Check if data_source is a filepath
if self.init_with_fileloc:
with open(data_source, "r",
encoding="utf8",
errors='ignore') as file:
text = file.read()
# else, must be a string with actual data
else:
text = data_source
self.set_text(pack, text)
# Note that pack.text can be different from the text passed in, due to
# the text_replace_operation
Document(pack, 0, len(pack.text))
yield pack
def text_replace_operation(self, text: str):
r"""Replace html tag locations with blank string.
Args:
text: The original html text to be cleaned.
Returns: List[Tuple[Span, str]]: the replacement operations
"""
parser = ForteHTMLParser()
parser.feed(text)
return parser.spans
def _cache_key_function(self, collection):
# check if collection is file or html string
if self.init_with_fileloc:
return os.path.basename(collection)
# If html string
else:
return str(hash(collection)) + '.html'
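# Added illustration (not part of the original module): a minimal sketch of how the spans
# collected by ForteHTMLParser mark the tag regions that text_replace_operation blanks out.
# It assumes Span exposes ``begin``/``end`` attributes, and the sample HTML is made up.
def _demo_strip_tags(html_string: str) -> str:
    parser = ForteHTMLParser()
    parser.feed(html_string)
    cleaned = html_string
    # Apply the replacements from right to left so earlier span offsets stay valid.
    for span, replacement in sorted(parser.spans, key=lambda p: p[0].begin, reverse=True):
        cleaned = cleaned[:span.begin] + replacement + cleaned[span.end:]
    return cleaned
# e.g. _demo_strip_tags("<p>hello reader</p>") -> "hello reader"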
| 38.012658 | 80 | 0.507992 |
2451d6fde78a2952dc1ae08414282787b178b11c | 1,241 | py | Python | Baekjoon/Greedy/11399_ATM.py | Gonnnnn/Algorithm | f9cbbbc64e5d62ed75a5e3d5edb7b8cdae6e18e2 | [
"Apache-2.0"
] | null | null | null | Baekjoon/Greedy/11399_ATM.py | Gonnnnn/Algorithm | f9cbbbc64e5d62ed75a5e3d5edb7b8cdae6e18e2 | [
"Apache-2.0"
] | null | null | null | Baekjoon/Greedy/11399_ATM.py | Gonnnnn/Algorithm | f9cbbbc64e5d62ed75a5e3d5edb7b8cdae6e18e2 | [
"Apache-2.0"
] | null | null | null | # Problem
# Inha Bank has only one ATM. Right now, N people are standing in line in front of it. People are numbered 1 to N, and the time it takes person i to withdraw money is Pi minutes.
# Depending on the order in which people line up, the total time needed for everyone to withdraw money changes. For example, suppose there are 5 people with P1 = 3, P2 = 1, P3 = 4, P4 = 3, P5 = 2. If they line up in the order [1, 2, 3, 4, 5], person 1 withdraws in 3 minutes. Person 2 has to wait for person 1, so it takes 3+1 = 4 minutes. Person 3 has to wait for persons 1 and 2, so it takes 3+1+4 = 8 minutes in total. Person 4 needs 3+1+4+3 = 11 minutes and person 5 needs 3+1+4+3+2 = 13 minutes. In this case the sum of the times needed is 3+4+8+11+13 = 39 minutes.
# If they line up in the order [2, 5, 1, 4, 3] instead, person 2 takes 1 minute, person 5 takes 1+2 = 3 minutes, person 1 takes 1+2+3 = 6 minutes, person 4 takes 1+2+3+3 = 9 minutes, and person 3 takes 1+2+3+3+4 = 13 minutes. The sum of the times needed is 1+3+6+9+13 = 32 minutes, and the total cannot be made any smaller than this.
# Given the number of people N in line and the time Pi each person needs to withdraw money, write a program that finds the minimum possible sum of the times needed for each person to withdraw money.
# Input
# The first line contains the number of people N (1 ≤ N ≤ 1,000). The second line contains the withdrawal times Pi. (1 ≤ Pi ≤ 1,000)
# Output
# Print the minimum sum of the times needed for each person to withdraw money on the first line.
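# Added note (not part of the original solution): the greedy argument is that serving the
# shortest withdrawal times first minimizes the total, because person i's time is counted
# once for every person at or behind position i; the answer is the sum of the prefix sums
# of the sorted times. A small equivalent sketch (the function name is made up):
def min_total_wait(times):
    total, running = 0, 0
    for t in sorted(times):   # shortest withdrawal first
        running += t          # the moment this person finishes
        total += running      # add their finish time to the answer
    return total
# e.g. min_total_wait([3, 1, 4, 3, 2]) == 32, matching the worked example above.
# The PriorityQueue solution below accumulates the same prefix sums.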
from queue import PriorityQueue
que = PriorityQueue()
num_of_ppl = int(input())
time = list(map(int, input().split()))
for i in time:
que.put(i)
min_time = 0
temp = 0
for i in range(que.qsize()):
temp += que.get()
min_time += temp
print(min_time) | 40.032258 | 400 | 0.628525 |
058f2c38f5d13c7b7b776fc51c772db54182ea04 | 380 | py | Python | cmlkit/utility/__init__.py | sirmarcel/cmlk | e099bf3e255b60675e8e1b3ad29db750dbd6faf3 | [
"MIT"
] | 24 | 2018-06-22T17:31:20.000Z | 2022-03-29T14:52:49.000Z | cmlkit/utility/__init__.py | sirmarcel/cmlk | e099bf3e255b60675e8e1b3ad29db750dbd6faf3 | [
"MIT"
] | 8 | 2019-06-06T14:51:57.000Z | 2021-06-30T19:43:13.000Z | cmlkit/utility/__init__.py | sirmarcel/cmlk | e099bf3e255b60675e8e1b3ad29db750dbd6faf3 | [
"MIT"
] | 5 | 2018-07-30T18:53:08.000Z | 2022-02-10T09:12:15.000Z | """Kitchen sink of various convenient things."""
from .elements import charges_to_elements
from .timing import timed, time_repeat
from .conversion import convert, unconvert
from .indices import fourway_split, threeway_split, twoway_split
from .humanhash import humanize
from .opt_lgs import OptimizerLGS
from .import_qmmlpack import import_qmmlpack, import_qmmlpack_experimental
| 38 | 74 | 0.844737 |
fa29e8513dd838cdea3972e233757eea995260ea | 4,911 | py | Python | Scientific Computing with Python/Shape Calculator/test_module.py | SassoStark/FreeCodeCamp | d6510de402436ed3560734c5f333272fa6fd64d8 | [
"MIT"
] | null | null | null | Scientific Computing with Python/Shape Calculator/test_module.py | SassoStark/FreeCodeCamp | d6510de402436ed3560734c5f333272fa6fd64d8 | [
"MIT"
] | null | null | null | Scientific Computing with Python/Shape Calculator/test_module.py | SassoStark/FreeCodeCamp | d6510de402436ed3560734c5f333272fa6fd64d8 | [
"MIT"
] | null | null | null | import unittest
import shape_calculator
class UnitTests(unittest.TestCase):
def setUp(self):
self.rect = shape_calculator.Rectangle(3, 6)
self.sq = shape_calculator.Square(5)
def test_subclass(self):
actual = issubclass(shape_calculator.Square, shape_calculator.Rectangle)
expected = True
self.assertEqual(actual, expected, 'Expected Square class to be a subclass of the Rectangle class.')
def test_distinct_classes(self):
actual = shape_calculator.Square is not shape_calculator.Rectangle
expected = True
self.assertEqual(actual, expected, 'Expected Square class to be a distinct class from the Rectangle class.')
def test_square_is_square_and_rectangle(self):
actual = isinstance(self.sq, shape_calculator.Square) and isinstance(self.sq, shape_calculator.Rectangle)
expected = True
self.assertEqual(actual, expected, 'Expected square object to be an instance of the Square class and the Rectangle class.')
def test_rectangle_string(self):
actual = str(self.rect)
expected = "Rectangle(width=3, height=6)"
self.assertEqual(actual, expected, 'Expected string representation of rectangle to be "Rectangle(width=3, height=6)"')
def test_square_string(self):
actual = str(self.sq)
expected = "Square(side=5)"
self.assertEqual(actual, expected, 'Expected string representation of square to be "Square(side=5)"')
def test_area(self):
actual = self.rect.get_area()
expected = 18
self.assertEqual(actual, expected, 'Expected area of rectangle to be 18')
actual = self.sq.get_area()
expected = 25
self.assertEqual(actual, expected, 'Expected area of square to be 25')
def test_perimeter(self):
actual = self.rect.get_perimeter()
expected = 18
self.assertEqual(actual, expected, 'Expected perimeter of rectangle to be 18')
actual = self.sq.get_perimeter()
expected = 20
self.assertEqual(actual, expected, 'Expected perimeter of square to be 20')
def test_diagonal(self):
actual = self.rect.get_diagonal()
expected = 6.708203932499369
self.assertEqual(actual, expected, 'Expected diagonal of rectangle to be 6.708203932499369')
actual = self.sq.get_diagonal()
expected = 7.0710678118654755
self.assertEqual(actual, expected, 'Expected diagonal of square to be 7.0710678118654755')
def test_set_atributes(self):
self.rect.set_width(7)
self.rect.set_height(8)
self.sq.set_side(2)
actual = str(self.rect)
expected = "Rectangle(width=7, height=8)"
self.assertEqual(actual, expected, 'Expected string representation of rectangle after setting new values to be "Rectangle(width=7, height=8)"')
actual = str(self.sq)
expected = "Square(side=2)"
self.assertEqual(actual, expected, 'Expected string representation of square after setting new values to be "Square(side=2)"')
self.sq.set_width(4)
actual = str(self.sq)
expected = "Square(side=4)"
self.assertEqual(actual, expected, 'Expected string representation of square after setting width to be "Square(side=4)"')
def test_rectangle_picture(self):
self.rect.set_width(7)
self.rect.set_height(3)
actual = self.rect.get_picture()
expected = "*******\n*******\n*******\n"
self.assertEqual(actual, expected, 'Expected rectangle picture to be different.')
    def test_square_picture(self):
self.sq.set_side(2)
actual = self.sq.get_picture()
expected = "**\n**\n"
self.assertEqual(actual, expected, 'Expected square picture to be different.')
def test_big_picture(self):
self.rect.set_width(51)
self.rect.set_height(3)
actual = self.rect.get_picture()
expected = "Too big for picture."
self.assertEqual(actual, expected, 'Expected message: "Too big for picture."')
def test_get_amount_inside(self):
self.rect.set_height(10)
self.rect.set_width(15)
actual = self.rect.get_amount_inside(self.sq)
expected = 6
self.assertEqual(actual, expected, 'Expected `get_amount_inside` to return 6.')
def test_get_amount_inside_two_rectangles(self):
rect2 = shape_calculator.Rectangle(4, 8)
actual = rect2.get_amount_inside(self.rect)
expected = 1
self.assertEqual(actual, expected, 'Expected `get_amount_inside` to return 1.')
def test_get_amount_inside_none(self):
rect2 = shape_calculator.Rectangle(2, 3)
actual = rect2.get_amount_inside(self.rect)
expected = 0
self.assertEqual(actual, expected, 'Expected `get_amount_inside` to return 0.')
if __name__ == "__main__":
unittest.main()
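# Added illustration (not part of the original test file): a minimal sketch of the
# shape_calculator module these tests exercise, inferred from the assertions above;
# the real module may differ in details.
class _SketchRectangle:
    def __init__(self, width, height):
        self.width = width
        self.height = height
    def __str__(self):
        return f"Rectangle(width={self.width}, height={self.height})"
    def set_width(self, width):
        self.width = width
    def set_height(self, height):
        self.height = height
    def get_area(self):
        return self.width * self.height
    def get_perimeter(self):
        return 2 * self.width + 2 * self.height
    def get_diagonal(self):
        return (self.width ** 2 + self.height ** 2) ** 0.5
    def get_picture(self):
        if self.width > 50 or self.height > 50:
            return "Too big for picture."
        return ("*" * self.width + "\n") * self.height
    def get_amount_inside(self, shape):
        # how many copies of `shape` fit inside, without rotation
        return (self.width // shape.width) * (self.height // shape.height)
class _SketchSquare(_SketchRectangle):
    def __init__(self, side):
        super().__init__(side, side)
    def __str__(self):
        return f"Square(side={self.width})"
    def set_side(self, side):
        self.width = side
        self.height = side
    # keep all sides equal when width or height is changed
    set_width = set_side
    set_height = set_side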
| 42.336207 | 151 | 0.669721 |
ae66e537af960a0ea5f727d27cd128a65e3efd9a | 1,223 | py | Python | aliyun-python-sdk-cs/aliyunsdkcs/request/v20151215/DescribeClusterDetailRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-cs/aliyunsdkcs/request/v20151215/DescribeClusterDetailRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-cs/aliyunsdkcs/request/v20151215/DescribeClusterDetailRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
class DescribeClusterDetailRequest(RoaRequest):
def __init__(self):
RoaRequest.__init__(self, 'CS', '2015-12-15', 'DescribeClusterDetail','cs')
self.set_uri_pattern('/clusters/[ClusterId]')
self.set_method('GET')
def get_ClusterId(self):
return self.get_path_params().get('ClusterId')
def set_ClusterId(self,ClusterId):
self.add_path_param('ClusterId',ClusterId) | 38.21875 | 78 | 0.764513 |
3ec3ca850eec92eb1dbabff6750e577feb1a1272 | 101,758 | py | Python | jadrian.py | jskDr/jamespy | 729c496732d8ec2d6ba25d6b97ef2aa02065c18c | [
"MIT"
] | null | null | null | jadrian.py | jskDr/jamespy | 729c496732d8ec2d6ba25d6b97ef2aa02065c18c | [
"MIT"
] | null | null | null | jadrian.py | jskDr/jamespy | 729c496732d8ec2d6ba25d6b97ef2aa02065c18c | [
"MIT"
] | null | null | null | """
adrin_rp.py
This file collects functions and classes used in redox potential prediction in metabolism,
a project on which I am collaborating with Adrian.
"""
import numpy as np
import pandas as pd
from collections import OrderedDict
# Optimization codes are described
from sklearn import cross_validation
from scipy.optimize import minimize
from numpy import array
import jpandas as jpd
import jgrid
#################################################
# Utilities
#################################################
def to_kegg_id( pdr, cn = "KEGG_ID_Adrian", fname = None, disp = False):
"""
    A new column is added. This new column holds a simplified KEGG_ID,
    which is generated from the full KEGG_ID, whether it is Adrian's or Ed's.
"""
pdw = pdr.copy()
KEGG_ID_l = list()
for kid_a in pdr[cn]:
tx_all, rx_all = kid_a.split(" = ")
kid = "{} = {}".format( tx_all[:6], rx_all[:6])
if disp:
print kid_a, "==>", kid
KEGG_ID_l.append( kid)
pdw["KEGG_ID"] = KEGG_ID_l
if fname:
"""
If file name is given, the results will be directly saved.
"""
pdw.to_csv( fname, index = False)
return pdw
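# Illustrative example of the simplification done by to_kegg_id() (the IDs below are
# made up): a full reaction string such as "C00036_xyz + C00003 = C00149_abc + C00004"
# is reduced to "C00036 = C00149", i.e. only the first six characters (the bare KEGG
# compound ID) of each side of " = " are kept.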
#################################################
# Application codes
#################################################
def init():
"""
The two data frames are loaded.
    The resulting data frames are assigned to global variables since they will be used later.
    However, they are not reloaded when reload() is performed: reload() only refreshes the code,
    so init() must be called again to reload the data.
"""
global pdr_code, pdr_reaction, pdr2, pdr3
pdr_code = jpd.pd.read_csv('KEGG_to_smiles.csv')
pdr_reaction = jpd.pd.read_csv('DataFrame_Redox_Reactions.csv')
pdr2 = jpd.pd.read_csv( 'DataFrame_Redox_Reactions_with_smiles122.csv')
pdr3 = jpd.pd.read_csv( 'redox_quantum_chemistry_v2+pred_comma.csv')
def getsm( mid = 'C00025'):
return pdr_code[ pdr_code['KEGG_ID'] == mid].SMILES.tolist()[0]
def sep_ids( pdw):
x1 = list()
x2 = list()
for x in pdr_reaction['KEGG_ID']:
x1.append( x[:6])
x2.append( x[9:9+6])
pdw['left_id'] = x1
pdw['right_id'] = x2
return pdw
def get_2smiles( pdw):
left_smiles = map( getsm, pdw.left_id)
right_smiles = map( getsm, pdw.right_id)
pdw['left_smiles'] = left_smiles
pdw['right_smiles'] = right_smiles
return pdw
def gen_pdw():
"""
pdr_reaction will be extended to have the two smiles codes since
they are needed to make a model for regression from SMILES to redox potential.
    The result data frame will be saved to a file for later use.
"""
pdw = pdr_reaction.copy()
pdw = sep_ids( pdw)
pdw = get_2smiles( pdw)
pdw.to_csv('DataFrame_Redox_Reactions_with_smiles.csv', index = False)
return pdw
import os
from rdkit import Chem
from rdkit.Chem import AllChem
#from rdkit.Chem.Draw import IPythonConsole
from rdkit.Chem import Draw
from rdkit import DataStructs
from rdkit.Chem import Descriptors #as Descriptors
from rdkit.Chem import PandasTools #as PandasTools
from rdkit.Chem import FragmentCatalog
class Frag():
"""
    This class investigates whether molecules contain a specific fragment.
"""
def __init__( self, FunctionalGroups_txt = "FunctionalGroups.txt"):
fName=os.path.join(FunctionalGroups_txt)
self.fparams = FragmentCatalog.FragCatParams(1,6,fName)
def search( self, a_smiles = 'OC(=O)[C@H](CC(=O)O)N'):
"""
        It returns frag_map, which indicates which fragments match.
        For example, if only the first fragment is matched, only the first element of the
        returned vector is True.
"""
frag_map = list()
for indx in range(self.fparams.GetNumFuncGroups()):
patt = self.fparams.GetFuncGroup(indx)
m = Chem.MolFromSmiles( a_smiles)
match=m.HasSubstructMatch( patt)
frag_map.append( match)
return frag_map
def search_idx( self, frag_idx, s_l):
"""
        It checks every molecule in a vector of SMILES for the given fragment,
        so each element of the returned vector corresponds to one SMILES string.
        Exclusiveness is also tested by summing frag_map: if the sum is greater
        than one while the corresponding smiles_map entry is True, the match is
        not exclusive to a single fragment.
"""
smiles_map = list()
exclusive_map = list()
for s in s_l:
frag_map = self.search( s)
smiles_map.append(frag_map[ frag_idx] == True)
exclusive_map.append( sum( frag_map))
return smiles_map, exclusive_map
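# Illustrative usage sketch (not part of the original pipeline): it assumes a
# FunctionalGroups.txt catalog file is present in the working directory; the SMILES
# strings and the fragment index 0 are arbitrary examples.
def _example_frag_usage():
    fr = Frag( "FunctionalGroups.txt")
    frag_map = fr.search( 'OC(=O)[C@H](CC(=O)O)N')  # one boolean per catalog fragment
    smiles_map, exclusive_map = fr.search_idx( 0, ['OC(=O)[C@H](CC(=O)O)N', 'CCO'])
    return frag_map, smiles_map, exclusive_map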
import scipy.io
class GroupContr():
"""
    This is a group contribution method, which can predict, for example, redox potential
    by adding up the contributions of all fragments.
    It uses one MATLAB data file, which includes the KEGG_ID index data
    and a collection of vectors corresponding to that index.
    When group information is requested for a molecule,
    its index is searched first; then the contribution vector of the
    found index is extracted.
"""
def __init__( self, fname_mat = 'sheet/kegg_group_decomposition.mat', disp = False):
mat = scipy.io.loadmat( fname_mat)
self.cids = mat['cids']
self.group_mat = mat['gv_mat']
self.disp = disp
def get_index( self, subs_id = 'C00376'):
"""
It finds the index of a given kegg_id.
        The search speed could later be improved using C or C++ code.
        It is also possible to reimplement this whole class in C++ later on.
"""
cids_result = np.where( self.cids == subs_id)[0]
if len(cids_result) > 0:
return cids_result[0]
else:
            # if no cids entry is found.
            if self.disp:
                print 'Not found for', subs_id
                print 'The search result is', cids_result
                print 'So, the output becomes -1 as a notification.'
return -1
#return np.where( self.cids == subs_id)[0][0]
def get_2indices_kegg_id( self, kegg_id = 'C00376 = C00473'):
"""
The reaction will be separated to subs and prod.
"""
sub_prod = kegg_id.split(' = ')
return map( self.get_index, sub_prod)
def get_group_descriptor( self, idx):
"""
It returns the group contribution descriptor of a given molecule.
"""
return self.group_mat[ idx, :]
def get_2group_descriptors_kegg_id( self, idx2):
#print "DEBUG: idx2 -->", idx2
return self.group_mat[ idx2[0], :], self.group_mat[ idx2[1], :]
def kegg_id_list_to_2group_descriptors( self, kegg_id_list):
"""
It returns group contribution descriptors for subs and prods separately.
        The returned lists can be stored in the original csv file as additional information.
"""
# list of subs's group descriptors, a list of prod's group descriptors
subs_gd_list = list()
prod_gd_list = list()
no_cids_list = list()
for kegg_id in kegg_id_list:
idx2 = self.get_2indices_kegg_id( kegg_id)
if -1 in idx2:
no_cids_list.append( True)
subs_gd_list.append( None)
prod_gd_list.append( None)
else:
no_cids_list.append( False)
gd2_list = self.get_2group_descriptors_kegg_id( idx2)
subs_gd_list.append( gd2_list[0])
prod_gd_list.append( gd2_list[1])
return subs_gd_list, prod_gd_list, no_cids_list
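# Illustrative usage sketch (not part of the original pipeline): the .mat path is the
# class default and must exist; the reaction string is the example from the docstrings.
def _example_group_contr_usage():
    gc = GroupContr( fname_mat = 'sheet/kegg_group_decomposition.mat')
    idx2 = gc.get_2indices_kegg_id( 'C00376 = C00473')
    if -1 in idx2:  # one of the compounds is not in the cids table
        return None
    subs_gd, prod_gd = gc.get_2group_descriptors_kegg_id( idx2)
    return subs_gd, prod_gd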
class ML_GroupContr( GroupContr):
"""
This class performs machine learning based on group contribution.
"""
def __init__(self, fname_csv = 'sheet/Redox_ForJames_TPSS0_2015_11_04_comma.csv',
fname_mat = 'sheet/kegg_group_decomposition.mat', disp = False):
GroupContr.__init__( self, fname_mat, disp)
self.pdr = pd.read_csv( fname_csv)
def k2d( self, save_fname = None):
"""
        It translates kegg_id_list into descriptors.
"""
xM_subs, xM_prod, no_csid_list = self.kegg_id_list_to_2group_descriptors( self.pdr.KEGG_ID)
self.pdw = self.pdr.copy()
self.pdw[ 'xM_subs'] = xM_subs
self.pdw[ 'xM_prod'] = xM_prod
self.pdw[ 'no_csid'] = no_csid_list
if save_fname != None:
"""
If filename is given as an argument, the result data
will be stored together with the original data.
"""
self.pdw.to_csv( save_fname, index = False)
return self.pdw
def k2d_valid( self, save_fname = None):
self.k2d()
self.pdw_valid = self.pdw[ self.pdw[ 'no_csid'] == False]
if save_fname != None:
"""
If filename is given as an argument, the result data
will be stored together with the original data.
"""
self.pdw_valid.to_csv( save_fname, index = False)
return self.pdw_valid
def get_xM_yV(self, a_pdw_valid):
"""
From string arrays, original arrays will be recovered.
"""
#xM_subs_list = [ eval(x) for x in a_pdw_valid[ 'xM_subs'].tolist()]
xM_subs_list = a_pdw_valid[ 'xM_subs'].tolist()
xM_subs = np.mat( xM_subs_list)
#xM_prod_list = [ eval(x) for x in a_pdw_valid[ 'xM_prod'].tolist()]
xM_prod_list = a_pdw_valid[ 'xM_prod'].tolist()
xM_prod = np.mat( xM_prod_list)
xM = np.concatenate( [xM_subs, xM_prod], axis = 1)
if self.disp:
print 'xM_subs.shape, xM_prod.shape, xM.shape =', xM_subs.shape, xM_prod.shape, xM.shape
yV = np.mat( a_pdw_valid[ 'Em']).T
return xM, yV
def get_sM_yV(self, a_pdw_valid):
"""
sM is xM_prod - xM_subs
"""
#xM_subs_list = [ eval(x) for x in a_pdw_valid[ 'xM_subs'].tolist()]
xM_subs_list = a_pdw_valid[ 'xM_subs'].tolist()
xM_subs = np.mat( xM_subs_list)
#xM_prod_list = [ eval(x) for x in a_pdw_valid[ 'xM_prod'].tolist()]
xM_prod_list = a_pdw_valid[ 'xM_prod'].tolist()
xM_prod = np.mat( xM_prod_list)
sM = xM_prod - xM_subs
if self.disp:
print 'xM_subs.shape, xM_prod.shape, xM.shape =', xM_subs.shape, xM_prod.shape, sM.shape
yV = np.mat( a_pdw_valid[ 'Em']).T
return sM, yV
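# Illustrative usage sketch (not part of the original pipeline): the default csv/mat
# paths of ML_GroupContr must exist for this to run.
def _example_ml_group_contr_usage():
    ml = ML_GroupContr()
    pdw_valid = ml.k2d_valid()         # keep only reactions whose KEGG IDs were found
    sM, yV = ml.get_sM_yV( pdw_valid)  # sM = prod - subs group vectors, yV = Em
    return sM, yV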
#######################
# Regression codes
#######################
import jutil
from sklearn import linear_model, cross_validation
import matplotlib.pyplot as plt
def plot_per_type( pdr, E_QC = "E_QC", Em = "Em", type_name = "Type", type_l = [1,2,3,4]):
"""
    Plot data for each group with a different color.
    pdr: pandas dataframe
    type_name: name of the type column
    type_l: list of types to illustrate
"""
for type_id in type_l:
pdr_new = pdr[ pdr[ type_name] == type_id]
xM = np.mat( pdr_new[ E_QC].values).T
print xM.shape
yV = np.mat( pdr_new[ Em].values).T
print yV.shape
plt.plot( yV, xM, '.', label = "Type={}".format(type_id))
plt.xlabel('Experiment')
plt.ylabel('Estimation')
plt.legend(loc=2)
def single_regress( pdr, E_QC = "E_QC", Em = "Em"):
xM_all = np.mat( pdr[E_QC].values).T
print xM_all.shape
yV_all = np.mat( pdr[Em].values).T
print yV_all.shape
print "No regression case"
plt.figure()
jutil.regress_show3( yV_all, xM_all)
plt.show()
print "Simple regression case"
plt.figure()
jutil.mlr3( xM_all, yV_all)
plt.show()
def single_regress4( pdr, E_QC = "E_QC", Em = "Em", disp = True, graph = True):
xM_all = np.mat( pdr[E_QC].values).T
print xM_all.shape
yV_all = np.mat( pdr[Em].values).T
print yV_all.shape
print "No regression case"
#plt.figure()
jutil.regress_show4( yV_all, xM_all, disp = disp, graph = graph)
#plt.show()
print "Simple regression case"
#plt.figure()
jutil.mlr4_coef( xM_all, yV_all, disp = disp, graph = graph)
#plt.show()
def sep_noregress( pdr, E_QC = "E_QC", Em = "Em", type_name = "Type", type_l = [1,2,3,4]):
for type_id in type_l:
print "Type", type_id
pdr_new = pdr[ pdr[ type_name] == type_id]
xM = np.mat( pdr_new[E_QC].values).T
print xM.shape
yV = np.mat( pdr_new[Em].values).T
print yV.shape
jutil.regress_show3( yV, xM)
def sep_noregress4( pdr, E_QC = "E_QC", Em = "Em", type_name = "Type", type_l = [1,2,3,4]):
for type_id in type_l:
print "Type", type_id
pdr_new = pdr[ pdr[ type_name] == type_id]
xM = np.mat( pdr_new[E_QC].values).T
print xM.shape
yV = np.mat( pdr_new[Em].values).T
print yV.shape
jutil.regress_show4( yV, xM)
def sep_regress4( pdr, E_QC = "E_QC", Em = "Em", type_name = "Type", type_l = [1,2,3,4]):
# Select interesting parts only
#pdr = pdr_org.query( "Type in {}".format( type_id_l))
for type_id in type_l:
print "Type", type_id
pdr_new = pdr[ pdr[type_name] == type_id]
xM = np.mat( pdr_new[E_QC].values).T
print xM.shape
yV = np.mat( pdr_new[Em].values).T
print yV.shape
jutil.mlr4_coef( xM, yV)
def sep_regress_merge( pdr, E_QC = "E_QC", Em = "Em", type_name = "Type", type_l = [1,2,3,4]):
# Select interesting parts only
# pdr = pdr_org.query( "Type in {}".format( type_id_l))
yV_pred_all_list = list()
for type_id in type_l:
print "Type", type_id
pdr_new = pdr[ pdr[ type_name] == type_id]
xM = np.mat( pdr_new[E_QC].values).T
print xM.shape
yV = np.mat( pdr_new[Em].values).T
print yV.shape
clf = linear_model.LinearRegression()
clf.fit( xM, yV)
yV_pred = clf.predict( xM).ravel()
jutil.regress_show3( yV, yV_pred)
yV_pred_all_list.append( yV_pred)
print "Merging"
yV_pred_all = np.mat( np.concatenate( yV_pred_all_list, axis = 0)).T
yV_all = np.mat( pdr[Em].values).T
plt.figure()
jutil.regress_show3( yV_all, yV_pred_all)
plt.show()
plt.figure()
jutil.mlr3( yV_pred_all, yV_all)
plt.show()
def sep_regress_merge4( pdr, E_QC = "E_QC", Em = "Em", type_name = "Type", type_l = [1,2,3,4]):
# Select interesting parts only
# pdr = pdr_org.query( "Type in {}".format( type_id_l))
yV_pred_all_list = list()
for type_id in type_l:
print "Type", type_id
pdr_new = pdr[ pdr[ type_name] == type_id]
xM = np.mat( pdr_new[E_QC].values).T
print xM.shape
yV = np.mat( pdr_new[Em].values).T
print yV.shape
clf = linear_model.LinearRegression()
clf.fit( xM, yV)
yV_pred = clf.predict( xM).ravel()
jutil.regress_show4( yV, yV_pred)
yV_pred_all_list.append( yV_pred)
print "Merging"
yV_pred_all = np.mat( np.concatenate( yV_pred_all_list, axis = 0)).T
yV_all = np.mat( pdr[Em].values).T
plt.figure()
jutil.regress_show4( yV_all, yV_pred_all)
plt.show()
plt.figure()
jutil.mlr4_coef( yV_pred_all, yV_all)
plt.show()
def emul_get( pdr, E_QC = "E_QC", Em = "Em", type_name = "Type", type_l = [1,2,3,4]):
"""
    New descriptor = [x, 0, 0, 0] for type 1, and similarly for the other types.
"""
xM_l = list()
for ix, type_id in enumerate(type_l):
#print "Type[{0}] -> {1}".format( ix, type_id)
pdr_new = pdr[ pdr[ type_name] == type_id]
x = pdr_new[E_QC].values
x_ext = np.zeros( (len( type_l)*2, x.shape[0]), dtype = float)
x_ext[ix][:] = x
x_ext[len( type_l)+ix][:] = np.ones( x.shape[0], dtype = float)
xM_l.append( x_ext)
xM = np.mat( np.concatenate( xM_l, axis = 1)).T
yV = np.mat( pdr[Em].values).T
return xM, yV
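# Minimal sketch of the block descriptor built by emul_get() (synthetic numbers, not
# real data).  For type_l = [1, 2], a type-1 row with E_QC = x becomes [x, 0, 1, 0] and
# a type-2 row becomes [0, x, 0, 1], so one joint regression fits a separate slope and
# intercept per type.  The sketch assumes the frame is grouped by Type so that xM and
# yV stay row-aligned.
def _example_emul_descriptor():
    demo = pd.DataFrame( {"E_QC": [0.1, 0.2, 0.3, 0.4],
                          "Em": [10.0, 20.0, 30.0, 40.0],
                          "Type": [1, 1, 2, 2]})
    xM, yV = emul_get( demo, type_l = [1, 2])
    return xM, yV  # xM.shape == (4, 4), yV.shape == (4, 1)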
def emul_regress_merge4( pdr, E_QC = "E_QC", Em = "Em", type_name = "Type", type_l = [1,2,3,4]):
"""
    New descriptor = [x, 0, 0, 0] for type 1, and similarly for the other types.
"""
xM, yV = emul_get( pdr, E_QC = E_QC, Em = Em, type_name = type_name, type_l = type_l)
jutil.mlr4_coef( xM, yV)
def _emul_regress_merge4( pdr, E_QC = "E_QC", Em = "Em", type_name = "Type", type_l = [1,2,3,4]):
"""
    New descriptor = [x, 0, 0, 0] for type 1, and similarly for the other types.
"""
xM_l = list()
for ix, type_id in enumerate(type_l):
print "Type[{0}] -> {1}".format( ix, type_id)
pdr_new = pdr[ pdr[ type_name] == type_id]
x = pdr_new[E_QC].values
x_ext = np.zeros( (len( type_l)*2, x.shape[0]), dtype = float)
x_ext[ix][:] = x
x_ext[len( type_l)+ix][:] = np.ones( x.shape[0], dtype = float)
xM_l.append( x_ext)
xM = np.mat( np.concatenate( xM_l, axis = 1)).T
yV = np.mat( pdr[Em].values).T
jutil.mlr4_coef( xM, yV)
def hybrid_get( pdr, E_QC = "E_QC", Em = "Em", type_name = "Type", type_l = [1,2,3,4]):
"""
    It generates hybrid descriptors: one additional descriptor column holds the x value
    for every sample regardless of type. Using this shared column, overfitting can be reduced.
"""
xM_l = list()
for ix, type_id in enumerate(type_l):
# print "Type[{0}] -> {1}".format( ix, type_id)
pdr_new = pdr[ pdr[ type_name] == type_id]
x = pdr_new[E_QC].values
x_ext = np.zeros( (len( type_l)*2+1, x.shape[0]), dtype = float)
x_ext[ix][:] = x
x_ext[len( type_l)+ix][:] = np.ones( x.shape[0], dtype = float)
x_ext[len( type_l)*2][:] = x
xM_l.append( x_ext)
xM = np.mat( np.concatenate( xM_l, axis = 1)).T
yV = np.mat( pdr[Em].values).T
return xM, yV
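# For the hybrid descriptor above, the same type-1 row becomes [x, 0, 1, 0, x]
# (for type_l = [1, 2]): the trailing shared column carries x for every sample
# regardless of type, which ties the per-type fits together and, as noted in the
# docstring, can reduce overfitting.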
def hybrid_regress_merge4( pdr, E_QC = "E_QC", Em = "Em", type_name = "Type", type_l = [1,2,3,4]):
"""
    The new descriptor is generated by the sub-function hybrid_get().
"""
xM, yV = hybrid_get( pdr, E_QC = E_QC, Em = Em, type_name = type_name, type_l = type_l)
jutil.mlr4_coef( xM, yV)
def _hybrid_regress_merge4( pdr, E_QC = "E_QC", Em = "Em", type_name = "Type", type_l = [1,2,3,4]):
"""
    New descriptor = [x, 0, 0, 0] for type 1, and similarly for the other types.
"""
xM_l = list()
for ix, type_id in enumerate(type_l):
print "Type[{0}] -> {1}".format( ix, type_id)
pdr_new = pdr[ pdr[ type_name] == type_id]
x = pdr_new[E_QC].values
x_ext = np.zeros( (len( type_l)*2+1, x.shape[0]), dtype = float)
x_ext[ix][:] = x
x_ext[len( type_l)+ix][:] = np.ones( x.shape[0], dtype = float)
x_ext[len( type_l)*2][:] = x
xM_l.append( x_ext)
xM = np.mat( np.concatenate( xM_l, axis = 1)).T
yV = np.mat( pdr[Em].values).T
jutil.mlr4_coef( xM, yV)
def cv_train_test( xMa, yVa, tr, ts):
"""
    Regression and testing are performed on the given data
    using cross-validation index streams.
"""
xM = xMa[ tr, :]
yV = yVa[ tr, 0]
clf = linear_model.LinearRegression()
clf.fit( xM, yV)
# The testing information is extracted.
xM_test = xMa[ ts, :]
yV_test = yVa[ ts, 0]
return yV_test.A1, clf.predict( xM_test).ravel()
class Cv_sep_regress( object):
def __init__( self, pdr, E_QC = "E_QC", Em = "Em", type_name = "Type", type_l = [1,2,3,4],
disp = False, graph = False):
# This parameter will be used in the run() function.
self.type_l = type_l
self.disp = disp
self.graph = graph
self.xMa = {}
self.yVa = {}
# self.kfa = {}
for type_id in type_l:
pdr_new = pdr[ pdr[ type_name] == type_id]
self.xMa[type_id] = np.mat( pdr_new[ E_QC].values).T
self.yVa[type_id] = np.mat( pdr_new[ Em].values).T
# kfa[type_id] = cross_validation.KFold( np.shape(yVa[type_id])[0], n_folds=5, shuffle=True)
def run(self, n_folds = 5):
        # Without initialization just before the for-loop, the lists would keep growing across calls.
yV_test_all_list = list()
yV_pred_all_list = list()
kfa = {}
kfa_tr = {}
kfa_ts = {}
for type_id in self.type_l:
kfa[type_id] = cross_validation.KFold( np.shape( self.yVa[type_id])[0],
n_folds = n_folds, shuffle=True)
kfa_tr[type_id] = list()
kfa_ts[type_id] = list()
for tr, ts in kfa[ type_id]:
kfa_tr[type_id].append( tr)
kfa_ts[type_id].append( ts)
r2_l, RMSE_l, MAE_l = list(), list(), list()
for kf_id in range( n_folds):
yV_test_all_list = list()
yV_pred_all_list = list()
for type_id in self.type_l:
tr = kfa_tr[ type_id][ kf_id]
ts = kfa_ts[ type_id][ kf_id]
xM = self.xMa[ type_id]
yV = self.yVa[ type_id]
yV_test, yV_pred = cv_train_test( xM, yV, tr, ts)
yV_test_all_list.append( yV_test)
yV_pred_all_list.append( yV_pred)
yV_test_all = np.mat( np.concatenate( yV_test_all_list, axis = 0)).T
yV_pred_all = np.mat( np.concatenate( yV_pred_all_list, axis = 0)).T
r2, RMSE, MAE = jutil.regress_show3( yV_test_all, yV_pred_all, disp = self.disp, graph = self.graph)
r2_l.append( r2), RMSE_l.append( RMSE), MAE_l.append( MAE)
if self.disp:
print 'Mean and Std of R2 are', np.mean( r2_l), np.std( r2_l)
print 'Mean and Std of RMSE are', np.mean( RMSE_l), np.std( RMSE_l)
print 'Mean and Std of MAE are', np.mean( MAE_l), np.std( MAE_l)
return r2_l, RMSE_l, MAE_l
def run4(self, n_folds = 5):
        # Without initialization just before the for-loop, the lists would keep growing across calls.
yV_test_all_list = list()
yV_pred_all_list = list()
kfa = {}
kfa_tr = {}
kfa_ts = {}
for type_id in self.type_l:
kfa[type_id] = cross_validation.KFold( np.shape( self.yVa[type_id])[0],
n_folds = n_folds, shuffle=True)
kfa_tr[type_id] = list()
kfa_ts[type_id] = list()
for tr, ts in kfa[ type_id]:
kfa_tr[type_id].append( tr)
kfa_ts[type_id].append( ts)
r2_l, RMSE_l, MAE_l, DAE_l = list(), list(), list(), list()
for kf_id in range( n_folds):
yV_test_all_list = list()
yV_pred_all_list = list()
for type_id in self.type_l:
tr = kfa_tr[ type_id][ kf_id]
ts = kfa_ts[ type_id][ kf_id]
xM = self.xMa[ type_id]
yV = self.yVa[ type_id]
yV_test, yV_pred = cv_train_test( xM, yV, tr, ts)
yV_test_all_list.append( yV_test)
yV_pred_all_list.append( yV_pred)
yV_test_all = np.mat( np.concatenate( yV_test_all_list, axis = 0)).T
yV_pred_all = np.mat( np.concatenate( yV_pred_all_list, axis = 0)).T
r2, RMSE, MAE, DAE = jutil.regress_show4( yV_test_all, yV_pred_all, disp = self.disp, graph = self.graph)
r2_l.append( r2), RMSE_l.append( RMSE), MAE_l.append( MAE), DAE_l.append( DAE)
if self.disp:
print 'Mean and Std of R2 are', np.mean( r2_l), np.std( r2_l)
print 'Mean and Std of RMSE are', np.mean( RMSE_l), np.std( RMSE_l)
print 'Mean and Std of MAE are', np.mean( MAE_l), np.std( MAE_l)
print 'Mean and Std of DAE are', np.mean( DAE_l), np.std( DAE_l)
return r2_l, RMSE_l, MAE_l, DAE_l
def run_id(self, n_folds = 5):
        # Without initialization just before the for-loop, the lists would keep growing across calls.
yV_test_all_list = list()
yV_pred_all_list = list()
kfa = {}
kfa_tr = {}
kfa_ts = {}
r2_id_d, RMSE_id_d, MAE_id_d = {}, {}, {}
yV_test_d, yV_pred_d = {}, {}
for type_id in self.type_l:
kfa[type_id] = cross_validation.KFold( np.shape( self.yVa[type_id])[0],
n_folds = n_folds, shuffle=True)
kfa_tr[type_id] = list()
kfa_ts[type_id] = list()
for tr, ts in kfa[ type_id]:
kfa_tr[type_id].append( tr)
kfa_ts[type_id].append( ts)
for x in [yV_test_d, yV_pred_d, r2_id_d, RMSE_id_d, MAE_id_d]:
x[type_id] = list()
r2_l, RMSE_l, MAE_l = list(), list(), list()
for kf_id in range( n_folds):
yV_test_all_list = list()
yV_pred_all_list = list()
for type_id in self.type_l:
tr = kfa_tr[ type_id][ kf_id]
ts = kfa_ts[ type_id][ kf_id]
xM = self.xMa[ type_id]
yV = self.yVa[ type_id]
yV_test, yV_pred = cv_train_test( xM, yV, tr, ts)
yV_test_all_list.append( yV_test)
yV_pred_all_list.append( yV_pred)
yV_test_d[type_id].append( yV_test)
yV_pred_d[type_id].append( yV_pred)
yV_test_all = np.mat( np.concatenate( yV_test_all_list, axis = 0)).T
yV_pred_all = np.mat( np.concatenate( yV_pred_all_list, axis = 0)).T
r2, RMSE, MAE = jutil.regress_show3( yV_test_all, yV_pred_all, disp = self.disp, graph = self.graph)
r2_l.append( r2), RMSE_l.append( RMSE), MAE_l.append( MAE)
for type_id in self.type_l:
for kf_id in range( n_folds):
r2, RMSE, MAE = jutil.regress_show3( yV_test_d[type_id][kf_id], yV_pred_d[type_id][kf_id],
disp = self.disp, graph = self.graph)
r2_id_d[type_id].append( r2)
RMSE_id_d[type_id].append( RMSE)
MAE_id_d[type_id].append( MAE)
if self.disp:
print 'Mean and Std of R2 are', np.mean( r2_l), np.std( r2_l)
print 'Mean and Std of RMSE are', np.mean( RMSE_l), np.std( RMSE_l)
print 'Mean and Std of MAE are', np.mean( MAE_l), np.std( MAE_l)
return (r2_l, RMSE_l, MAE_l), (r2_id_d, RMSE_id_d, MAE_id_d)
def run4_id(self, n_folds = 5):
        # Without initialization just before the for-loop, the lists would keep growing across calls.
yV_test_all_list = list()
yV_pred_all_list = list()
kfa = {}
kfa_tr = {}
kfa_ts = {}
r2_id_d, RMSE_id_d, MAE_id_d, DAE_id_d = {}, {}, {}, {}
yV_test_d, yV_pred_d = {}, {}
for type_id in self.type_l:
kfa[type_id] = cross_validation.KFold( np.shape( self.yVa[type_id])[0],
n_folds = n_folds, shuffle=True)
kfa_tr[type_id] = list()
kfa_ts[type_id] = list()
for tr, ts in kfa[ type_id]:
kfa_tr[type_id].append( tr)
kfa_ts[type_id].append( ts)
for x in [yV_test_d, yV_pred_d, r2_id_d, RMSE_id_d, MAE_id_d, DAE_id_d]:
x[type_id] = list()
r2_l, RMSE_l, MAE_l, DAE_l = list(), list(), list(), list()
for kf_id in range( n_folds):
yV_test_all_list = list()
yV_pred_all_list = list()
for type_id in self.type_l:
tr = kfa_tr[ type_id][ kf_id]
ts = kfa_ts[ type_id][ kf_id]
xM = self.xMa[ type_id]
yV = self.yVa[ type_id]
yV_test, yV_pred = cv_train_test( xM, yV, tr, ts)
yV_test_all_list.append( yV_test)
yV_pred_all_list.append( yV_pred)
yV_test_d[type_id].append( yV_test)
yV_pred_d[type_id].append( yV_pred)
yV_test_all = np.mat( np.concatenate( yV_test_all_list, axis = 0)).T
yV_pred_all = np.mat( np.concatenate( yV_pred_all_list, axis = 0)).T
r2, RMSE, MAE, DAE = jutil.regress_show4( yV_test_all, yV_pred_all, disp = self.disp, graph = self.graph)
r2_l.append( r2), RMSE_l.append( RMSE), MAE_l.append( MAE), DAE_l.append( DAE)
for type_id in self.type_l:
for kf_id in range( n_folds):
r2, RMSE, MAE, DAE = jutil.regress_show4( yV_test_d[type_id][kf_id], yV_pred_d[type_id][kf_id],
disp = self.disp, graph = self.graph)
r2_id_d[type_id].append( r2)
RMSE_id_d[type_id].append( RMSE)
MAE_id_d[type_id].append( MAE)
DAE_id_d[type_id].append( DAE)
if self.disp:
print 'Mean and Std of R2 are', np.mean( r2_l), np.std( r2_l)
print 'Mean and Std of RMSE are', np.mean( RMSE_l), np.std( RMSE_l)
print 'Mean and Std of MAE are', np.mean( MAE_l), np.std( MAE_l)
print 'Mean and Std of DAE are', np.mean( DAE_l), np.std( DAE_l)
return (r2_l, RMSE_l, MAE_l, DAE_l), (r2_id_d, RMSE_id_d, MAE_id_d, DAE_id_d)
def run_iter(self, Niter = 10, n_folds = 5):
r2_ll, RMSE_ll, MAE_ll = list(), list(), list()
for ii in range( Niter):
r2_l, RMSE_l, MAE_l = self.run( n_folds)
r2_ll.extend( r2_l), RMSE_ll.extend( RMSE_l), MAE_ll.extend( MAE_l)
if self.disp:
print 'Mean and Std of R2 with 10 times 5-fold CV are', np.mean( r2_ll), np.std( r2_ll)
print 'Mean and Std of RMSE with 10 times 5-fold CV are', np.mean( RMSE_ll), np.std( RMSE_ll)
print 'Mean and Std of MAE with 10 times 5-fold CV are', np.mean( MAE_ll), np.std( MAE_ll)
pdw = pd.DataFrame()
pdw['Measure'] = ['R2', 'R2', 'RMSE', 'RMSE', 'MAE', 'MAE']
pdw['Mode'] = ['Mean', 'STD'] * 3
pdw['Value'] = [np.mean( r2_ll), np.std( r2_ll),
np.mean( RMSE_ll), np.std( RMSE_ll),
np.mean( MAE_ll), np.std( MAE_ll)]
return pdw
def run4_iter(self, Niter = 10, n_folds = 5):
r2_ll, RMSE_ll, MAE_ll, DAE_ll = list(), list(), list(), list()
for ii in range( Niter):
r2_l, RMSE_l, MAE_l, DAE_l = self.run4( n_folds)
r2_ll.extend( r2_l), RMSE_ll.extend( RMSE_l), MAE_ll.extend( MAE_l), DAE_ll.extend( DAE_l)
if self.disp:
print 'Mean and Std of R2 with 10 times 5-fold CV are', np.mean( r2_ll), np.std( r2_ll)
print 'Mean and Std of RMSE with 10 times 5-fold CV are', np.mean( RMSE_ll), np.std( RMSE_ll)
print 'Mean and Std of MAE with 10 times 5-fold CV are', np.mean( MAE_ll), np.std( MAE_ll)
print 'Mean and Std of DAE with 10 times 5-fold CV are', np.mean( DAE_ll), np.std( DAE_ll)
pdw = pd.DataFrame()
pdw['Measure'] = ['R2', 'R2', 'RMSE', 'RMSE', 'MAE', 'MAE', 'DAE', 'DAE']
pdw['Mode'] = ['Mean', 'STD'] * 4
pdw['Value'] = [np.mean( r2_ll), np.std( r2_ll),
np.mean( RMSE_ll), np.std( RMSE_ll),
np.mean( MAE_ll), np.std( MAE_ll),
np.mean( DAE_ll), np.std( DAE_ll)]
return pdw
def run_id_iter(self, Niter = 10, n_folds = 5):
r2_ll, RMSE_ll, MAE_ll = list(), list(), list()
r2_lld, RMSE_lld, MAE_lld = {}, {}, {}
for type_id in self.type_l:
r2_lld[type_id] = list()
RMSE_lld[type_id] = list()
MAE_lld[type_id] = list()
for ii in range( Niter):
(r2_l, RMSE_l, MAE_l), (r2_ld, RMSE_ld, MAE_ld) = self.run_id( n_folds)
# Processing for merged results
r2_ll.extend( r2_l), RMSE_ll.extend( RMSE_l), MAE_ll.extend( MAE_l)
# Processing for each result
for type_id in self.type_l:
r2_lld[type_id].extend( r2_ld[type_id])
RMSE_lld[type_id].extend( RMSE_ld[type_id])
MAE_lld[type_id].extend( MAE_ld[type_id])
if self.disp:
print 'I. Merged results:'
print 'Mean and Std of R2 with 10 times 5-fold CV are', np.mean( r2_ll), np.std( r2_ll)
print 'Mean and Std of RMSE with 10 times 5-fold CV are', np.mean( RMSE_ll), np.std( RMSE_ll)
print 'Mean and Std of MAE with 10 times 5-fold CV are', np.mean( MAE_ll), np.std( MAE_ll)
print 'II. Each results:'
for type_id in self.type_l:
print "Type", type_id
                print 'Mean and Std of R2 with 10 times 5-fold CV are', np.mean( r2_lld[ type_id]), np.std( r2_lld[type_id])
                print 'Mean and Std of RMSE with 10 times 5-fold CV are', np.mean( RMSE_lld[ type_id]), np.std( RMSE_lld[type_id])
                print 'Mean and Std of MAE with 10 times 5-fold CV are', np.mean( MAE_lld[type_id]), np.std( MAE_lld[type_id])
pdw_l = []
pdw = pd.DataFrame()
pdw['Measure'] = ['R2', 'R2', 'RMSE', 'RMSE', 'MAE', 'MAE']
pdw['Mode'] = ['Mean', 'STD'] * 3
pdw['Value'] = [np.mean( r2_ll), np.std( r2_ll), np.mean( RMSE_ll), np.std( RMSE_ll), np.mean( MAE_ll), np.std( MAE_ll)]
pdw['Type'] = [ "Merged"] * 6 # checking whether different types can be included in a column vector.
pdw_l.append( pdw)
for type_id in self.type_l:
pdw = pd.DataFrame()
pdw['Measure'] = ['R2', 'R2', 'RMSE', 'RMSE', 'MAE', 'MAE']
pdw['Mode'] = ['Mean', 'STD'] * 3
            pdw['Value'] = [np.mean( r2_lld[ type_id]), np.std( r2_lld[ type_id]),
                np.mean( RMSE_lld[ type_id]), np.std( RMSE_lld[ type_id]),
                np.mean( MAE_lld[ type_id]), np.std( MAE_lld[ type_id])]
pdw['Type'] = [ type_id] * 6
pdw_l.append( pdw)
return pd.concat( pdw_l)
def run4_id_iter(self, Niter = 10, n_folds = 5):
r2_ll, RMSE_ll, MAE_ll, DAE_ll = list(), list(), list(), list()
r2_lld, RMSE_lld, MAE_lld, DAE_lld = {}, {}, {}, {}
for type_id in self.type_l:
r2_lld[type_id] = list()
RMSE_lld[type_id] = list()
MAE_lld[type_id] = list()
DAE_lld[type_id] = list()
for ii in range( Niter):
(r2_l, RMSE_l, MAE_l, DAE_l), (r2_ld, RMSE_ld, MAE_ld, DAE_ld) = self.run4_id( n_folds)
# Processing for merged results
r2_ll.extend( r2_l), RMSE_ll.extend( RMSE_l), MAE_ll.extend( MAE_l), DAE_ll.extend( DAE_l)
# Processing for each result
for type_id in self.type_l:
r2_lld[type_id].extend( r2_ld[type_id])
RMSE_lld[type_id].extend( RMSE_ld[type_id])
MAE_lld[type_id].extend( MAE_ld[type_id])
DAE_lld[type_id].extend( DAE_ld[type_id])
if self.disp:
print 'I. Merged results:'
print 'Mean and Std of R2 with 10 times 5-fold CV are', np.mean( r2_ll), np.std( r2_ll)
print 'Mean and Std of RMSE with 10 times 5-fold CV are', np.mean( RMSE_ll), np.std( RMSE_ll)
print 'Mean and Std of MAE with 10 times 5-fold CV are', np.mean( MAE_ll), np.std( MAE_ll)
            print 'Mean and Std of DAE with 10 times 5-fold CV are', np.mean( DAE_ll), np.std( DAE_ll)
print 'II. Each results:'
for type_id in self.type_l:
print "Type", type_id
                print 'Mean and Std of R2 with 10 times 5-fold CV are', np.mean( r2_lld[ type_id]), np.std( r2_lld[type_id])
                print 'Mean and Std of RMSE with 10 times 5-fold CV are', np.mean( RMSE_lld[ type_id]), np.std( RMSE_lld[type_id])
                print 'Mean and Std of MAE with 10 times 5-fold CV are', np.mean( MAE_lld[type_id]), np.std( MAE_lld[type_id])
                print 'Mean and Std of DAE with 10 times 5-fold CV are', np.mean( DAE_lld[type_id]), np.std( DAE_lld[type_id])
pdw_l = []
pdw = pd.DataFrame()
pdw['Measure'] = ['R2', 'R2', 'RMSE', 'RMSE', 'MAE', 'MAE', 'DAE', 'DAE']
pdw['Mode'] = ['Mean', 'STD'] * 4
pdw['Value'] = [np.mean( r2_ll), np.std( r2_ll),
np.mean( RMSE_ll), np.std( RMSE_ll),
np.mean( MAE_ll), np.std( MAE_ll),
np.mean( DAE_ll), np.std( DAE_ll)]
pdw['Type'] = [ "Merged"] * 8 # checking whether different types can be included in a column vector.
pdw_l.append( pdw)
for type_id in self.type_l:
pdw = pd.DataFrame()
pdw['Measure'] = ['R2', 'R2', 'RMSE', 'RMSE', 'MAE', 'MAE', 'DAE', 'DAE']
pdw['Mode'] = ['Mean', 'STD'] * 4
            pdw['Value'] = [np.mean( r2_lld[ type_id]), np.std( r2_lld[ type_id]),
                np.mean( RMSE_lld[ type_id]), np.std( RMSE_lld[ type_id]),
                np.mean( MAE_lld[ type_id]), np.std( MAE_lld[ type_id]),
                np.mean( DAE_lld[ type_id]), np.std( DAE_lld[ type_id])]
pdw['Type'] = [ type_id] * 8
pdw_l.append( pdw)
return pd.concat( pdw_l)
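# Illustrative usage sketch (not part of the original pipeline): pdr is assumed to be a
# DataFrame with 'E_QC', 'Em' and 'Type' columns, matching the defaults above.
def _example_cv_sep_regress(pdr):
    cv = Cv_sep_regress( pdr, E_QC = "E_QC", Em = "Em", type_name = "Type",
                         type_l = [1, 2, 3, 4], disp = False)
    pdw = cv.run4_iter( Niter = 10, n_folds = 5)  # summary frame of R2/RMSE/MAE/DAE
    return pdw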
def opt_func(w, X,y):
U = np.linalg.norm(X.dot(w) - y)
return U
def coefNormalized(w):
return sum(w)-1
class Becca( object):
def __init__(self, X_shape1, opt_func, coefNormalized):
self.init_cond = np.ones(X_shape1)/X_shape1
self.cons = []
self.cons.append({'type': 'eq', 'fun': coefNormalized })
self.bounds = []
for i in range(0,len(self.init_cond)):
self.bounds.append((0,100))
self.opt_func = opt_func
self.coefNormalized = coefNormalized
def run(self, X, Gr):
#Optimize ensemble
kf = cross_validation.KFold(Gr.shape[0], n_folds=5)
errors = []
coeffs = []
for train_index, test_index in kf:
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = Gr[train_index], Gr[test_index]
res = minimize(self.opt_func, self.init_cond, args=(X_train, y_train),
method='SLSQP',
constraints = self.cons, bounds = self.bounds)
error = np.abs(X_test.dot(res['x'])-y_test)
errors.append(error)
coeff = res['x']
coeffs.append(coeff)
print "Average Mean-AE:", np.mean(np.mean(errors,axis=1))
print "Average Std-AE:", np.mean(np.std(errors, axis = 1))
print "Average Median-AE:", np.mean(np.median(errors, axis = 1))
return errors, coeffs
def alphaNormalized(w):
alphas= w[::2]
return sum(alphas)-1
def betaNormalized(w):
betas= w[1::2]
return sum(betas)-1
class Becca_half( object):
def __init__(self, X_shape1, opt_func, alphaNormalized, betaNormalized):
nhalf = X_shape1/2
self.init_cond = np.ones(X_shape1)/nhalf
# Add constraints
self.cons = []
self.cons.append({'type': 'eq', 'fun': alphaNormalized })
self.cons.append({'type': 'eq', 'fun': betaNormalized })
self.bounds = []
for i in range(0,len(self.init_cond)):
self.bounds.append((0,100))
self.opt_func = opt_func
self.coefNormalized = coefNormalized
def run(self, X, Gr):
#Optimize ensemble
kf = cross_validation.KFold(Gr.shape[0], n_folds= 5)
errors = []
coeffs = []
for train_index, test_index in kf:
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = Gr[train_index], Gr[test_index]
res = minimize(self.opt_func, self.init_cond, args=(X_train, y_train),
method='SLSQP',
constraints = self.cons, bounds = self.bounds)
print res
error = np.abs(X_test.dot(res['x'])-y_test)
errors.append(error)
coeff = res['x']
coeffs.append(coeff)
print "Average Mean-AE:", np.mean(np.mean(errors,axis=1))
print "Average Std-AE:", np.mean(np.std(errors, axis = 1))
print "Average Median-AE:", np.mean(np.median(errors, axis = 1))
return errors, coeffs
def run_base(self, X, Gr):
#Optimize ensemble
kf = cross_validation.KFold(Gr.shape[0], n_folds= 5)
errors = []
coeffs = []
for train_index, test_index in kf:
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = Gr[train_index], Gr[test_index]
res = {}
res['x'] = np.array( [0, 0, 1, 1.0]).T
error = np.abs(X_test.dot( res['x'])-y_test)
print error.shape
errors.append(error)
coeff = res['x']
coeffs.append(coeff)
print "Average Mean-AE:", np.mean(np.mean(errors,axis=1))
print "Average Std-AE:", np.mean(np.std(errors, axis = 1))
print "Average Median-AE:", np.mean(np.median(errors, axis = 1))
return errors, coeffs
# Becca codes
def convert_GtoE( G):
kcal_to_eV = 0.0433641
z = 2.0
V_to_mV = 1000
E = G * -kcal_to_eV * V_to_mV / z
return E
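# Worked example for convert_GtoE() (illustrative number): with kcal_to_eV = 0.0433641,
# z = 2 and V_to_mV = 1000, the conversion is E[mV] = -21.68205 * G[kcal/mol], so
# G = -10 kcal/mol maps to E of about +216.8 mV (more negative reaction free energies
# give more positive potentials).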
def opt_func(w, X,y):
U = np.linalg.norm(X.dot(w) - y)
return U
def coefNormalized(w):
return sum(w)-1
class Becca( object):
def __init__(self, X_shape1,
opt_func = opt_func,
coefNormalized = coefNormalized):
self.init_cond = np.ones(X_shape1)/X_shape1
self.cons = []
self.cons.append({'type': 'eq', 'fun': coefNormalized })
self.bounds = []
for i in range(0,len(self.init_cond)):
self.bounds.append((0,1))
self.opt_func = opt_func
self.coefNormalized = coefNormalized
def run(self, X, Gr):
#Optimize ensemble
kf = cross_validation.KFold(Gr.shape[0], n_folds=5)
errors = []
coeffs = []
for train_index, test_index in kf:
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = Gr[train_index], Gr[test_index]
res = minimize(self.opt_func, self.init_cond, args=(X_train, y_train),
method='SLSQP',
constraints = self.cons, bounds = self.bounds)
error = np.abs(X_test.dot(res['x'])-y_test)
errors.append(error)
coeff = res['x']
coeffs.append(coeff)
print "Average Mean-AE:", np.mean(np.mean(errors,axis=1))
print "Average Std-AE:", np.mean(np.std(errors, axis = 1))
print "Average Median-AE:", np.mean(np.median(errors, axis = 1))
return errors, coeffs
def alphaNormalized(w):
alphas= w[::2]
return sum(alphas)-1
def betaNormalized(w):
betas= w[1::2]
return sum(betas)-1
class Becca_half( object):
def __init__(self, X_shape1,
opt_func = opt_func,
alphaNormalized = alphaNormalized,
betaNormalized = betaNormalized):
nhalf = X_shape1/2
self.init_cond = np.ones(X_shape1)/nhalf
# Add constraints
self.cons = []
self.cons.append({'type': 'eq', 'fun': alphaNormalized })
self.cons.append({'type': 'eq', 'fun': betaNormalized })
self.bounds = []
for i in range(0,len(self.init_cond)):
self.bounds.append((0,1))
self.opt_func = opt_func
self.coefNormalized = coefNormalized
def condition( self, on = False):
"""
        Turn off the constraints if on is False.
        If on is True, turn the constraints back on.
"""
if on:
self.cons = self.cons_save
else:
self.cons_save = self.cons
self.cons = []
def run(self, X, Gr, Navg = 10, disp = False):
#Optimize ensemble
errors = []
coeffs = []
for it in range( Navg):
kf = cross_validation.KFold(Gr.shape[0], n_folds= 5, shuffle = True)
for train_index, test_index in kf:
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = Gr[train_index], Gr[test_index]
res = minimize(self.opt_func, self.init_cond, args=(X_train, y_train),
method='SLSQP',
constraints = self.cons, bounds = self.bounds)
error = np.abs(X_test.dot(res['x'])-y_test)
if disp:
print res
print error.shape
errors.append(error)
coeff = res['x']
coeffs.append(coeff)
if disp:
print np.shape(errors)
            # map() is used since this is not a rectangular list:
            # the number of inner elements of each element is not the same.
print "Average Mean-AE:", np.mean(map(np.mean, errors))
print "Average Std-AE:", np.mean(map(np.std, errors))
print "Average Median-AE:", np.mean(map(np.median, errors))
return errors, coeffs
def run_base(self, X, Gr):
#Optimize ensemble
kf = cross_validation.KFold(Gr.shape[0], n_folds= 5)
errors = []
coeffs = []
for train_index, test_index in kf:
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = Gr[train_index], Gr[test_index]
res = {}
res['x'] = np.array( [0, 0, 1.0, 1.0]).T
error = np.abs(X_test.dot( res['x'])-y_test)
# print error.shape
errors.append(error)
coeff = res['x']
coeffs.append(coeff)
print "Average Mean-AE:", np.mean(np.mean(errors,axis=1))
print "Average Std-AE:", np.mean(np.std(errors, axis = 1))
print "Average Median-AE:", np.mean(np.median(errors, axis = 1))
return errors, coeffs
def run_alpha(self, X, Gr, alpha = convert_GtoE(1.0)):
"""
The results will be multiplied by alpha so that
the value will be equivalent to the corresponding target value.
"""
kf = cross_validation.KFold(Gr.shape[0], n_folds= 5)
errors = []
coeffs = []
for train_index, test_index in kf:
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = Gr[train_index], Gr[test_index]
res = minimize(self.opt_func, self.init_cond, args=(X_train, y_train),
method='SLSQP',
constraints = self.cons, bounds = self.bounds)
error = np.abs(alpha * X_test.dot(res['x'])-y_test)
errors.append(error)
coeff = res['x']
coeffs.append(coeff)
print "Average Mean-AE:", np.mean(np.mean(errors,axis=1))
print "Average Std-AE:", np.mean(np.std(errors, axis = 1))
print "Average Median-AE:", np.mean(np.median(errors, axis = 1))
return errors, coeffs
def run_alpha_base(self, X, Gr, alpha = convert_GtoE(1.0)):
"""
The results will be multiplied by alpha so that
the value will be equivalent to the corresponding target value.
"""
kf = cross_validation.KFold(Gr.shape[0], n_folds= 5)
errors = []
coeffs = []
for train_index, test_index in kf:
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = Gr[train_index], Gr[test_index]
res = {}
res['x'] = np.array( [0, 0, 1.0, 1.0]).T
error = np.abs(alpha * X_test.dot(res['x'])-y_test)
errors.append(error)
coeff = res['x']
coeffs.append(coeff)
print "Average Mean-AE:", np.mean(np.mean(errors,axis=1))
print "Average Std-AE:", np.mean(np.std(errors, axis = 1))
print "Average Median-AE:", np.mean(np.median(errors, axis = 1))
return errors, coeffs
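# Illustrative usage sketch (not part of the original pipeline): X is assumed to be an
# (n, 4) prediction matrix whose even-indexed columns form one weight group and
# odd-indexed columns the other (see alphaNormalized / betaNormalized); Gr is the
# target vector.
def _example_becca_half_usage(X, Gr):
    bh = Becca_half( X.shape[1])
    errors, coeffs = bh.run( X, Gr, Navg = 10)      # constrained ensemble, repeated 5-fold CV
    base_errors, base_coeffs = bh.run_base( X, Gr)  # fixed-weight baseline [0, 0, 1, 1]
    return errors, coeffs, base_errors, base_coeffs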
def get_xM4_yV( pdr, Em_type = "Em_in"):
"""
    Read the arrays stored as strings in each xM4 element.
"""
xM4_str = pdr.xM4
xM4_l = [ eval( x) for x in xM4_str]
xM = np.mat( xM4_l)
yV = np.mat( pdr[Em_type]).T
return xM, yV
def get_xM2_yV( pdr, Em_type = "Em_in", mode = "B3LYP"):
xM4_str = pdr.xM4
xM4_l = [ eval( x) for x in xM4_str]
xM4 = np.mat( xM4_l)
if mode == "B3LYP":
xM2 = xM4[:,0:2]
elif mode == "TPSS0":
xM2 = xM4[:,2:4]
else:
raise ValueError("{} is not supported.".format( mode))
yV = np.mat( pdr[Em_type]).T
return xM2, yV
def _get_xM_yV_r0( pdr, Em_type = "Em_in", mode = "B3LYP"):
E_QC = "E_QC({})".format( mode)
xM = np.mat( pdr[E_QC]).T
yV = np.mat( pdr[Em_type]).T
return xM, yV
def get_xM_yV( pdr, Em_type = "Em_in", mode = "B3LYP"):
def get_xM_a( m):
E_QC = "E_QC({})".format( m)
xM_a = pdr[E_QC].values
return xM_a
if type(mode) == list:
xM = np.mat( map( get_xM_a, mode)).T
else:
xM = np.mat( get_xM_a( mode)).T
yV = np.mat( pdr[Em_type]).T
return xM, yV
class _Median_r0( object):
"""
This function will obtain all results for redox potential of metabolism.
Pandas frames are used for storing input and output.
The seprate results are obtained by different member functions.
"""
def __init__( self, input_od, more_in_od, disp = False):
"""
        input_od is used by all member functions;
        more_in_od is used only during initialization.
"""
self.od = input_od
self.pdr = pd.read_csv( more_in_od["in_file"])
self.pdo = pd.DataFrame()
self.out_file = more_in_od['out_file']
self.types_l = list(set(self.pdr["Type"].tolist()))
print "All types are", self.types_l
        # Define constant-like variables (names starting with a capital letter) on the class,
        # so they can be used without extra input-variable processing.
self.Disp = disp
def get_xM_yV(self, pdr):
"""
Depending on an operation mode,
xM and yV are selected from the input dataframe.
"""
# Input ---------------------
# pdr = self.pdr
# ---------------------------
if self.od["H + S"][0] == True \
and self.od["QC Models (Family ID)"][0] == ["B3LYP", "TPSS0"]:
xM, yV = get_xM4_yV( pdr, "Em_in")
elif self.od["H + S"][0] == True \
and len(self.od["QC Models (Family ID)"][0]) == 1:
mode = self.od["QC Models (Family ID)"][0][0]
xM, yV = get_xM2_yV( pdr, "Em_in", mode = mode)
elif self.od["H + S"][0] == False \
and len(self.od["QC Models (Family ID)"][0]) == 1:
mode = self.od["QC Models (Family ID)"][0][0]
xM, yV = get_xM_yV( pdr, "Em_in", mode = mode)
return xM, yV
def each_noregress( self):
"""
Get median value for each group without regression.
"""
# Input ---------------------
pdr = self.pdr
# ---------------------------
pdo_frame = pd.DataFrame()
for type_id in self.types_l:
# If the base parameter is useful, use it.
            od = self.od.copy() # inherit some values from the self variable
# xM4 terminology will not be used for generalization.
#xM, yV = jadrian.get_xM2_yV( pdr[ pdr.Type == type_id], "Em_in", mode = od['QC Models (Family ID)'][0][0])
xM, yV = self.get_xM_yV( pdr[ pdr.Type == type_id])
mdae = jgrid.mdae_no_regression( xM, yV, ldisp = self.Disp)
od['CV Mode'] = ['No regress']
od['Group(s)'] = [type_id]
od['AD: mean (MAD)'] = [ mdae]
od['AD: std'] = [ 0]
od['AD: vector'] = [[mdae]]
od['(coef_,intercept_)'] = ['t.b.d.']
# od['results'] = [yV.A1]
pdo_i = pd.DataFrame( od)
#print pdo_i['QC Models (Family ID)'][0]
#print pdo_i
pdo_frame = pdo_frame.append( pdo_i, ignore_index=True)
# Return output data using self variables ------------------
self._pdo_frame = pdo_frame
self.pdo = self.pdo.append( pdo_frame, ignore_index=True)
# ----------------------------------------------------------
def _each_r0( self):
"""
Get median value for each group.
These values will be used to generate global independent regression (ensemble).
"""
# Input ---------------------
pdr = self.pdr
# ---------------------------
pdo_frame = pd.DataFrame()
for type_id in [1,2,3,4]:
# If the base parameter is useful, use it.
            od = self.od.copy() # inherit some values from the self variable
# xM4 terminology will not be used for generalization.
#xM, yV = jadrian.get_xM2_yV( pdr[ pdr.Type == type_id], "Em_in", mode = od['QC Models (Family ID)'][0][0])
xM, yV = self.get_xM_yV( pdr[ pdr.Type == type_id])
if type_id == 1:
o_d = jgrid.cv_LinearRegression_It( xM, yV, n_folds= xM.shape[0], scoring='median_absolute_error', N_it = 1, ldisp = self.Disp)
else:
o_d = jgrid.cv_LinearRegression_It( xM, yV, scoring='median_absolute_error', N_it = 10, ldisp = self.Disp)
# saving results
# od = OrderedDict()
# od['QC Models (Family ID)'] = [["TPSS0", "B3LYP"]]
# od['H + S'] = [True]
# od['CV Mode'] = ['10*5KF']
# od['Em type'] = ['Chemical potential']
if type_id == 1:
od['CV Mode'] = ['LOO']
else:
od['CV Mode'] = ['10*5KF']
od['Group(s)'] = [type_id]
od['AD: mean (MAD)'] = [o_d['mean']]
od['AD: std'] = [o_d['std']]
od['AD: vector'] = [o_d['list']]
            if self.Disp:
print "Type", type_id, ": len(vector) =", len( o_d['list'])
print "len(ci) =", len( o_d['ci'])
pdo_i = pd.DataFrame( od)
#print pdo_i['QC Models (Family ID)'][0]
#print pdo_i
pdo_frame = pdo_frame.append( pdo_i, ignore_index=True)
# Return output data using self variables ------------------
self._pdo_frame = pdo_frame
self.pdo = self.pdo.append( pdo_frame, ignore_index=True)
# ----------------------------------------------------------
def each( self):
"""
Get median value for each group.
These values will be used to generate global independent regression (ensemble).
"""
# Input ---------------------
pdr = self.pdr
# ---------------------------
pdo_frame = pd.DataFrame()
for type_id in self.types_l:
# If the base parameter is useful, use it.
            od = self.od.copy() # inherit some values from the self variable
# xM4 terminology will not be used for generalization.
#xM, yV = jadrian.get_xM2_yV( pdr[ pdr.Type == type_id], "Em_in", mode = od['QC Models (Family ID)'][0][0])
xM, yV = self.get_xM_yV( pdr[ pdr.Type == type_id])
if type_id == 1:
o_d = jgrid.cv_LinearRegression_ci_It( xM, yV, n_folds= xM.shape[0], scoring='median_absolute_error', N_it = 1, ldisp = self.Disp)
else:
o_d = jgrid.cv_LinearRegression_ci_It( xM, yV, scoring='median_absolute_error', N_it = 10, ldisp = self.Disp)
# saving results
# od = OrderedDict()
# od['QC Models (Family ID)'] = [["TPSS0", "B3LYP"]]
# od['H + S'] = [True]
# od['CV Mode'] = ['10*5KF']
# od['Em type'] = ['Chemical potential']
if type_id == 1:
od['CV Mode'] = ['LOO']
else:
od['CV Mode'] = ['10*5KF']
od['Group(s)'] = [type_id]
od['AD: mean (MAD)'] = [o_d['mean']]
od['AD: std'] = [o_d['std']]
od['AD: vector'] = [o_d['list']]
od['(coef_,intercept_)'] = [o_d['ci']]
if self.Disp:
print "Type", type_id, ": len(vector) =", len( o_d['list'])
print "len(ci) =", len( o_d['ci'])
pdo_i = pd.DataFrame( od)
#print pdo_i['QC Models (Family ID)'][0]
#print pdo_i
pdo_frame = pdo_frame.append( pdo_i, ignore_index=True)
# Return output data using self variables ------------------
self._pdo_frame = pdo_frame
self.pdo = self.pdo.append( pdo_frame, ignore_index=True)
# ----------------------------------------------------------
def indepedentMultiple(self, groups = [1,2,3,4]):
# Input --------------------
pdo_frame = self._pdo_frame
od = self.od.copy()
# --------------------------
ad_l = []
for group in groups:
p = pdo_frame[ pdo_frame['Group(s)'] == group]
n = len( p['AD: vector'].tolist()[0])
if self.Disp:
print "Group", group, "with", n, "AD elements"
ad_l.extend( p['AD: vector'].tolist()[0])
od['Group(s)'] = ['Independent multiple: {}'.format( groups)]
od['AD: mean (MAD)'] = [ np.mean( ad_l)]
od['AD: std'] = [ np.std( ad_l)]
od['AD: vector'] = [ ad_l]
od['(coef_,intercept_)'] = ['t.b.d.']
pdo_i = pd.DataFrame( od)
# Return -------------------------------------
self.pdo = self.pdo.append( pdo_i, ignore_index=True)
# --------------------------------------------
def indepedentMultiple_nr(self, groups = [1,2,3,4]):
"""
indepedentMultiple for no regress case
"""
# Input --------------------
pdo_frame = self._pdo_frame
od = self.od.copy()
# --------------------------
ad_l = []
for group in groups:
p = pdo_frame[ pdo_frame['Group(s)'] == group]
n = len( p['AD: vector'].tolist()[0])
if self.Disp:
print "Group", group, "with", n, "AD elements"
ad_l.extend( p['AD: vector'].tolist()[0])
od['CV Mode'] = ['No regress'] # this part is included.
od['Group(s)'] = ['Independent multiple: {}'.format( groups)]
od['AD: mean (MAD)'] = [ np.mean( ad_l)]
od['AD: std'] = [ np.std( ad_l)]
od['AD: vector'] = [ ad_l]
od['(coef_,intercept_)'] = ['t.b.d.']
# od['results'] = ['See each']
pdo_i = pd.DataFrame( od)
# Return -------------------------------------
self.pdo = self.pdo.append( pdo_i, ignore_index=True)
# --------------------------------------------
def Is_oneQC_noHS(self, od):
"""
        Since the method name starts with a capital letter (following the convention above),
        it can be used in general without relying on self variables.
"""
if len(od["QC Models (Family ID)"][0]) == 1 and \
od['H + S'][0] == False:
return True
else:
return False
def each_indepedentMultiple(self):
if self.Is_oneQC_noHS( self.od):
"""
            The no-regression case is included only for single-QC-model, no-H+S runs.
"""
self.each_noregress()
self.indepedentMultiple_nr(self.types_l)
if self.types_l != [1,2,4]:
# if full set is not [1,2,4], we run for [1,2,4]
self.indepedentMultiple_nr([1,2,4])
self.globalSingle_nr(self.types_l)
if self.types_l != [1,2,4]:
self.globalSingle_nr([1,2,4])
self.each()
self.indepedentMultiple(self.types_l)
if self.types_l != [1,2,4]:
self.indepedentMultiple([1,2,4])
def _globalSingle_r0(self, groups = [1,2,3,4]):
# Input ------------
od = self.od.copy()
pdr = self.pdr
# ------------------
# xM, yV = jadrian.get_xM2_yV( pdr, "Em_in", mode = od['QC Models (Family ID)'][0][0])
# xM, yV = get_xM4_yV( pdr, "Em_in")
xM, yV = self.get_xM_yV( pdr.query( "Type in {}".format( groups)))
o_d = jgrid.cv_LinearRegression_It( xM, yV, scoring='median_absolute_error', N_it = 10, ldisp = self.Disp)
# Store results into the dataframe
od['Group(s)'] = ['Global single: {}'.format(groups)]
od['AD: mean (MAD)'] = [o_d['mean']]
od['AD: std'] = [o_d['std']]
od['AD: vector'] = [o_d['list']]
od['(coef_,intercept_)'] = ['t.b.d.']
pdo_i = pd.DataFrame( od)
# Return processing -----------------------------
self.pdo = self.pdo.append( pdo_i, ignore_index=True)
# -----------------------------------------------
def globalSingle(self, groups = [1,2,3,4]):
# Input ------------
od = self.od.copy()
pdr = self.pdr
# ------------------
# xM, yV = jadrian.get_xM2_yV( pdr, "Em_in", mode = od['QC Models (Family ID)'][0][0])
# xM, yV = get_xM4_yV( pdr, "Em_in")
xM, yV = self.get_xM_yV( pdr.query( "Type in {}".format( groups)))
# o_d = jgrid.cv_LinearRegression_It( xM, yV, scoring='median_absolute_error', N_it = 10, ldisp = self.Disp)
o_d = jgrid.cv_LinearRegression_ci_It( xM, yV, scoring='median_absolute_error', N_it = 10, ldisp = self.Disp)
# Store results into the dataframe
od['Group(s)'] = ['Global single: {}'.format(groups)]
od['AD: mean (MAD)'] = [o_d['mean']]
od['AD: std'] = [o_d['std']]
od['AD: vector'] = [o_d['list']]
# od['(coef_,intercept_)'] = ['t.b.d.']
od['(coef_,intercept_)'] = [o_d['ci']]
pdo_i = pd.DataFrame( od)
# Return processing -----------------------------
self.pdo = self.pdo.append( pdo_i, ignore_index=True)
# -----------------------------------------------
def globalSingle_nr(self, groups = [1,2,3,4]):
# Input ------------
od = self.od.copy()
pdr = self.pdr
# ------------------
# xM, yV = jadrian.get_xM2_yV( pdr, "Em_in", mode = od['QC Models (Family ID)'][0][0])
# xM, yV = get_xM4_yV( pdr, "Em_in")
xM, yV = self.get_xM_yV( pdr.query( "Type in {}".format( groups)))
mdae = jgrid.mdae_no_regression( xM, yV, ldisp = self.Disp)
od['CV Mode'] = ['No regress']
od['Group(s)'] = ['Global single: {}'.format(groups)]
od['AD: mean (MAD)'] = [ mdae]
od['AD: std'] = [ 0]
od['AD: vector'] = [[mdae]]
od['(coef_,intercept_)'] = ['t.b.d.']
pdo_i = pd.DataFrame( od)
#print pdo_i['QC Models (Family ID)'][0]
#print pdo_i
# Return processing -----------------------------
self.pdo = self.pdo.append( pdo_i, ignore_index=True)
# -----------------------------------------------
def globalSingles(self):
"""
        The global single regression is run for the full set of groups and, if different, for [1,2,4].
"""
self.globalSingle( self.types_l)
if self.types_l != [1,2,4]:
self.globalSingle([1,2,4])
def run(self):
# Calculate median for each group
self.each_indepedentMultiple()
self.globalSingles()
        # Return self so that the object can be used for further (chained) processing.
print "The result dataframe is saved to", self.out_file
self.pdo.to_csv( self.out_file, index = False)
return self
def test(self):
print "Self testing is performed."
print "self.od\n", self.od
print "self.pdr.keys()\n", self.pdr.keys()
print "The final output data frame is as follows:"
print "self.pdo\n", self.pdo
return self
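# Illustrative driving sketch (not part of the original pipeline): the option keys follow
# the ones read inside the class, but the file names and option values are placeholders,
# and the input csv must contain the columns the class expects (e.g. Type, Em_in, xM4).
def _example_median_pipeline():
    input_od = OrderedDict()
    input_od['QC Models (Family ID)'] = [["B3LYP", "TPSS0"]]
    input_od['H + S'] = [True]
    input_od['CV Mode'] = ['10*5KF']
    input_od['Em type'] = ['Chemical potential']
    more_in_od = {'in_file': 'redox_input.csv', 'out_file': 'redox_median_out.csv'}
    med = _Median_r0( input_od, more_in_od, disp = False)
    return med.run()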
class Median( object):
"""
    This class obtains all results for the redox potential of metabolism.
    Pandas frames are used for storing input and output.
    The separate results are obtained by different member functions.
"""
def __init__( self, input_od, more_in_od, disp = False):
"""
        input_od is used by all member functions;
        more_in_od is used only during initialization.
"""
self.od = input_od
self.pdr = pd.read_csv( more_in_od["in_file"])
self.pdo = pd.DataFrame()
self.out_file = more_in_od['out_file']
self.types_l = list(set(self.pdr["Type"].tolist()))
print "All types are", self.types_l
        # Define constant-like variables (names starting with a capital letter) on the class,
        # so they can be used without extra input-variable processing.
self.Disp = disp
def _get_xM_yV_r0(self, pdr):
"""
Depending on an operation mode,
xM and yV are selected from the input dataframe.
"""
# Input ---------------------
# pdr = self.pdr
# ---------------------------
if self.od["H + S"][0] == True \
and self.od["QC Models (Family ID)"][0] == ["B3LYP", "TPSS0"]:
xM, yV = get_xM4_yV( pdr, "Em_in")
elif self.od["H + S"][0] == True \
and len(self.od["QC Models (Family ID)"][0]) == 1:
mode = self.od["QC Models (Family ID)"][0][0]
xM, yV = get_xM2_yV( pdr, "Em_in", mode = mode)
elif self.od["H + S"][0] == False \
and len(self.od["QC Models (Family ID)"][0]) == 1:
mode = self.od["QC Models (Family ID)"][0][0]
xM, yV = get_xM_yV( pdr, "Em_in", mode = mode)
return xM, yV
def get_xM_yV(self, pdr):
"""
Depending on an operation mode,
xM and yV are selected from the input dataframe.
"""
# Input ---------------------
# pdr = self.pdr
# ---------------------------
if self.od["H + S"][0] == True \
and self.od["QC Models (Family ID)"][0] == ["B3LYP", "TPSS0"]:
xM, yV = get_xM4_yV( pdr, "Em_in")
elif self.od["H + S"][0] == True \
and len(self.od["QC Models (Family ID)"][0]) == 1:
mode = self.od["QC Models (Family ID)"][0][0]
xM, yV = get_xM2_yV( pdr, "Em_in", mode = mode)
elif self.od["H + S"][0] == False:
# and len(self.od["QC Models (Family ID)"][0]) == 1:
mode_l = self.od["QC Models (Family ID)"][0]
xM, yV = get_xM_yV( pdr, "Em_in", mode = mode_l)
return xM, yV
def each_noregress( self):
"""
Get median value for each group without regression.
"""
# Input ---------------------
pdr = self.pdr
# ---------------------------
pdo_frame = pd.DataFrame()
for type_id in self.types_l:
# If the base parameter is useful, use it.
            od = self.od.copy() # inherit some values from the self variable
# xM4 terminology will not be used for generalization.
#xM, yV = jadrian.get_xM2_yV( pdr[ pdr.Type == type_id], "Em_in", mode = od['QC Models (Family ID)'][0][0])
xM, yV = self.get_xM_yV( pdr[ pdr.Type == type_id])
mdae = jgrid.mdae_no_regression( xM, yV, ldisp = self.Disp)
od['CV Mode'] = ['No regress']
od['Group(s)'] = [type_id]
od['AD: mean (MAD)'] = [ mdae]
od['AD: std'] = [ 0]
od['AD: vector'] = [[mdae]]
od['(coef_,intercept_)'] = ['t.b.d.']
od['results'] = [xM.A1.tolist()]
pdo_i = pd.DataFrame( od)
#print pdo_i['QC Models (Family ID)'][0]
#print pdo_i
pdo_frame = pdo_frame.append( pdo_i, ignore_index=True)
# Return output data using self variables ------------------
self._pdo_frame = pdo_frame
self.pdo = self.pdo.append( pdo_frame, ignore_index=True)
# ----------------------------------------------------------
def _each_r0( self):
"""
Get median value for each group.
These values will be used to generate global independent regression (ensemble).
"""
# Input ---------------------
pdr = self.pdr
# ---------------------------
pdo_frame = pd.DataFrame()
for type_id in [1,2,3,4]:
# If the base parameter is useful, use it.
            od = self.od.copy() # inherit some values from the self variable
# xM4 terminology will not be used for generalization.
#xM, yV = jadrian.get_xM2_yV( pdr[ pdr.Type == type_id], "Em_in", mode = od['QC Models (Family ID)'][0][0])
xM, yV = self.get_xM_yV( pdr[ pdr.Type == type_id])
if type_id == 1:
o_d = jgrid.cv_LinearRegression_It( xM, yV, n_folds= xM.shape[0], scoring='median_absolute_error', N_it = 1, ldisp = self.Disp)
else:
o_d = jgrid.cv_LinearRegression_It( xM, yV, scoring='median_absolute_error', N_it = 10, ldisp = self.Disp)
# saving results
# od = OrderedDict()
# od['QC Models (Family ID)'] = [["TPSS0", "B3LYP"]]
# od['H + S'] = [True]
# od['CV Mode'] = ['10*5KF']
# od['Em type'] = ['Chemical potential']
if type_id == 1:
od['CV Mode'] = ['LOO']
else:
od['CV Mode'] = ['10*5KF']
od['Group(s)'] = [type_id]
od['AD: mean (MAD)'] = [o_d['mean']]
od['AD: std'] = [o_d['std']]
od['AD: vector'] = [o_d['list']]
            if self.Disp:
print "Type", type_id, ": len(vector) =", len( o_d['list'])
print "len(ci) =", len( o_d['ci'])
pdo_i = pd.DataFrame( od)
#print pdo_i['QC Models (Family ID)'][0]
#print pdo_i
pdo_frame = pdo_frame.append( pdo_i, ignore_index=True)
# Return output data using self variables ------------------
self._pdo_frame = pdo_frame
self.pdo = self.pdo.append( pdo_frame, ignore_index=True)
# ----------------------------------------------------------
def _each_r1( self):
"""
Get median value for each group.
These values will be used to generate global independent regression (ensemble).
"""
# Input ---------------------
pdr = self.pdr
# ---------------------------
pdo_frame = pd.DataFrame()
for type_id in self.types_l:
# If the base parameter is useful, use it.
            od = self.od.copy() # inherit some values from the self variable
# xM4 terminology will not be used for generalization.
#xM, yV = jadrian.get_xM2_yV( pdr[ pdr.Type == type_id], "Em_in", mode = od['QC Models (Family ID)'][0][0])
xM, yV = self.get_xM_yV( pdr[ pdr.Type == type_id])
if type_id == 1:
o_d = jgrid.cv_LinearRegression_ci_It( xM, yV, n_folds= xM.shape[0], scoring='median_absolute_error', N_it = 1, ldisp = self.Disp)
else:
o_d = jgrid.cv_LinearRegression_ci_It( xM, yV, scoring='median_absolute_error', N_it = 10, ldisp = self.Disp)
# saving results
# od = OrderedDict()
# od['QC Models (Family ID)'] = [["TPSS0", "B3LYP"]]
# od['H + S'] = [True]
# od['CV Mode'] = ['10*5KF']
# od['Em type'] = ['Chemical potential']
if type_id == 1:
od['CV Mode'] = ['LOO']
else:
od['CV Mode'] = ['10*5KF']
od['Group(s)'] = [type_id]
od['AD: mean (MAD)'] = [o_d['mean']]
od['AD: std'] = [o_d['std']]
od['AD: vector'] = [o_d['list']]
od['(coef_,intercept_)'] = [o_d['ci']]
if self.Disp:
print "Type", type_id, ": len(vector) =", len( o_d['list'])
print "len(ci) =", len( o_d['ci'])
pdo_i = pd.DataFrame( od)
#print pdo_i['QC Models (Family ID)'][0]
#print pdo_i
pdo_frame = pdo_frame.append( pdo_i, ignore_index=True)
# Return output data using self variables ------------------
self._pdo_frame = pdo_frame
self.pdo = self.pdo.append( pdo_frame, ignore_index=True)
# ----------------------------------------------------------
def _each_r2( self):
"""
Get median value for each group.
These values will be used to generate global independent regression (ensemble).
"""
# Input ---------------------
pdr = self.pdr
# ---------------------------
pdo_frame = pd.DataFrame()
for type_id in self.types_l:
# If the base parameter is useful, use it.
            od = self.od.copy() # inherit some values from a self variable
# xM4 terminology will not be used for generalization.
#xM, yV = jadrian.get_xM2_yV( pdr[ pdr.Type == type_id], "Em_in", mode = od['QC Models (Family ID)'][0][0])
xM, yV = self.get_xM_yV( pdr[ pdr.Type == type_id])
if type_id == 1:
o_d = jgrid.cv_LinearRegression_ci_pred_It( xM, yV, n_folds= xM.shape[0], scoring='median_absolute_error', N_it = 1, ldisp = self.Disp)
else:
o_d = jgrid.cv_LinearRegression_ci_pred_It( xM, yV, scoring='median_absolute_error', N_it = 10, ldisp = self.Disp)
# saving results
# od = OrderedDict()
# od['QC Models (Family ID)'] = [["TPSS0", "B3LYP"]]
# od['H + S'] = [True]
# od['CV Mode'] = ['10*5KF']
# od['Em type'] = ['Chemical potential']
if type_id == 1:
od['CV Mode'] = ['LOO']
else:
od['CV Mode'] = ['10*5KF']
od['Group(s)'] = [type_id]
od['AD: mean (MAD)'] = [o_d['mean']]
od['AD: std'] = [o_d['std']]
od['AD: vector'] = [o_d['list']]
od['(coef_,intercept_)'] = [o_d['ci']]
od['results'] = [o_d['yVp']]
if self.Disp:
print "Type", type_id, ": len(vector) =", len( o_d['list'])
print "len(ci) =", len( o_d['ci'])
pdo_i = pd.DataFrame( od)
#print pdo_i['QC Models (Family ID)'][0]
#print pdo_i
pdo_frame = pdo_frame.append( pdo_i, ignore_index=True)
# Return output data using self variables ------------------
self._pdo_frame = pdo_frame
self.pdo = self.pdo.append( pdo_frame, ignore_index=True)
# ----------------------------------------------------------
def each( self):
"""
Get median value for each group.
These values will be used to generate global independent regression (ensemble).
The CV Mode can be controlled outside.
"""
# Input ---------------------
pdr = self.pdr
# ---------------------------
pdo_frame = pd.DataFrame()
for type_id in self.types_l:
# If the base parameter is useful, use it.
            od = self.od.copy() # inherit some values from a self variable
# xM4 terminology will not be used for generalization.
#xM, yV = jadrian.get_xM2_yV( pdr[ pdr.Type == type_id], "Em_in", mode = od['QC Models (Family ID)'][0][0])
xM, yV = self.get_xM_yV( pdr[ pdr.Type == type_id])
if od['CV Mode'][0] == 'LOO' or type_id == 1:
o_d = jgrid.cv_LinearRegression_ci_pred_It( xM, yV, n_folds= xM.shape[0], scoring='median_absolute_error', N_it = 1, ldisp = self.Disp)
od['CV Mode'] = ['LOO']
else:
o_d = jgrid.cv_LinearRegression_ci_pred_It( xM, yV, scoring='median_absolute_error', N_it = 10, ldisp = self.Disp)
od['Group(s)'] = [type_id]
od['AD: mean (MAD)'] = [o_d['mean']]
od['AD: std'] = [o_d['std']]
od['AD: vector'] = [o_d['list']]
od['(coef_,intercept_)'] = [o_d['ci']]
od['results'] = [o_d['yVp']]
if self.Disp:
print "Type", type_id, ": len(vector) =", len( o_d['list'])
print "len(ci) =", len( o_d['ci'])
pdo_i = pd.DataFrame( od)
#print pdo_i['QC Models (Family ID)'][0]
#print pdo_i
pdo_frame = pdo_frame.append( pdo_i, ignore_index=True)
# Return output data using self variables ------------------
self._pdo_frame = pdo_frame
self.pdo = self.pdo.append( pdo_frame, ignore_index=True)
# ----------------------------------------------------------
def indepedentMultiple(self, groups = [1,2,3,4]):
# Input --------------------
pdo_frame = self._pdo_frame
od = self.od.copy()
# --------------------------
ad_l = []
for group in groups:
p = pdo_frame[ pdo_frame['Group(s)'] == group]
n = len( p['AD: vector'].tolist()[0])
if self.Disp:
print "Group", group, "with", n, "AD elements"
ad_l.extend( p['AD: vector'].tolist()[0])
od['Group(s)'] = ['Independent multiple: {}'.format( groups)]
od['AD: mean (MAD)'] = [ np.mean( ad_l)]
od['AD: std'] = [ np.std( ad_l)]
od['AD: vector'] = [ ad_l]
od['(coef_,intercept_)'] = ['t.b.d.']
od['results'] = ['Look at each type']
pdo_i = pd.DataFrame( od)
# Return -------------------------------------
self.pdo = self.pdo.append( pdo_i, ignore_index=True)
# --------------------------------------------
def indepedentMultiple_nr(self, groups = [1,2,3,4]):
"""
indepedentMultiple for no regress case
"""
# Input --------------------
pdo_frame = self._pdo_frame
od = self.od.copy()
# --------------------------
ad_l = []
for group in groups:
p = pdo_frame[ pdo_frame['Group(s)'] == group]
n = len( p['AD: vector'].tolist()[0])
if self.Disp:
print "Group", group, "with", n, "AD elements"
ad_l.extend( p['AD: vector'].tolist()[0])
od['CV Mode'] = ['No regress'] # this part is included.
od['Group(s)'] = ['Independent multiple: {}'.format( groups)]
od['AD: mean (MAD)'] = [ np.mean( ad_l)]
od['AD: std'] = [ np.std( ad_l)]
od['AD: vector'] = [ ad_l]
od['(coef_,intercept_)'] = ['t.b.d.']
od['results'] = ['Look at each type']
pdo_i = pd.DataFrame( od)
# Return -------------------------------------
self.pdo = self.pdo.append( pdo_i, ignore_index=True)
# --------------------------------------------
def Is_oneQC_noHS(self, od):
"""
        Since the name starts with a capital letter, this method does not
        rely on self variables and can be used in general.
"""
if len(od["QC Models (Family ID)"][0]) == 1 and \
od['H + S'][0] == False:
return True
else:
return False
def each_indepedentMultiple(self):
if self.Is_oneQC_noHS( self.od):
"""
No regression is included only for one QC models and no H+S cases.
"""
self.each_noregress()
self.indepedentMultiple_nr(self.types_l)
if self.types_l != [1,2,4]:
# if full set is not [1,2,4], we run for [1,2,4]
self.indepedentMultiple_nr([1,2,4])
self.globalSingle_nr(self.types_l)
if self.types_l != [1,2,4]:
self.globalSingle_nr([1,2,4])
self.each()
self.indepedentMultiple(self.types_l)
if self.types_l != [1,2,4]:
self.indepedentMultiple([1,2,4])
def _globalSingle_r0(self, groups = [1,2,3,4]):
# Input ------------
od = self.od.copy()
pdr = self.pdr
# ------------------
# xM, yV = jadrian.get_xM2_yV( pdr, "Em_in", mode = od['QC Models (Family ID)'][0][0])
# xM, yV = get_xM4_yV( pdr, "Em_in")
xM, yV = self.get_xM_yV( pdr.query( "Type in {}".format( groups)))
o_d = jgrid.cv_LinearRegression_It( xM, yV, scoring='median_absolute_error', N_it = 10, ldisp = self.Disp)
# Store results into the dataframe
od['Group(s)'] = ['Global single: {}'.format(groups)]
od['AD: mean (MAD)'] = [o_d['mean']]
od['AD: std'] = [o_d['std']]
od['AD: vector'] = [o_d['list']]
od['(coef_,intercept_)'] = ['t.b.d.']
pdo_i = pd.DataFrame( od)
# Return processing -----------------------------
self.pdo = self.pdo.append( pdo_i, ignore_index=True)
# -----------------------------------------------
def _globalSingle_r1(self, groups = [1,2,3,4]):
# Input ------------
od = self.od.copy()
pdr = self.pdr
# ------------------
# xM, yV = jadrian.get_xM2_yV( pdr, "Em_in", mode = od['QC Models (Family ID)'][0][0])
# xM, yV = get_xM4_yV( pdr, "Em_in")
xM, yV = self.get_xM_yV( pdr.query( "Type in {}".format( groups)))
# o_d = jgrid.cv_LinearRegression_It( xM, yV, scoring='median_absolute_error', N_it = 10, ldisp = self.Disp)
o_d = jgrid.cv_LinearRegression_ci_It( xM, yV, scoring='median_absolute_error', N_it = 10, ldisp = self.Disp)
# Store results into the dataframe
od['Group(s)'] = ['Global single: {}'.format(groups)]
od['AD: mean (MAD)'] = [o_d['mean']]
od['AD: std'] = [o_d['std']]
od['AD: vector'] = [o_d['list']]
# od['(coef_,intercept_)'] = ['t.b.d.']
od['(coef_,intercept_)'] = [o_d['ci']]
pdo_i = pd.DataFrame( od)
# Return processing -----------------------------
self.pdo = self.pdo.append( pdo_i, ignore_index=True)
# -----------------------------------------------
def _globalSingle_r2(self, groups = [1,2,3,4]):
# Input ------------
od = self.od.copy()
pdr = self.pdr
# ------------------
# xM, yV = jadrian.get_xM2_yV( pdr, "Em_in", mode = od['QC Models (Family ID)'][0][0])
# xM, yV = get_xM4_yV( pdr, "Em_in")
xM, yV = self.get_xM_yV( pdr.query( "Type in {}".format( groups)))
# o_d = jgrid.cv_LinearRegression_It( xM, yV, scoring='median_absolute_error', N_it = 10, ldisp = self.Disp)
o_d = jgrid.cv_LinearRegression_ci_pred_It( xM, yV, scoring='median_absolute_error', N_it = 10, ldisp = self.Disp)
# Store results into the dataframe
od['Group(s)'] = ['Global single: {}'.format(groups)]
od['AD: mean (MAD)'] = [o_d['mean']]
od['AD: std'] = [o_d['std']]
od['AD: vector'] = [o_d['list']]
# od['(coef_,intercept_)'] = ['t.b.d.']
od['(coef_,intercept_)'] = [o_d['ci']]
# print "np.shape( o_d['yVp']) =", np.shape( o_d['yVp'])
od['results'] = [o_d['yVp']]
pdo_i = pd.DataFrame( od)
# Return processing -----------------------------
self.pdo = self.pdo.append( pdo_i, ignore_index=True)
# -----------------------------------------------
def globalSingle(self, groups = [1,2,3,4]):
# Input ------------
od = self.od.copy()
pdr = self.pdr
# ------------------
# xM, yV = jadrian.get_xM2_yV( pdr, "Em_in", mode = od['QC Models (Family ID)'][0][0])
# xM, yV = get_xM4_yV( pdr, "Em_in")
xM, yV = self.get_xM_yV( pdr.query( "Type in {}".format( groups)))
# o_d = jgrid.cv_LinearRegression_It( xM, yV, scoring='median_absolute_error', N_it = 10, ldisp = self.Disp)
# o_d = jgrid.cv_LinearRegression_ci_pred_It( xM, yV, scoring='median_absolute_error', N_it = 10, ldisp = self.Disp)
if od['CV Mode'][0] == 'LOO':
            o_d = jgrid.cv_LinearRegression_ci_pred_It( xM, yV, n_folds= xM.shape[0], scoring='median_absolute_error', N_it = 1, ldisp = self.Disp)
else:
o_d = jgrid.cv_LinearRegression_ci_pred_It( xM, yV, scoring='median_absolute_error', N_it = 10, ldisp = self.Disp)
# Store results into the dataframe
od['Group(s)'] = ['Global single: {}'.format(groups)]
od['AD: mean (MAD)'] = [o_d['mean']]
od['AD: std'] = [o_d['std']]
od['AD: vector'] = [o_d['list']]
# od['(coef_,intercept_)'] = ['t.b.d.']
od['(coef_,intercept_)'] = [o_d['ci']]
# print "np.shape( o_d['yVp']) =", np.shape( o_d['yVp'])
od['results'] = [o_d['yVp']]
pdo_i = pd.DataFrame( od)
# Return processing -----------------------------
self.pdo = self.pdo.append( pdo_i, ignore_index=True)
# -----------------------------------------------
def globalSingle_nr(self, groups = [1,2,3,4]):
# Input ------------
od = self.od.copy()
pdr = self.pdr
# ------------------
# xM, yV = jadrian.get_xM2_yV( pdr, "Em_in", mode = od['QC Models (Family ID)'][0][0])
# xM, yV = get_xM4_yV( pdr, "Em_in")
xM, yV = self.get_xM_yV( pdr.query( "Type in {}".format( groups)))
mdae = jgrid.mdae_no_regression( xM, yV, ldisp = self.Disp)
od['CV Mode'] = ['No regress']
od['Group(s)'] = ['Global single: {}'.format(groups)]
od['AD: mean (MAD)'] = [ mdae]
od['AD: std'] = [ 0]
od['AD: vector'] = [[mdae]]
od['(coef_,intercept_)'] = ['t.b.d.']
od['results'] = [ xM.A1.tolist()]
pdo_i = pd.DataFrame( od)
#print pdo_i['QC Models (Family ID)'][0]
#print pdo_i
# Return processing -----------------------------
self.pdo = self.pdo.append( pdo_i, ignore_index=True)
# -----------------------------------------------
def globalSingles(self):
"""
[1,2,3,4] and [1,2,4] will be a set of groups, respectively.
"""
self.globalSingle( self.types_l)
if self.types_l != [1,2,4]:
self.globalSingle([1,2,4])
def run(self):
# Calculate median for each group
self.each_indepedentMultiple()
self.globalSingles()
# Returning self, it can be used recursive processing
print "The result dataframe is saved to", self.out_file
self.pdo.to_csv( self.out_file, index = False)
return self
def test(self):
print "Self testing is performed."
print "self.od\n", self.od
print "self.pdr.keys()\n", self.pdr.keys()
print "The final output data frame is as follows:"
print "self.pdo\n", self.pdo
return self
class MedianMeanStd( object):
"""
    This class obtains all results for the redox potential of metabolism.
    Pandas dataframes are used for storing input and output.
    The separate results are obtained by different member functions.
"""
def __init__( self, input_od, more_in_od, disp = False):
"""
input_od is used all member functions
more_in_od is used only in initialization
"""
self.od = self.init_od( input_od)
self.pdr = pd.read_csv( more_in_od["in_file"])
self.pdo = pd.DataFrame()
self.out_file = more_in_od['out_file']
self.types_l = list(set(self.pdr["Type"].tolist()))
print "All types are", self.types_l
        # Define constant variables (names starting with a capital letter) in the class,
        # so they can be used without input variable processing.
self.Disp = disp
def init_od( self, od): # list up all elements in od for ordering
# od['CV Mode'] = ['No regress']
# This is input_od list
# od = OrderedDict()
# od['QC Models (Family ID)'] = [["B3LYP"]]
# od['H + S'] = [False]
# od['CV Mode'] = ['10*5KF/LOO']
# od['Em type'] = ['Chemical potential']
# od['Regularization'] = ['None']
# od['Bounds/Constraints'] = ['None']
od['Regression'] = []
od['Group(s)'] = []
od['median_abs_err'] = []
od['mean_abs_err'] = []
od['std_abs_err'] = []
od['abs_err_vector'] = []
od['(coef_,intercept_)'] = []
od['results'] = []
return od
def _get_xM_yV_r0(self, pdr):
"""
Depending on an operation mode,
xM and yV are selected from the input dataframe.
"""
# Input ---------------------
# pdr = self.pdr
# ---------------------------
if self.od["H + S"][0] == True \
and self.od["QC Models (Family ID)"][0] == ["B3LYP", "TPSS0"]:
xM, yV = get_xM4_yV( pdr, "Em_in")
elif self.od["H + S"][0] == True \
and len(self.od["QC Models (Family ID)"][0]) == 1:
mode = self.od["QC Models (Family ID)"][0][0]
xM, yV = get_xM2_yV( pdr, "Em_in", mode = mode)
elif self.od["H + S"][0] == False \
and len(self.od["QC Models (Family ID)"][0]) == 1:
mode = self.od["QC Models (Family ID)"][0][0]
xM, yV = get_xM_yV( pdr, "Em_in", mode = mode)
return xM, yV
def get_xM_yV(self, pdr):
"""
Depending on an operation mode,
xM and yV are selected from the input dataframe.
"""
# Input ---------------------
# pdr = self.pdr
# ---------------------------
if self.od["H + S"][0] == True \
and self.od["QC Models (Family ID)"][0] == ["B3LYP", "TPSS0"]:
xM, yV = get_xM4_yV( pdr, "Em_in")
elif self.od["H + S"][0] == True \
and len(self.od["QC Models (Family ID)"][0]) == 1:
mode = self.od["QC Models (Family ID)"][0][0]
xM, yV = get_xM2_yV( pdr, "Em_in", mode = mode)
elif self.od["H + S"][0] == False:
# and len(self.od["QC Models (Family ID)"][0]) == 1:
mode_l = self.od["QC Models (Family ID)"][0]
xM, yV = get_xM_yV( pdr, "Em_in", mode = mode_l)
return xM, yV
def each_mean_base( self, pdr, type_id = 0):
"""
Working with only one model chemistry case.
if type_id is not defined, it becomes 0.
"""
od = self.od.copy()
xM, yV = self.get_xM_yV( pdr)
xv, yv = xM.A1, yV.A1
if od['CV Mode'][0] == 'LOO':
n = xv.shape[0]
kf = cross_validation.KFold( n, n)
yvp = yv.copy()
mean_yvp = yv.copy()
for train, test in kf:
xv_train = xv[ train]
mean_yvp[ test] = np.mean( xv_train)
yvp[ test] = yv[ test] - mean_yvp[test]
o_d = OrderedDict()
o_d['yVp'] = [yv.tolist()]
abs_e = np.abs(yv - yvp)
o_d['list'] = abs_e.tolist()
o_d['median_abs_err'] = np.median( abs_e)
o_d['mean_abs_err'] = np.mean( abs_e)
o_d['std_abs_err'] = np.std( abs_e)
o_d['ci'] = [ (None, np.mat(-x)) for x in mean_yvp]
else:
raise ValueError("This CV Mode {} is not supported yet.".format(od['CV Mode'][0]))
od['Regression'] = ['Mean_Compensation']
od['Group(s)'] = [type_id]
od['median_abs_err'] = [o_d['median_abs_err']]
od['mean_abs_err'] = [o_d['mean_abs_err']]
od['std_abs_err'] = [o_d['std_abs_err']]
od['abs_err_vector'] = [o_d['list']]
od['(coef_,intercept_)'] = [o_d['ci']]
od['results'] = [o_d['yVp']]
if self.Disp:
print "Type", type_id, ": len(vector) =", len( o_d['list'])
print "len(ci) =", len( o_d['ci'])
return od
def each_base( self, pdr, type_id = 0): # pdr = pdr[ pdr.Type == type_id]:
"""
if type_id is not defined, it becomes 0.
"""
od = self.od.copy()
xM, yV = self.get_xM_yV( pdr)
if od['CV Mode'][0] == 'LOO' or type_id == 1:
o_d = jgrid.cv_LinearRegression_ci_pred_full_It( xM, yV, n_folds= xM.shape[0],
N_it = 1, ldisp = self.Disp)
od['CV Mode'] = ['LOO']
else:
o_d = jgrid.cv_LinearRegression_ci_pred_full_It( xM, yV, N_it = 10, ldisp = self.Disp)
od['Regression'] = ['Linear']
od['Group(s)'] = [type_id]
od['median_abs_err'] = [o_d['median_abs_err']]
od['mean_abs_err'] = [o_d['mean_abs_err']]
od['std_abs_err'] = [o_d['std_abs_err']]
od['abs_err_vector'] = [o_d['list']]
od['(coef_,intercept_)'] = [o_d['ci']]
od['results'] = [o_d['yVp']]
if self.Disp:
print "Type", type_id, ": len(vector) =", len( o_d['list'])
print "len(ci) =", len( o_d['ci'])
return od
def _each_r0( self):
"""
Get median value for each group.
These values will be used to generate global independent regression (ensemble).
The CV Mode can be controlled outside.
"""
# Input ---------------------
pdr = self.pdr
# ---------------------------
pdo_frame = pd.DataFrame()
for type_id in self.types_l:
# If the base parameter is useful, use it.
            od = self.od.copy() # inherit some values from a self variable
# xM4 terminology will not be used for generalization.
#xM, yV = jadrian.get_xM2_yV( pdr[ pdr.Type == type_id], "Em_in", mode = od['QC Models (Family ID)'][0][0])
xM, yV = self.get_xM_yV( pdr[ pdr.Type == type_id])
if od['CV Mode'][0] == 'LOO' or type_id == 1:
o_d = jgrid.cv_LinearRegression_ci_pred_full_It( xM, yV, n_folds= xM.shape[0], N_it = 1, ldisp = self.Disp)
od['CV Mode'] = ['LOO']
else:
o_d = jgrid.cv_LinearRegression_ci_pred_full_It( xM, yV, N_it = 10, ldisp = self.Disp)
od['Group(s)'] = [type_id]
            od['median_abs_err'] = [o_d['median_abs_err']]
od['mean_abs_err'] = [o_d['mean_abs_err']]
od['std_abs_err'] = [o_d['std_abs_err']]
od['abs_err_vector'] = [o_d['list']]
od['(coef_,intercept_)'] = [o_d['ci']]
od['results'] = [o_d['yVp']]
if self.Disp:
print "Type", type_id, ": len(vector) =", len( o_d['list'])
print "len(ci) =", len( o_d['ci'])
pdo_i = pd.DataFrame( od)
#print pdo_i['QC Models (Family ID)'][0]
#print pdo_i
pdo_frame = pdo_frame.append( pdo_i, ignore_index=True)
# Return output data using self variables ------------------
self._pdo_frame = pdo_frame
self.pdo = self.pdo.append( pdo_frame, ignore_index=True)
# ----------------------------------------------------------
def each_mean( self):
"""
Get median value for each group.
These values will be used to generate global independent regression (ensemble).
The CV Mode can be controlled outside.
"""
# Input ---------------------
pdr = self.pdr
# ---------------------------
pdo_frame = pd.DataFrame()
for type_id in self.types_l:
od = self.each_mean_base( pdr[ pdr.Type == type_id], type_id)
pdo_i = pd.DataFrame( od)
pdo_frame = pdo_frame.append( pdo_i, ignore_index=True)
# Return output data using self variables ------------------
self._pdo_frame = pdo_frame
self.pdo = self.pdo.append( pdo_frame, ignore_index=True)
# ----------------------------------------------------------
def each( self):
"""
Get median value for each group.
These values will be used to generate global independent regression (ensemble).
The CV Mode can be controlled outside.
"""
# Input ---------------------
pdr = self.pdr
# ---------------------------
pdo_frame = pd.DataFrame()
for type_id in self.types_l:
od = self.each_base( pdr[ pdr.Type == type_id], type_id)
pdo_i = pd.DataFrame( od)
pdo_frame = pdo_frame.append( pdo_i, ignore_index=True)
# Return output data using self variables ------------------
self._pdo_frame = pdo_frame
self.pdo = self.pdo.append( pdo_frame, ignore_index=True)
# ----------------------------------------------------------
def indepedentMultiple(self, groups = [1,2,3,4], flag_regress = True):
# Input --------------------
pdo_frame = self._pdo_frame
od = self.od.copy()
# --------------------------
ad_l = []
for group in groups:
p = pdo_frame[ pdo_frame['Group(s)'] == group]
n = len( p['abs_err_vector'].tolist()[0])
if self.Disp:
print "Group", group, "with", n, "AD elements"
ad_l.extend( p['abs_err_vector'].tolist()[0])
od['Regression'] = ['Linear']
od['Group(s)'] = ['Independent multiple: {}'.format( groups)]
od['median_abs_err'] = [ np.median( ad_l)]
od['mean_abs_err'] = [ np.mean( ad_l)]
od['std_abs_err'] = [ np.std( ad_l)]
od['abs_err_vector'] = [ ad_l]
od['(coef_,intercept_)'] = ['t.b.d.']
od['results'] = ['Look at each type']
if flag_regress is not True:
od['CV Mode'] = ['No regress'] # this part is included.
od['Regression'] = ['No_Regression']
pdo_i = pd.DataFrame( od)
# Return -------------------------------------
self.pdo = self.pdo.append( pdo_i, ignore_index=True)
# --------------------------------------------
def indepedentMultiple_nr(self, groups = [1,2,3,4]):
"""
indepedentMultiple for no regress case
"""
self.indepedentMultiple(groups = groups, flag_regress = False)
def Is_oneQC_noHS(self, od):
"""
        Since the name starts with a capital letter, this method does not
        rely on self variables and can be used in general.
"""
if len(od["QC Models (Family ID)"][0]) == 1 and \
od['H + S'][0] == False:
return True
else:
return False
def each_run(self):
#def each_indepedentMultiple(self):
if self.Is_oneQC_noHS( self.od):
"""
No regression is included only for one QC models and no H+S cases.
"""
self.each_noregress()
self.indepedentMultiple_nr(self.types_l)
if self.types_l != [1,2,4]:
# if full set is not [1,2,4], we run for [1,2,4]
self.indepedentMultiple_nr([1,2,4])
self.globalSingle_nr(self.types_l)
if self.types_l != [1,2,4]:
self.globalSingle_nr([1,2,4])
self.each_mean()
self.each()
self.indepedentMultiple(self.types_l)
if self.types_l != [1,2,4]:
self.indepedentMultiple([1,2,4])
self.globalSingle( self.types_l)
if self.types_l != [1,2,4]:
self.globalSingle([1,2,4])
def globalSingle(self, groups = [1,2,3,4]):
od = self.each_base( self.pdr.query( "Type in {}".format( groups)))
od['Group(s)'] = ['Global single: {}'.format(groups)]
pdo_i = pd.DataFrame( od)
# Return processing -----------------------------
self.pdo = self.pdo.append( pdo_i, ignore_index=True)
# -----------------------------------------------
def each_noregress_base( self, pdr): #pdr[ pdr.Type == type_id]
        od = self.od.copy() # inherit some values from a self variable
# xM4 terminology will not be used for generalization.
#xM, yV = jadrian.get_xM2_yV( pdr[ pdr.Type == type_id], "Em_in", mode = od['QC Models (Family ID)'][0][0])
xM, yV = self.get_xM_yV( pdr)
ad_l = np.abs( xM - yV).A1.tolist()
od['Regression'] = ['No_Regression']
od['CV Mode'] = ['No regress']
od['median_abs_err'] = [ np.median( ad_l)]
od['mean_abs_err'] = [ np.mean( ad_l)]
od['std_abs_err'] = [ np.std( ad_l)]
od['abs_err_vector'] = [ ad_l]
od['(coef_,intercept_)'] = ['t.b.d.']
od['results'] = [xM.A1.tolist()]
return od
def each_noregress( self):
"""
Get median value for each group without regression.
"""
# Input ---------------------
pdr = self.pdr
# ---------------------------
pdo_frame = pd.DataFrame()
for type_id in self.types_l:
od = self.each_noregress_base( pdr[ pdr.Type == type_id])
od['Group(s)'] = [type_id]
pdo_i = pd.DataFrame( od)
pdo_frame = pdo_frame.append( pdo_i, ignore_index=True)
# Return output data using self variables ------------------
self._pdo_frame = pdo_frame
self.pdo = self.pdo.append( pdo_frame, ignore_index=True)
# ----------------------------------------------------------
def globalSingle_nr(self, groups = [1,2,3,4]):
od = self.each_noregress_base( self.pdr.query( "Type in {}".format( groups)))
od['Group(s)'] = ['Global single: {}'.format(groups)]
pdo_i = pd.DataFrame( od)
# Return processing -----------------------------
self.pdo = self.pdo.append( pdo_i, ignore_index=True)
# -----------------------------------------------
def globalSingles(self):
"""
[1,2,3,4] and [1,2,4] will be a set of groups, respectively.
"""
self.globalSingle( self.types_l)
if self.types_l != [1,2,4]:
self.globalSingle([1,2,4])
def run(self):
# Calculate median for each group
#self.each_indepedentMultiple()
self.each_run()
#self.globalSingles()
# Returning self, it can be used recursive processing
print "The result dataframe is saved to", self.out_file
self.pdo.to_csv( self.out_file, index = False)
return self
def test(self):
print "Self testing is performed."
print "self.od\n", self.od
print "self.pdr.keys()\n", self.pdr.keys()
print "The final output data frame is as follows:"
print "self.pdo\n", self.pdo
return self
def get_od_base( mode = "H+S & B3LYP+TPSS0"): # od is OrderedDict()
"""
    Initial parameters are prepared.
    mode = "H+S & B3LYP+TPSS0" --> ["B3LYP", "TPSS0"] with separation of H and S
    "H+S & B3LYP" --> ["B3LYP"] with separation of H and S
    "H+S & TPSS0" --> ["TPSS0"] with separation of H and S
    """
    if mode == "H+S & B3LYP+TPSS0":
od = OrderedDict()
od['QC Models (Family ID)'] = [["B3LYP", "TPSS0"]]
od['H + S'] = [True]
od['CV Mode'] = ['10*5KF/LOO']
od['Em type'] = ['Chemical potential']
od['Regularization'] = ['None']
od['Bounds/Constraints'] = ['None']
aod = OrderedDict()
aod['in_file'] = "sheet/EmBT-xM4.csv"
aod['out_file'] = "sheet/out_" + mode + ".csv"
else:
raise ValueError("Not supported: {}".format( mode))
return od, aod
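# Illustrative usage sketch (not part of the original module; it assumes that
# "sheet/EmBT-xM4.csv" exists with the columns expected by get_xM_yV and that
# the Median class defined above is used as the driver):
#
#   od, aod = get_od_base( "H+S & B3LYP+TPSS0")
#   result = Median( od, aod).run()
#   print result.pdo.head()
#
# run() writes the collected dataframe to aod['out_file'] and keeps a copy in
# result.pdo for further inspection.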
def _iter_od_base_r0(): # od is OrderedDict()
"""
    Initial parameters are prepared.
    mode = "H+S & B3LYP+TPSS0" --> ["B3LYP", "TPSS0"] with separation of H and S
    "H+S & B3LYP" --> ["B3LYP"] with separation of H and S
    "H+S & TPSS0" --> ["TPSS0"] with separation of H and S
"""
############################################
# "B3LYP"
############################################
op_mode = "B3LYP"
print "Processing mode is", op_mode
od = OrderedDict()
od['QC Models (Family ID)'] = [["B3LYP"]]
od['H + S'] = [False]
od['CV Mode'] = ['10*5KF/LOO']
od['Em type'] = ['Chemical potential']
od['Regularization'] = ['None']
od['Bounds/Constraints'] = ['None']
aod = OrderedDict()
aod['in_file'] = "sheet/EmBT-xM4.csv"
aod['out_file'] = "sheet/out_" + op_mode + ".csv"
aod['op_mode'] = op_mode
yield od, aod
############################################
# "TPSS0"
############################################
op_mode = "TPSS0"
print "Processing mode is", op_mode
od = OrderedDict()
od['QC Models (Family ID)'] = [["TPSS0"]]
od['H + S'] = [False]
od['CV Mode'] = ['10*5KF/LOO']
od['Em type'] = ['Chemical potential']
od['Regularization'] = ['None']
od['Bounds/Constraints'] = ['None']
aod = OrderedDict()
aod['in_file'] = "sheet/EmBT-xM4.csv"
aod['out_file'] = "sheet/out_" + op_mode + ".csv"
aod['op_mode'] = op_mode
yield od, aod
############################################
# "B3LYP" * H + S
############################################
op_mode = "H+S_B3LYP"
print "Processing mode is", op_mode
od = OrderedDict()
od['QC Models (Family ID)'] = [["B3LYP"]]
od['H + S'] = [True]
od['CV Mode'] = ['10*5KF/LOO']
od['Em type'] = ['Chemical potential']
od['Regularization'] = ['None']
od['Bounds/Constraints'] = ['None']
aod = OrderedDict()
aod['in_file'] = "sheet/EmBT-xM4.csv"
aod['out_file'] = "sheet/out_" + op_mode + ".csv"
aod['op_mode'] = op_mode
yield od, aod
############################################
# "TPSS0" * H + S
############################################
op_mode = "H+S_TPSS0"
print "Processing mode is", op_mode
od = OrderedDict()
od['QC Models (Family ID)'] = [["TPSS0"]]
od['H + S'] = [True]
od['CV Mode'] = ['10*5KF/LOO']
od['Em type'] = ['Chemical potential']
od['Regularization'] = ['None']
od['Bounds/Constraints'] = ['None']
aod = OrderedDict()
aod['in_file'] = "sheet/EmBT-xM4.csv"
aod['out_file'] = "sheet/out_" + op_mode + ".csv"
aod['op_mode'] = op_mode
yield od, aod
############################################
# "B3LYP" + "TPSS0" * H + S
############################################
op_mode = "H+S_B3LYP+TPSS0"
print "Processing mode is", op_mode
od = OrderedDict()
od['QC Models (Family ID)'] = [["B3LYP", "TPSS0"]]
od['H + S'] = [True]
od['CV Mode'] = ['10*5KF/LOO']
od['Em type'] = ['Chemical potential']
od['Regularization'] = ['None']
od['Bounds/Constraints'] = ['None']
aod = OrderedDict()
aod['in_file'] = "sheet/EmBT-xM4.csv"
aod['out_file'] = "sheet/out_" + op_mode + ".csv"
aod['op_mode'] = op_mode
yield od, aod
############################################
# "BEST"
############################################
op_mode = "Best"
print "Processing mode is", op_mode
od = OrderedDict()
od['QC Models (Family ID)'] = [["Best"]]
od['H + S'] = [False]
od['CV Mode'] = ['10*5KF/LOO']
od['Em type'] = ['Chemical potential']
od['Regularization'] = ['None']
od['Bounds/Constraints'] = ['None']
aod = OrderedDict()
aod['in_file'] = "sheet/best.csv"
aod['out_file'] = "sheet/out_" + op_mode + ".csv"
aod['op_mode'] = op_mode
yield od, aod
def iter_od_base( default_d = {'CV Mode': ['10*5KF/LOO']}): # od is OrderedDict()
"""
    Initial parameters are prepared.
    mode = "H+S & B3LYP+TPSS0" --> ["B3LYP", "TPSS0"] with separation of H and S
    "H+S & B3LYP" --> ["B3LYP"] with separation of H and S
    "H+S & TPSS0" --> ["TPSS0"] with separation of H and S
    default_d['CV Mode'] = ['LOO'] is recommended.
"""
############################################
# "B3LYP"
############################################
op_mode = "B3LYP"
print "Processing mode is", op_mode
od = OrderedDict()
od['QC Models (Family ID)'] = [["B3LYP"]]
od['H + S'] = [False]
# od['CV Mode'] = ['10*5KF/LOO']
od['CV Mode'] = default_d['CV Mode']
od['Em type'] = ['Chemical potential']
od['Regularization'] = ['None']
od['Bounds/Constraints'] = ['None']
aod = OrderedDict()
aod['in_file'] = "sheet/EmBT-xM4.csv"
aod['out_file'] = "sheet/out_" + op_mode + ".csv"
aod['op_mode'] = op_mode
yield od, aod
############################################
# "TPSS0"
############################################
op_mode = "TPSS0"
print "Processing mode is", op_mode
od = OrderedDict()
od['QC Models (Family ID)'] = [["TPSS0"]]
od['H + S'] = [False]
# od['CV Mode'] = ['10*5KF/LOO']
od['CV Mode'] = default_d['CV Mode']
od['Em type'] = ['Chemical potential']
od['Regularization'] = ['None']
od['Bounds/Constraints'] = ['None']
aod = OrderedDict()
aod['in_file'] = "sheet/EmBT-xM4.csv"
aod['out_file'] = "sheet/out_" + op_mode + ".csv"
aod['op_mode'] = op_mode
yield od, aod
############################################
# "B3LYP" * H + S
############################################
op_mode = "H+S_B3LYP"
print "Processing mode is", op_mode
od = OrderedDict()
od['QC Models (Family ID)'] = [["B3LYP"]]
od['H + S'] = [True]
# od['CV Mode'] = ['10*5KF/LOO']
od['CV Mode'] = default_d['CV Mode']
od['Em type'] = ['Chemical potential']
od['Regularization'] = ['None']
od['Bounds/Constraints'] = ['None']
aod = OrderedDict()
aod['in_file'] = "sheet/EmBT-xM4.csv"
aod['out_file'] = "sheet/out_" + op_mode + ".csv"
aod['op_mode'] = op_mode
yield od, aod
############################################
# "TPSS0" * H + S
############################################
op_mode = "H+S_TPSS0"
print "Processing mode is", op_mode
od = OrderedDict()
od['QC Models (Family ID)'] = [["TPSS0"]]
od['H + S'] = [True]
# od['CV Mode'] = ['10*5KF/LOO']
od['CV Mode'] = default_d['CV Mode']
od['Em type'] = ['Chemical potential']
od['Regularization'] = ['None']
od['Bounds/Constraints'] = ['None']
aod = OrderedDict()
aod['in_file'] = "sheet/EmBT-xM4.csv"
aod['out_file'] = "sheet/out_" + op_mode + ".csv"
aod['op_mode'] = op_mode
yield od, aod
############################################
# "B3LYP" + "TPSS0" (H+S is turned off)
############################################
op_mode = "B3LYP+TPSS0"
print "Processing mode is", op_mode
od = OrderedDict()
od['QC Models (Family ID)'] = [["B3LYP", "TPSS0"]]
od['H + S'] = [False]
# od['CV Mode'] = ['10*5KF/LOO']
od['CV Mode'] = default_d['CV Mode']
od['Em type'] = ['Chemical potential']
od['Regularization'] = ['None']
od['Bounds/Constraints'] = ['None']
aod = OrderedDict()
aod['in_file'] = "sheet/EmBT-xM4.csv"
aod['out_file'] = "sheet/out_" + op_mode + ".csv"
aod['op_mode'] = op_mode
yield od, aod
############################################
# "B3LYP" + "TPSS0" * H + S
############################################
op_mode = "H+S_B3LYP+TPSS0"
print "Processing mode is", op_mode
od = OrderedDict()
od['QC Models (Family ID)'] = [["B3LYP", "TPSS0"]]
od['H + S'] = [True]
# od['CV Mode'] = ['10*5KF/LOO']
od['CV Mode'] = default_d['CV Mode']
od['Em type'] = ['Chemical potential']
od['Regularization'] = ['None']
od['Bounds/Constraints'] = ['None']
aod = OrderedDict()
aod['in_file'] = "sheet/EmBT-xM4.csv"
aod['out_file'] = "sheet/out_" + op_mode + ".csv"
aod['op_mode'] = op_mode
yield od, aod
############################################
# "BEST"
############################################
op_mode = "Best"
print "Processing mode is", op_mode
od = OrderedDict()
od['QC Models (Family ID)'] = [["Best"]]
od['H + S'] = [False]
# od['CV Mode'] = ['10*5KF/LOO']
od['CV Mode'] = default_d['CV Mode']
od['Em type'] = ['Chemical potential']
od['Regularization'] = ['None']
od['Bounds/Constraints'] = ['None']
aod = OrderedDict()
aod['in_file'] = "sheet/best.csv"
aod['out_file'] = "sheet/out_" + op_mode + ".csv"
aod['op_mode'] = op_mode
yield od, aod
############################################
# "B3LYP_pos"
############################################
op_mode = "B3LYP_pos"
print "Processing mode is", op_mode
od = OrderedDict()
od['QC Models (Family ID)'] = [["B3LYP_pos"]]
od['H + S'] = [False]
# od['CV Mode'] = ['10*5KF/LOO']
od['CV Mode'] = default_d['CV Mode']
od['Em type'] = ['Chemical potential']
od['Regularization'] = ['None']
od['Bounds/Constraints'] = ['None']
aod = OrderedDict()
aod['in_file'] = "sheet/b3lyp_pos.csv"
aod['out_file'] = "sheet/out_" + op_mode + ".csv"
aod['op_mode'] = op_mode
yield od, aod
def run_median( test_flag = False, default_d = {'CV Mode': ['10*5KF/LOO']}):
"""
Multiple cases are invoked here.
Now save for each mode. Later, I will save all mode results at the same time.
"""
self_od = OrderedDict()
for od, aod in iter_od_base( default_d):
"""
        All iteration results will be saved to self_od
"""
op_mode = aod['op_mode']
if test_flag:
self_od[op_mode] = Median( od, aod).run().test()
else:
self_od[op_mode] = Median( od, aod).run()
# All pdo are collected and saved to out_x.csv where x is number of op_mode(s).
pdo_all = pd.DataFrame()
for s in self_od.values():
pdo_all = pdo_all.append( s.pdo, ignore_index = True)
all_out_file = "sheet/out_{}.csv".format( len(self_od))
print 'The collected dataframe is saved to', all_out_file
pdo_all.to_csv( all_out_file, index = False)
return self_od
def run_medianmeanstd( test_flag = False, default_d = {'CV Mode': ['LOO']}):
"""
Multiple cases are invoked here.
Now save for each mode. Later, I will save all mode results at the same time.
"""
self_od = OrderedDict()
for od, aod in iter_od_base( default_d):
"""
        All iteration results will be saved to self_od
"""
op_mode = aod['op_mode']
if test_flag:
self_od[op_mode] = MedianMeanStd( od, aod).run().test()
else:
self_od[op_mode] = MedianMeanStd( od, aod).run()
# All pdo are collected and saved to out_x.csv where x is number of op_mode(s).
pdo_all = pd.DataFrame()
for s in self_od.values():
pdo_all = pdo_all.append( s.pdo, ignore_index = True)
all_out_file = "sheet/out_mms_{}.csv".format( len(self_od))
print 'The collected dataframe is saved to', all_out_file
pdo_all.to_csv( all_out_file, index = False)
return self_od
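# Illustrative driver sketch (not part of the original module; it assumes the
# sheet/*.csv inputs referenced in iter_od_base() are available):
#
#   results_median = run_median( test_flag = False)
#   results_mms = run_medianmeanstd( default_d = {'CV Mode': ['LOO']})
#
# Each call writes one sheet/out_<op_mode>.csv per operation mode plus a
# combined sheet/out_*.csv file, and returns an OrderedDict of the per-mode
# result objects.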
def pd_coef( pd_fname = 'sheet/out_5.csv', idx = 39, graph = False):
"""
extract coefficients and intercepts.
Pandas dataframe is used.
"""
pdr = pd.read_csv( pd_fname)
ci39 = pdr['(coef_,intercept_)'][ idx]
ci39_l = eval( ci39)
c0_l, c1_l, i_l = [], [], []
for x in ci39_l:
x0 = x[0][0]
x1 = x[1][0]
c0_l.append(x0[0])
c1_l.append(x0[1])
i_l.append( x1)
# Data is saved to pandas dataframe
c1p0 = np.divide( c1_l, c0_l)
pdw_div = pd.DataFrame( np.array([c0_l, c1_l, i_l, c1p0]).T, columns=['c0', 'c1', 'in', 'c1/c0'])
if graph:
pdw_div.plot(kind='box')
return pdw_div
def collect_coef( idx_l = [32, 33, 34, 35, 38, 39], pd_fname = 'sheet/out_5.csv'):
"""
collect related coefs
"""
# The column of group will be included.
pd_out = pd.read_csv( pd_fname)
pdw_d = dict()
for idx in idx_l:
pdw_d[idx] = pd_coef( pd_fname = pd_fname, idx = idx)
# long group names are shortened
if 'Global single: ' in pd_out['Group(s)'][idx]:
lg = len('Global single: ')
gs = pd_out['Group(s)'][idx][lg:]
pdw_d[idx]['Group(s)'] = [ gs] * pdw_d[idx].shape[0]
else:
pdw_d[idx]['Group(s)'] = [pd_out['Group(s)'][idx]] * pdw_d[idx].shape[0]
pd_collect = pd.DataFrame()
for idx in idx_l:
pd_collect = pd_collect.append( pdw_d[idx], ignore_index = True)
# print pdw_d[idx]
# print pd_collect
return pd_collect
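# Example use of the coefficient helpers (illustrative only; the idx values
# are hypothetical and depend on the row layout of the generated CSV):
#
#   pdw = pd_coef( pd_fname = 'sheet/out_5.csv', idx = 39, graph = True)
#   pd_all = collect_coef( idx_l = [32, 33, 34, 35, 38, 39])
#   pd_all.boxplot( column = ['c0', 'c1', 'in'], by = 'Group(s)')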
def pd_get_y_a( pdr, group = 2, mode = 'regress', method = 'B3LYP'):
pdr_B3LYP = pdr[ pdr['QC Models (Family ID)'] == "['{}']".format(method)]
B3LYP_d = dict()
B3LYP_d["no_regress"] = pdr_B3LYP[ (pdr_B3LYP['CV Mode'] == 'No regress') ]
B3LYP_d["regress"] = pdr_B3LYP[ (pdr_B3LYP['CV Mode'] != 'No regress') & (pdr_B3LYP['H + S'] == False)]
B3LYP_d["ensemble"] = pdr_B3LYP[ (pdr_B3LYP['CV Mode'] != 'No regress') & (pdr_B3LYP['H + S'] == True)]
p = B3LYP_d[mode]
#print p[ p['Group(s)'] == str(group)]['results'].tolist()
y_l = eval(p[ p['Group(s)'] == str(group)]['results'].tolist()[0])
return np.array(y_l)
def pd_get_y_a_qcmodels( pdr, group = 2, mode = 'regress', qcmodels = "['B3LYP', 'TPSS0']"):
pdr_B3LYP = pdr[ pdr['QC Models (Family ID)'] == qcmodels]
print pdr_B3LYP.shape
B3LYP_d = dict()
B3LYP_d["no_regress"] = pdr_B3LYP[ (pdr_B3LYP['CV Mode'] == 'No regress') ]
B3LYP_d["regress"] = pdr_B3LYP[ (pdr_B3LYP['CV Mode'] != 'No regress') & (pdr_B3LYP['H + S'] == False)]
B3LYP_d["ensemble"] = pdr_B3LYP[ (pdr_B3LYP['CV Mode'] != 'No regress') & (pdr_B3LYP['H + S'] == True)]
p = B3LYP_d[mode]
print p.shape
#print p[ p['Group(s)'] == str(group)]['results'].tolist()
y_l = eval(p[ p['Group(s)'] == str(group)]['results'].tolist()[0])
return np.array(y_l)
| 31.621504 | 169 | 0.605122 |
b89af081a47b08708cbf3950347e63ed8178ba4a | 5,977 | py | Python | sdk/authorization/azure-mgmt-authorization/azure/mgmt/authorization/v2018_09_01_preview/models/_models.py | ankitarorabit/azure-sdk-for-python | dd90281cbad9400f8080754a5ef2f56791a5a88f | [
"MIT"
] | 3 | 2020-06-23T02:25:27.000Z | 2021-09-07T18:48:11.000Z | sdk/authorization/azure-mgmt-authorization/azure/mgmt/authorization/v2018_09_01_preview/models/_models.py | ankitarorabit/azure-sdk-for-python | dd90281cbad9400f8080754a5ef2f56791a5a88f | [
"MIT"
] | 510 | 2019-07-17T16:11:19.000Z | 2021-08-02T08:38:32.000Z | sdk/authorization/azure-mgmt-authorization/azure/mgmt/authorization/v2018_09_01_preview/models/_models.py | ankitarorabit/azure-sdk-for-python | dd90281cbad9400f8080754a5ef2f56791a5a88f | [
"MIT"
] | 5 | 2019-09-04T12:51:37.000Z | 2020-09-16T07:28:40.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import msrest.serialization
class RoleAssignment(msrest.serialization.Model):
"""Role Assignments.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The role assignment ID.
:vartype id: str
:ivar name: The role assignment name.
:vartype name: str
:ivar type: The role assignment type.
:vartype type: str
:param scope: The role assignment scope.
:type scope: str
:param role_definition_id: The role definition ID.
:type role_definition_id: str
:param principal_id: The principal ID.
:type principal_id: str
:param principal_type: The principal type of the assigned principal ID. Possible values
include: "User", "Group", "ServicePrincipal", "Unknown", "DirectoryRoleTemplate",
"ForeignGroup", "Application", "MSI", "DirectoryObjectOrGroup", "Everyone".
:type principal_type: str or ~azure.mgmt.authorization.v2018_09_01_preview.models.PrincipalType
:param can_delegate: The Delegation flag for the role assignment.
:type can_delegate: bool
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'scope': {'key': 'properties.scope', 'type': 'str'},
'role_definition_id': {'key': 'properties.roleDefinitionId', 'type': 'str'},
'principal_id': {'key': 'properties.principalId', 'type': 'str'},
'principal_type': {'key': 'properties.principalType', 'type': 'str'},
'can_delegate': {'key': 'properties.canDelegate', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(RoleAssignment, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.scope = kwargs.get('scope', None)
self.role_definition_id = kwargs.get('role_definition_id', None)
self.principal_id = kwargs.get('principal_id', None)
self.principal_type = kwargs.get('principal_type', None)
self.can_delegate = kwargs.get('can_delegate', None)
class RoleAssignmentCreateParameters(msrest.serialization.Model):
"""Role assignment create parameters.
All required parameters must be populated in order to send to Azure.
:param role_definition_id: Required. The role definition ID used in the role assignment.
:type role_definition_id: str
:param principal_id: Required. The principal ID assigned to the role. This maps to the ID
inside the Active Directory. It can point to a user, service principal, or security group.
:type principal_id: str
:param principal_type: The principal type of the assigned principal ID. Possible values
include: "User", "Group", "ServicePrincipal", "Unknown", "DirectoryRoleTemplate",
"ForeignGroup", "Application", "MSI", "DirectoryObjectOrGroup", "Everyone".
:type principal_type: str or ~azure.mgmt.authorization.v2018_09_01_preview.models.PrincipalType
:param can_delegate: The delegation flag used for creating a role assignment.
:type can_delegate: bool
"""
_validation = {
'role_definition_id': {'required': True},
'principal_id': {'required': True},
}
_attribute_map = {
'role_definition_id': {'key': 'properties.roleDefinitionId', 'type': 'str'},
'principal_id': {'key': 'properties.principalId', 'type': 'str'},
'principal_type': {'key': 'properties.principalType', 'type': 'str'},
'can_delegate': {'key': 'properties.canDelegate', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(RoleAssignmentCreateParameters, self).__init__(**kwargs)
self.role_definition_id = kwargs['role_definition_id']
self.principal_id = kwargs['principal_id']
self.principal_type = kwargs.get('principal_type', None)
self.can_delegate = kwargs.get('can_delegate', None)
class RoleAssignmentFilter(msrest.serialization.Model):
"""Role Assignments filter.
:param principal_id: Returns role assignment of the specific principal.
:type principal_id: str
:param can_delegate: The Delegation flag for the role assignment.
:type can_delegate: bool
"""
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'can_delegate': {'key': 'canDelegate', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(RoleAssignmentFilter, self).__init__(**kwargs)
self.principal_id = kwargs.get('principal_id', None)
self.can_delegate = kwargs.get('can_delegate', None)
class RoleAssignmentListResult(msrest.serialization.Model):
"""Role assignment list operation result.
:param value: Role assignment list.
:type value: list[~azure.mgmt.authorization.v2018_09_01_preview.models.RoleAssignment]
:param next_link: The URL to use for getting the next set of results.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[RoleAssignment]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(RoleAssignmentListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
| 38.811688 | 99 | 0.643467 |
24059e5e903b8a9604208fdcf49bced8f677f07f | 3,536 | py | Python | utils/dataset_collection/airsim/airsim_fly.py | surirohit/multi-camera-deeptam | 37288ec8ac11b020418f88547b1e4d810343e63a | [
"Apache-2.0"
] | 5 | 2019-10-24T20:06:16.000Z | 2020-10-16T06:17:46.000Z | utils/dataset_collection/airsim/airsim_fly.py | surirohit/multi-camera-deeptam | 37288ec8ac11b020418f88547b1e4d810343e63a | [
"Apache-2.0"
] | null | null | null | utils/dataset_collection/airsim/airsim_fly.py | surirohit/multi-camera-deeptam | 37288ec8ac11b020418f88547b1e4d810343e63a | [
"Apache-2.0"
] | 2 | 2020-01-06T03:20:05.000Z | 2021-02-10T09:44:29.000Z | ## Contributor: Mayank Mittal
# Script to fly the spawned drone in lawn mower patter
'''
~ # #########
^ # # #
| # # #
| # # #
Y # # #
| # # #
| # # #
v # # #
~ ######### #
<-X_step->
'''
import airsim
import sys
import time
import math
############################# Control Variables ###############################
# Note: In UnrealEngine the coordinate system is inverted
X = 10 # final x coordinate
Y = -25 # length of each pass along y
# The paramters H and X_step need to changed to collect more data
H = -2.5 # height of flight
X_step = 10.0
# defining variables for flight
YAW = 0 # yaw of the drone while flying (assumed fixed at 0)
V = 0.5 # speed of drone while flying
##############################################################################
# color class to prettify the terminal outputs being printed
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
# print in green
def printg(message=''):
if message != '':
print(bcolors.OKGREEN + message + bcolors.ENDC)
# Connect to the simulator!
client = airsim.MultirotorClient()
client.reset()
client.confirmConnection()
client.enableApiControl(True)
print("Connection successful to the drone!")
# arming the drone
if (client.isApiControlEnabled()):
if (client.armDisarm(True)):
print(bcolors.OKBLUE + "drone is armed" + bcolors.ENDC)
else:
print(bcolors.FAIL + "failed to arm the drone" + bcolors.ENDC)
        sys.exit(1)
landed = client.getMultirotorState().landed_state
if landed == airsim.LandedState.Landed:
print(bcolors.OKBLUE + "drone should now be flying..." + bcolors.ENDC)
client.takeoffAsync().join()
else:
print(bcolors.WARNING + "it appears the drone is already flying" + bcolors.ENDC)
print(bcolors.WARNING + "kindly restart the simulator to ensure proper flying" + bcolors.ENDC)
client.hoverAsync().join()
# to pause the drone for a while to stabilize the flying altitude
client.moveToPositionAsync(0, 0, H, V, 60, airsim.DrivetrainType.MaxDegreeOfFreedom, airsim.YawMode(False, YAW), -1,
0).join()
print(bcolors.OKBLUE + "moved to altitude " + str(H) + bcolors.ENDC)
x = 0 # initial x coordinates
y = Y
delay_x = abs(X_step / V) # timeout for each leg along x
delay_y = abs(Y / V) # timeout for each leg along y
while x < X:
printg("starting to move to (%3d, %3d)" % (x, y))
client.moveToPositionAsync(x, y, H, V, delay_y, airsim.DrivetrainType.MaxDegreeOfFreedom,
airsim.YawMode(False, YAW), -1, 0).join()
x = x + X_step
printg("starting to move to (%3d, %3d)" % (x, y))
client.moveToPositionAsync(x, y, H, V, delay_x, airsim.DrivetrainType.MaxDegreeOfFreedom,
airsim.YawMode(False, YAW), -1, 0).join()
printg("starting to move to (%3d, %3d)" % (x, y))
client.moveToPositionAsync(x, 0, H, V, delay_y, airsim.DrivetrainType.MaxDegreeOfFreedom,
airsim.YawMode(False, YAW), -1, 0).join()
x = x + X_step
printg("starting to move to (%3d, %3d)" % (x, y))
client.moveToPositionAsync(x, 0, H, V, delay_x, airsim.DrivetrainType.MaxDegreeOfFreedom,
airsim.YawMode(False, YAW), -1, 0).join()
# land the drone (10 second timeout)
client.landAsync(10)
| 31.571429 | 116 | 0.597568 |
ece5fe39963336d8da39dc6aa036ff4945895d13 | 2,415 | py | Python | vmprof_viewer_client/decorator.py | blue-yonder/vmprof-viewer-client | 6b7219e04a1e224bcb46f486b1acc58817af3b33 | [
"MIT"
] | 4 | 2018-09-06T23:36:56.000Z | 2020-01-06T12:07:03.000Z | vmprof_viewer_client/decorator.py | blue-yonder/vmprof-viewer-client | 6b7219e04a1e224bcb46f486b1acc58817af3b33 | [
"MIT"
] | null | null | null | vmprof_viewer_client/decorator.py | blue-yonder/vmprof-viewer-client | 6b7219e04a1e224bcb46f486b1acc58817af3b33 | [
"MIT"
] | 1 | 2020-11-06T09:31:52.000Z | 2020-11-06T09:31:52.000Z | import functools
import tempfile
import threading
import datetime
import pytz
import vmprof
from .protocol import upload
from .config import make_config
_global_config = None
_upload_threads = []
def configure(project_name, url=None, period=None):
global _global_config
    _global_config = make_config(project_name, url)
    if period is not None:
        # keep an explicitly requested sampling period (otherwise the
        # default provided by make_config is used)
        _global_config['period'] = period
def profile(*args, **kwargs):
if args:
# Without options:
# @profile
# def foo(): ...
if len(args) > 1 or not callable(args[0]):
raise TypeError("profile() must be called with callable as only "
"positional argument (options must be given as keyword "
"arguments)")
return wrap_func(args[0])
else:
# With options:
# @profile(period=...)
# def foo(): ...
return functools.partial(wrap_func, options=kwargs)
def wrap_func(func, options=None):
@functools.wraps(func)
def func_wrapper(*args, **kwargs):
assert _global_config, "Must call vmprof_viewer_client.configure(<project name>) first"
merged_options = dict(_global_config, **(options or {}))
# Wait for any background upload threads to finish, otherwise they will
# appear in the profile
for thread in _upload_threads:
thread.join()
tmpfile = tempfile.NamedTemporaryFile()
vmprof.enable(tmpfile.fileno(), memory=True, period=merged_options['period'])
try:
start_date = datetime.datetime.now(pytz.utc)
return func(*args, **kwargs)
finally:
vmprof.disable()
def _upload():
try:
stats = vmprof.read_profile(tmpfile.name)
top_level_function = func.__module__ + "." + func.__name__
period = merged_options['period'] * 10**6
meta = {
'start_date': start_date.isoformat(),
'top_level_function': top_level_function
}
upload(merged_options['url'],
merged_options['project_name'],
stats, period, meta)
finally:
tmpfile.close()
upload_thread = threading.Thread(target=_upload)
upload_thread.start()
_upload_threads.append(upload_thread)
return func_wrapper
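# Minimal usage sketch (not part of the original module; "my-project" and the
# URL are placeholders, and it assumes configure/profile are re-exported by
# the package, e.g. from vmprof_viewer_client/__init__.py):
#
#   import vmprof_viewer_client as vvc
#
#   vvc.configure("my-project", url="http://vmprof-viewer.example.com/")
#
#   @vvc.profile(period=0.01)
#   def expensive_task():
#       ...
#
# Each profiled call is uploaded in a background thread after the wrapped
# function returns; pending uploads are joined before the next profiled call.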
| 31.776316 | 95 | 0.580124 |
e3fbb55c1ffb95c9300129dae0c1943d67605144 | 9,219 | py | Python | sphinxcontrib/confluencebuilder/translator/__init__.py | zeddee/confluencebuilder | c32b2338b17ec949f3e9c68e9a3b1fa3148e5cb3 | [
"BSD-2-Clause"
] | null | null | null | sphinxcontrib/confluencebuilder/translator/__init__.py | zeddee/confluencebuilder | c32b2338b17ec949f3e9c68e9a3b1fa3148e5cb3 | [
"BSD-2-Clause"
] | 2 | 2020-08-08T22:03:17.000Z | 2020-12-19T00:42:22.000Z | sphinxcontrib/confluencebuilder/translator/__init__.py | zeddee/confluencebuilder | c32b2338b17ec949f3e9c68e9a3b1fa3148e5cb3 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
:copyright: Copyright 2016-2020 Sphinx Confluence Builder Contributors (AUTHORS)
:license: BSD-2-Clause (LICENSE)
"""
from docutils import nodes
from docutils.nodes import NodeVisitor as BaseTranslator
from os import path
from sphinx.util.osutil import SEP
from sphinx.util.osutil import canon_path
from sphinxcontrib.confluencebuilder.logger import ConfluenceLogger
from sphinxcontrib.confluencebuilder.std.sphinx import DEFAULT_ALIGNMENT
from sphinxcontrib.confluencebuilder.std.sphinx import DEFAULT_HIGHLIGHT_STYLE
import io
import sys
class ConfluenceBaseTranslator(BaseTranslator):
_tracked_deprecated_raw_type = False
"""
confluence base extension translator
Base translator for the Confluence extension for Sphinx. This contains
common implementation shared by other translators in this extension which
can help process individual documents based on parsed node entries provided
by docutils (used by Sphinx).
Args:
document: the document being translated
builder: the sphinx builder instance
"""
def __init__(self, document, builder):
BaseTranslator.__init__(self, document)
self.builder = builder
self.warn = document.reporter.warning
config = builder.config
# acquire the active document name from the builder
assert 'source' in document
self.docname = canon_path(self.builder.env.path2doc(document['source']))
        # determine the active document's parent path to assist in title mapping
# for relative document uris
# (see '_visit_reference_intern_uri')
if SEP in self.docname:
self.docparent = self.docname[0:self.docname.rfind(SEP) + 1]
else:
self.docparent = ''
self.assets = builder.assets
self.body = []
self.context = []
self.nl = '\n'
self._docnames = [self.docname]
self._literal = False
self._section_level = 1
if config.confluence_default_alignment:
self._default_alignment = config.confluence_default_alignment
else:
self._default_alignment = DEFAULT_ALIGNMENT
if config.highlight_language:
self._highlight = config.highlight_language
else:
self._highlight = DEFAULT_HIGHLIGHT_STYLE
self._linenothreshold = sys.maxsize
# ##########################################################################
# # #
# # base translator overrides #
# # #
# ##########################################################################
def visit_document(self, node):
pass
def depart_document(self, node):
self.document = ''
# prepend header (if any)
if self.builder.config.confluence_header_file is not None:
headerFile = path.join(self.builder.env.srcdir,
self.builder.config.confluence_header_file)
try:
with io.open(headerFile, encoding='utf-8') as file:
self.document += file.read() + self.nl
except (IOError, OSError) as err:
self.warn('error reading file {}: {}'.format(headerFile, err))
self.document += ''.join(self.body)
# append footer (if any)
if self.builder.config.confluence_footer_file is not None:
footerFile = path.join(self.builder.env.srcdir,
self.builder.config.confluence_footer_file)
try:
with io.open(footerFile, encoding='utf-8') as file:
self.document += file.read() + self.nl
except (IOError, OSError) as err:
self.warn('error reading file {}: {}'.format(footerFile, err))
def visit_Text(self, node):
text = node.astext()
if not self._literal:
text = text.replace(self.nl, ' ')
text = self._escape_text(text)
self.body.append(text)
raise nodes.SkipNode
def unknown_visit(self, node):
node_name = node.__class__.__name__
ignore_nodes = self.builder.config.confluence_adv_ignore_nodes
if node_name in ignore_nodes:
ConfluenceLogger.verbose('ignore node {} (conf)'.format(node_name))
raise nodes.SkipNode
# allow users to override unknown nodes
#
# A node handler allows an advanced user to provide implementation to
# process a node not supported by this extension. This is to assist in
# providing a quick alternative to supporting another third party
# extension in this translator (without having to take the time in
# building a third extension).
handler = self.builder.config.confluence_adv_node_handler
if handler and isinstance(handler, dict) and node_name in handler:
handler[node_name](self, node)
raise nodes.SkipNode
raise NotImplementedError('unknown node: ' + node_name)
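    # Illustrative sketch of such a handler (hypothetical node and handler
    # names; the mapping would be registered from a project's conf.py):
    #
    #   def handle_example_node(translator, node):
    #       translator.body.append(translator._escape_text(node.astext()))
    #
    #   confluence_adv_node_handler = {'example_node': handle_example_node}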
# ---------
# structure
# ---------
def visit_section(self, node):
level = self._section_level
if not self.builder.config.confluence_adv_writer_no_section_cap:
MAX_CONFLUENCE_SECTIONS = 6
if self._section_level > MAX_CONFLUENCE_SECTIONS:
level = MAX_CONFLUENCE_SECTIONS
self._title_level = level
self._section_level += 1
def depart_section(self, node):
self._section_level -= 1
visit_topic = visit_section
depart_topic = depart_section
# ------------------
# sphinx -- glossary
# ------------------
def visit_glossary(self, node):
# ignore glossary wrapper; glossary is built with definition_list
pass
def depart_glossary(self, node):
pass
def visit_index(self, node):
# glossary index information is not needed; skipped
raise nodes.SkipNode
# --------------
# sphinx -- math
# --------------
def visit_displaymath(self, node):
# unsupported
raise nodes.SkipNode
def visit_eqref(self, node):
# unsupported
raise nodes.SkipNode
def visit_math(self, node):
# handled in "builder" at this time
raise nodes.SkipNode
def visit_math_block(self, node):
# handled in "builder" at this time
raise nodes.SkipNode
# -----------------
# sphinx -- toctree
# -----------------
def visit_toctree(self, node):
# skip hidden toctree entries
raise nodes.SkipNode
# -----------------------------------------------------
# docutils handling "to be completed" marked directives
# -----------------------------------------------------
def visit_citation_reference(self, node):
raise nodes.SkipNode
def visit_compact_paragraph(self, node):
pass
def depart_compact_paragraph(self, node):
pass
def visit_container(self, node):
pass
def depart_container(self, node):
pass
def visit_generated(self, node):
pass
def depart_generated(self, node):
pass
def visit_pending_xref(self, node):
raise nodes.SkipNode
def visit_problematic(self, node):
raise nodes.SkipNode
def visit_system_message(self, node):
raise nodes.SkipNode
# -------------
# miscellaneous
# -------------
def visit_acks(self, node):
raise nodes.SkipNode
def visit_comment(self, node):
raise nodes.SkipNode
def visit_line(self, node):
# ignoring; no need to handle specific line entries
pass
def depart_line(self, node):
pass
def visit_raw(self, node):
if 'confluence' in node.get('format', '').split():
if not self._tracked_deprecated_raw_type:
self._tracked_deprecated_raw_type = True
self.warn('the raw "confluence" type is deprecated; '
'use "confluence_storage" instead')
self.body.append(self.nl.join(node.astext().splitlines()))
raise nodes.SkipNode
def visit_sidebar(self, node):
# unsupported
raise nodes.SkipNode
def visit_substitution_definition(self, node):
raise nodes.SkipNode
def visit_start_of_file(self, node):
# track active inlined documents (singleconfluence builder) for anchors
self._docnames.append(node['docname'])
def depart_start_of_file(self, node):
self._docnames.pop()
# ##########################################################################
# # #
# # virtual methods #
# # #
# ##########################################################################
def _escape_text(self, node):
raise NotImplementedError('translator does not implement text escaping')
| 32.925 | 80 | 0.57555 |
28bed5efbf1ce8e0bda54789510bb25e2e480d41 | 271 | py | Python | openmoc/cuda/double/__init__.py | samuelshaner/OpenMOC-shaner | 52a9003eef0de0629aae4aa4030b5e8d2c3f9988 | [
"MIT"
] | null | null | null | openmoc/cuda/double/__init__.py | samuelshaner/OpenMOC-shaner | 52a9003eef0de0629aae4aa4030b5e8d2c3f9988 | [
"MIT"
] | null | null | null | openmoc/cuda/double/__init__.py | samuelshaner/OpenMOC-shaner | 52a9003eef0de0629aae4aa4030b5e8d2c3f9988 | [
"MIT"
] | null | null | null | import openmoc
import _openmoc_cuda_double
from openmoc_cuda_double import *
import signal
# Tell Python to recognize CTRL+C and stop the C++ extension module
# when this is passed in from the keyboard
signal.signal(signal.SIGINT, signal.SIG_DFL)
Timer = openmoc.Timer
| 24.636364 | 67 | 0.808118 |
20b2e3e32b7c66d0208f3258b3c38daa89dc0d38 | 297 | py | Python | test/sysinfo_test.py | peitur/docker-util | 6579c59b809a3dab80c440baa5fabc669cf88b9b | [
"Apache-2.0"
] | 1 | 2016-05-19T13:43:21.000Z | 2016-05-19T13:43:21.000Z | test/sysinfo_test.py | peitur/docker-util | 6579c59b809a3dab80c440baa5fabc669cf88b9b | [
"Apache-2.0"
] | 1 | 2020-11-23T10:21:16.000Z | 2020-11-23T10:25:16.000Z | test/sysinfo_test.py | peitur/docker-util | 6579c59b809a3dab80c440baa5fabc669cf88b9b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
import sys,os,re
sys.path.append( "../lib" )
sys.path.append( "./lib" )
import Controller
import unittest
from pprint import pprint
class SysinfoTest( unittest.TestCase ):
def test_configuration( self ):
pass
def test_information( self ):
pass
| 13.5 | 39 | 0.666667 |
a1d9a084fd04e5bb827c7a46e4343f7636cc106c | 2,178 | py | Python | junit/__init__.py | airadier/publish-unit-test-result-action | e44dbcba11ebe8c0a1e4e7b3e24ba7945bb79c35 | [
"Apache-2.0"
] | null | null | null | junit/__init__.py | airadier/publish-unit-test-result-action | e44dbcba11ebe8c0a1e4e7b3e24ba7945bb79c35 | [
"Apache-2.0"
] | null | null | null | junit/__init__.py | airadier/publish-unit-test-result-action | e44dbcba11ebe8c0a1e4e7b3e24ba7945bb79c35 | [
"Apache-2.0"
] | null | null | null | from html import unescape
from typing import Optional, Iterable
from junitparser import *
from unittestresults import ParsedUnitTestResults, UnitTestCase
def parse_junit_xml_files(files: Iterable[str]) -> ParsedUnitTestResults:
"""Parses junit xml files and returns aggregated statistics as a ParsedUnitTestResults."""
junits = [(result_file, JUnitXml.fromfile(result_file)) for result_file in files]
suites = [(result_file, suite)
for result_file, junit in junits
for suite in (junit if junit._tag == "testsuites" else [junit])]
suite_tests = sum([suite.tests for result_file, suite in suites])
suite_skipped = sum([suite.skipped for result_file, suite in suites])
suite_failures = sum([suite.failures for result_file, suite in suites])
suite_errors = sum([suite.errors for result_file, suite in suites])
suite_time = int(sum([suite.time for result_file, suite in suites]))
def int_opt(string: Optional[str]) -> Optional[int]:
try:
return int(string) if string else None
except ValueError:
return None
cases = [
UnitTestCase(
result_file=result_file,
test_file=case._elem.get('file'),
line=int_opt(case._elem.get('line')),
class_name=case.classname,
test_name=case.name,
result=case.result._tag if case.result else 'success',
message=unescape(case.result.message) if case.result and case.result.message is not None else None,
content=unescape(case.result._elem.text) if case.result and case.result._elem.text is not None else None,
time=case.time
)
for result_file, suite in suites
for case in suite
if case.classname is not None and case.name is not None
]
return ParsedUnitTestResults(
files=len(junits),
# test state counts from suites
suites=len(suites),
suite_tests=suite_tests,
suite_skipped=suite_skipped,
suite_failures=suite_failures,
suite_errors=suite_errors,
suite_time=suite_time,
# test cases
cases=cases
)
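# Usage sketch (hypothetical result-file paths; assumes ParsedUnitTestResults
# exposes its constructor fields as attributes):
#
# results = parse_junit_xml_files(['artifacts/junit-unit.xml', 'artifacts/junit-integration.xml'])
# print(results.files, results.suite_tests, results.suite_failures, results.suite_errors)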
| 38.210526 | 117 | 0.665289 |
68d914668ada25beab7cafdb467f0298a1393e34 | 897 | py | Python | app/products/views/vendor_views/express.py | phessabi/eshop | 6a5352753a0c27f9c3f0eda6eec696f49ef4a8eb | [
"Apache-2.0"
] | 1 | 2020-02-04T21:18:31.000Z | 2020-02-04T21:18:31.000Z | app/products/views/vendor_views/express.py | phessabi/eshop | 6a5352753a0c27f9c3f0eda6eec696f49ef4a8eb | [
"Apache-2.0"
] | 12 | 2020-01-01T11:46:33.000Z | 2022-03-12T00:10:01.000Z | app/products/views/vendor_views/express.py | phessabi/eshop | 6a5352753a0c27f9c3f0eda6eec696f49ef4a8eb | [
"Apache-2.0"
] | 1 | 2020-02-18T11:12:48.000Z | 2020-02-18T11:12:48.000Z | from rest_framework import status
from rest_framework.generics import UpdateAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet
from _helpers.permissions import IsVendor
from products.models import Product
from products.serializers import ProductSerializer
class ExpressView(UpdateAPIView, GenericViewSet):
permission_classes = (IsAuthenticated, IsVendor)
serializer_class = ProductSerializer
queryset = Product.objects.all()
def update(self, request, *args, **kwargs):
vendor = request.user.vendor
instance = self.get_object()
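# The vendor must hold at least 10% of the product price in credit for the
# product to be flagged as express (e.g. a product priced at 200.0 needs 20.0 credit).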
if vendor.credit < instance.price * 0.10:
return Response(status=status.HTTP_402_PAYMENT_REQUIRED)
instance.express = True
instance.save()
return Response(status=status.HTTP_200_OK)
| 35.88 | 68 | 0.770346 |
2171283a13a12ad82e0befd3a0c64e3c6056b45a | 1,294 | py | Python | Clustering/K Means/k_means_clustering.py | vnc-edu/machine-learning | 5a212ae303c5db319c0474077a13eb663aff2c54 | [
"Apache-2.0"
] | null | null | null | Clustering/K Means/k_means_clustering.py | vnc-edu/machine-learning | 5a212ae303c5db319c0474077a13eb663aff2c54 | [
"Apache-2.0"
] | null | null | null | Clustering/K Means/k_means_clustering.py | vnc-edu/machine-learning | 5a212ae303c5db319c0474077a13eb663aff2c54 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
dataset = pd.read_csv('Mall_Customers.csv')
X = dataset.iloc[:, [3, 4]].values
wcss = []
for k in range(1, 11):
kmeans = KMeans(n_clusters=k, init='k-means++', random_state=42)
kmeans.fit(X)
wcss.append(kmeans.inertia_)
plt.plot(range(1, 11), wcss)
plt.xlabel('Number of clusters (k)')
plt.ylabel('WCSS - Within-Cluster Sum of Squares')
plt.title('The elbow method')
plt.show()
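# The WCSS curve above flattens out around k = 5 for this dataset, which is the
# cluster count used for the final model below.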
k = 5
kmeans = KMeans(n_clusters=k, init='k-means++', random_state=42)
y_kmeans = kmeans.fit_predict(X)
plt.scatter(X[y_kmeans == 0, 0], X[y_kmeans == 0, 1], s=100, c='red', label='cluster 1')
plt.scatter(X[y_kmeans == 1, 0], X[y_kmeans == 1, 1], s=100, c='green', label='cluster 2')
plt.scatter(X[y_kmeans == 2, 0], X[y_kmeans == 2, 1], s=100, c='blue', label='cluster 3')
plt.scatter(X[y_kmeans == 3, 0], X[y_kmeans == 3, 1], s=100, c='cyan', label='cluster 4')
plt.scatter(X[y_kmeans == 4, 0], X[y_kmeans == 4, 1], s=100, c='grey', label='cluster 5')
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s=300, c='black', label='Centroids')
plt.title('Clusters of customers')
plt.xlabel('Annual Income (k$)')
plt.ylabel('Spending Score (1-100)')
plt.legend()
plt.show()
#%%
| 34.972973 | 110 | 0.667697 |
a4c391296015ad6cc9fcc117f47401e808936b90 | 515 | py | Python | dauphin/image/transforms/sharpness.py | PushparajaMurugan/dauphin | 4d9832c72288282e6b3d03be1b0ad8708282b005 | [
"Apache-2.0"
] | 18 | 2020-07-08T21:53:09.000Z | 2021-02-18T03:46:54.000Z | dauphin/image/transforms/sharpness.py | PushparajaMurugan/dauphin | 4d9832c72288282e6b3d03be1b0ad8708282b005 | [
"Apache-2.0"
] | 4 | 2020-08-30T17:47:35.000Z | 2020-12-04T12:09:41.000Z | dauphin/image/transforms/sharpness.py | PushparajaMurugan/dauphin | 4d9832c72288282e6b3d03be1b0ad8708282b005 | [
"Apache-2.0"
] | 8 | 2020-07-12T08:03:30.000Z | 2021-04-13T15:09:37.000Z | from PIL import ImageEnhance
from dauphin.image.transforms.transform import DauphinTransform
from dauphin.image.transforms.utils import categorize_value
class Sharpness(DauphinTransform):
value_range = (0.1, 1.9)
def __init__(self, name=None, prob=1.0, level=0):
super().__init__(name, prob, level)
def transform(self, pil_img, label, **kwargs):
degree = categorize_value(self.level, self.value_range, "float")
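# categorize_value maps the augmentation level into value_range; with PIL's
# ImageEnhance.Sharpness an enhancement factor of 1.0 returns the original image,
# values below 1.0 blur and values above 1.0 sharpen.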
return ImageEnhance.Sharpness(pil_img).enhance(degree), label
| 30.294118 | 72 | 0.733981 |
fb9fe3996c993f0ac6361e504400cb78419b7eb7 | 2,856 | py | Python | docker_registry/run.py | kirat-singh/docker-registry | ca53d728fb57302606892362820dfaa8aed105c5 | [
"Apache-2.0"
] | 1,568 | 2015-01-01T02:12:42.000Z | 2020-03-10T06:24:39.000Z | docker_registry/run.py | kirat-singh/docker-registry | ca53d728fb57302606892362820dfaa8aed105c5 | [
"Apache-2.0"
] | 316 | 2015-01-01T01:15:21.000Z | 2018-09-10T21:19:04.000Z | docker_registry/run.py | kirat-singh/docker-registry | ca53d728fb57302606892362820dfaa8aed105c5 | [
"Apache-2.0"
] | 596 | 2015-01-03T03:54:42.000Z | 2020-03-05T14:40:55.000Z | # -*- coding: utf-8 -*-
from __future__ import print_function
import argparse # noqa
import distutils.spawn
import getpass
import logging
import os
import sys
from .server import env
logger = logging.getLogger(__name__)
DESCRIPTION = """run the docker-registry with gunicorn, honoring the following
environment variables:
REGISTRY_HOST: TCP host or ip to bind to; default is 0.0.0.0
REGISTRY_PORT: TCP port to bind to; default is 5000
GUNICORN_WORKERS: number of worker processes gunicorn should start
GUNICORN_GRACEFUL_TIMEOUT: timeout in seconds for graceful worker restart
GUNICORN_SILENT_TIMEOUT: timeout in seconds for restarting silent workers
GUNICORN_USER: unix user to downgrade privileges to
GUNICORN_GROUP: unix group to downgrade privileges to
GUNICORN_ACCESS_LOG_FILE: File to log access to
GUNICORN_ERROR_LOG_FILE: File to log errors to
GUNICORN_OPTS: extra options to pass to gunicorn
"""
def run_gunicorn():
"""Exec gunicorn with our wsgi app.
Settings are taken from environment variables as listed in the help text.
This is intended to be called as a console_script entry point.
"""
# this only exists to provide help/usage text
parser = argparse.ArgumentParser(
description=DESCRIPTION,
formatter_class=argparse.RawTextHelpFormatter)
parser.parse_args()
gunicorn_path = distutils.spawn.find_executable('gunicorn')
if not gunicorn_path:
print('error: gunicorn executable not found', file=sys.stderr)
sys.exit(1)
address = '%s:%s' % (
env.source('REGISTRY_HOST'),
env.source('REGISTRY_PORT')
)
args = [
gunicorn_path, 'gunicorn',
'--access-logfile', env.source('GUNICORN_ACCESS_LOG_FILE'),
'--error-logfile', env.source('GUNICORN_ERROR_LOG_FILE'),
'--max-requests', '100',
'-k', 'gevent',
'--graceful-timeout', env.source('GUNICORN_GRACEFUL_TIMEOUT'),
'-t', env.source('GUNICORN_SILENT_TIMEOUT'),
'-w', env.source('GUNICORN_WORKERS'),
'-b', address,
]
if env.source('SETTINGS_FLAVOR') != 'prod':
args.append('--reload')
user = env.source('GUNICORN_USER')
group = env.source('GUNICORN_GROUP')
if user or group:
if getpass.getuser() == 'root':
if user:
logger.info('Downgrading privs to user %s' % user)
args.append('-u')
args.append(user)
if group:
logger.info('Downgrading privs to group %s' % group)
args.append('-g')
args.append(group)
else:
logger.warn('You asked to drop privileges, but we are not root!')
args += env.source('GUNICORN_OPTS')
args.append('docker_registry.wsgi:application')
# Stringify all args and call
os.execl(*[str(v) for v in args])
| 31.384615 | 78 | 0.668417 |
64cfd6b7b481025dd362715f1b4a2afc35091bd3 | 2,687 | py | Python | ingest/ingest_echr.py | alexbas/index_thio | d9d75cee93522800fe8ef988247a240c78668a5c | [
"MIT"
] | 1 | 2021-04-17T20:01:20.000Z | 2021-04-17T20:01:20.000Z | ingest/ingest_echr.py | alexbas/index_thio | d9d75cee93522800fe8ef988247a240c78668a5c | [
"MIT"
] | null | null | null | ingest/ingest_echr.py | alexbas/index_thio | d9d75cee93522800fe8ef988247a240c78668a5c | [
"MIT"
] | null | null | null | from os import path
import hashlib
import json
import argparse
from urllib.request import urlretrieve
from zipfile import ZipFile
from tqdm import tqdm
from confluent_kafka import Producer
DATASET_ADDRESS = 'https://archive.org/download/ECHR-ACL2019/ECHR_Dataset.zip'
def should_download(fn):
# if the file does not exist
if not path.exists(fn):
return True
# check if download was interrupted in the middle
md5hash = 'd5a592fb7e9882926a5ba3a78176e2b3'
md5 = hashlib.md5()
bs = 256 * 1024
with open(fn, 'rb') as f:
while True:
buffer = f.read(bs)
if not buffer:
break
md5.update(buffer)
return md5.hexdigest() != md5hash
def prepare_chunks(fn):
pass
def download(url, fn):
pbar = tqdm(desc=f"Downloading {url}")
def progress(i, bs, total):
if total > 0 and pbar.total is None:
pbar.total = total // 1024 ** 2
pbar.set_postfix_str("Mb")
pbar.update(bs / 1024 ** 2)
urlretrieve(url, fn, reporthook=progress)
def download_dataset():
url = DATASET_ADDRESS
fn = url.split('/')[-1]
if should_download(fn):
download(url, fn)
return fn
def generate_chunks(fn):
folder = 'EN_train/'
with ZipFile(fn) as z:
paths = [p for p in z.namelist() if p.startswith(folder) and p.endswith('json')]
for p in paths:
f = z.open(p)
data = json.load(f)
payload = {
'doc_id': data['ITEMID'],
'props': {
'docname': data['DOCNAME'],
'conclustion': data['CONCLUSION'],
'judges': data['JUDGES']
},
'content': ' '.join(data['TEXT'])
}
yield payload
def stream_data(gen, stream):
print("Starting streaming...")
for chunk in gen:
stream(chunk)
print('Done streaming.')
def kafka_stream(host):
def produce(chunk):
key = chunk['doc_id']
payload = json.dumps(chunk)
producer.produce(topic, payload, key)
config = {'bootstrap.servers': f'{host}:9092'}
producer = Producer(config)
topic = "ingest.echr"
return produce
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--kafka',
help='Host address of first kafka broker, localhost if omitted',
default='localhost')
args = parser.parse_args()
fn = download_dataset()
generator = generate_chunks(fn)
streamer = kafka_stream(args.kafka)
stream_data(generator, streamer)
if __name__ == '__main__':
main()
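# Example run (hypothetical broker host); each case is published to the
# "ingest.echr" topic, keyed by its ITEMID:
#
# python ingest_echr.py --kafka kafka-broker-1.internal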
| 23.778761 | 88 | 0.582434 |
af0296ba2f611fa1d8ed82f6eb82167be81a28ca | 1,500 | py | Python | kata-python/zigzagconversion.py | crimsonskyrem/codewars | ca2ef839af068d6aecd5c321cb3848ae5f8bfea4 | [
"Unlicense"
] | 1 | 2022-01-10T05:43:41.000Z | 2022-01-10T05:43:41.000Z | kata-python/zigzagconversion.py | crimsonskyrem/codewars | ca2ef839af068d6aecd5c321cb3848ae5f8bfea4 | [
"Unlicense"
] | null | null | null | kata-python/zigzagconversion.py | crimsonskyrem/codewars | ca2ef839af068d6aecd5c321cb3848ae5f8bfea4 | [
"Unlicense"
] | null | null | null | def convert(s: str, numRows: int) -> str:
if numRows == 1:
return s
if numRows == 2:
return s[::2] + s[1::2]
arr = []
row = 0
column = 0
pos = numRows - 2
sl = list(s)
q = column % (numRows -1)
while sl:
if q == 0:
if row >= len(arr):
arr.append([sl.pop(0)])
else:
arr[row].append(sl.pop(0))
else:
if row == pos:
arr[row].append(sl.pop(0))
if pos-1 > 0:
pos -= 1
else:
arr[row].append('')
row += 1
if row == numRows:
row = 0
column += 1
q = column % (numRows -1)
if q == 0:
pos = numRows - 2
res = ''
for sub in arr:
res += ''.join(sub)
return res
def convert2(s: str, numRows: int) -> str:
if numRows == 1:
return s
split = numRows + (numRows - 2)
end = numRows - 1
first = s[::split]
last = s[end::split]
for i in range(1,end):
tmp1 = list(s[i::split])
tmp2 = list(s[split - i::split])
while tmp1 or tmp2:
if tmp1:
first += tmp1.pop(0)
if tmp2:
first += tmp2.pop(0)
return first + last
T = convert2("PAYPALISHIRING", 3)
print(T)
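# Sanity checks against the expected LeetCode "ZigZag Conversion" outputs:
assert convert2("PAYPALISHIRING", 3) == "PAHNAPLSIIGYIR"
assert convert2("PAYPALISHIRING", 4) == "PINALSIGYAHRPI"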
# for r in T:
# for c in r:
# print(c,end = " ")
# print() | 25 | 44 | 0.396 |
e756ca33740f7e77b79ec23db87016333edca065 | 34,133 | py | Python | synthetic_data/STLGenerateSignal_plotpaper.py | KTH-RPL-Planiacs/stl_multiclass | c4d7de8882780337382d266096d69cd2eb89d14e | [
"MIT"
] | null | null | null | synthetic_data/STLGenerateSignal_plotpaper.py | KTH-RPL-Planiacs/stl_multiclass | c4d7de8882780337382d266096d69cd2eb89d14e | [
"MIT"
] | null | null | null | synthetic_data/STLGenerateSignal_plotpaper.py | KTH-RPL-Planiacs/stl_multiclass | c4d7de8882780337382d266096d69cd2eb89d14e | [
"MIT"
] | null | null | null | from STL import STLFormula
import operator as operatorclass
import pulp as plp
import matplotlib.pyplot as plt
plt.rcParams['font.size'] = '12'
# plt.rcParams['mathtext.fontset'] = 'custom'
# plt.rcParams['mathtext.rm'] = 'Bitstream Vera Sans'
# plt.rcParams['mathtext.it'] = 'Bitstream Vera Sans:italic'
# plt.rcParams['mathtext.bf'] = 'Bitstream Vera Sans:bold'
plt.rcParams['text.usetex'] = True
plt.rcParams['text.latex.preamble'] = [r'\usepackage{amssymb}']
from matplotlib.path import Path
import matplotlib.patches as patches
import numpy as np
import random
#CONSTANTS
M = 100000
M_up = 100000
M_low = 0.000001
#HARDCODED
#TODO: manage more dimensions
NB_DIMENSIONS = 2
def generate_signal_milp_quantitative(phi,start,rand_area,U,epsilon,OPTIMIZE_ROBUSTNESS):
"""
Function generating a signal satisfying an STL Formula.
Takes as input:
* phi: an STL Formula
* start: a vector of the form [x0,y0] for the starting point coordinates
* rand_area: the domain on which signals are generated. rand_area = [lb,ub] where lb is the lower bound and ub the upper bound of the domain.
* U: a basic control policy standing for how much can move in 1 time stamp, i.e. \forall t \in [0,T], |s[t]-s[t+1]| < U \pm \epsilon
* epsilon: basic control policy parameter
* OPTIMIZE_ROBUSTNESS: a flag whether the robustness of the generated signal w.r.t. phi has to be maximized or not
The encoding details of the MILP optimization problem follow the quantitative encoding of Raman et al., "Model predictive control with signal temporal logic specifications" in 53rd IEEE Conference on Decision and Control. IEEE, 2014, pp. 81–87.
"""
dict_vars = {}
#objective, maximize robustness
rvar = plp.LpVariable('r_'+str(id(phi))+'_t_'+str(phi.horizon),cat='Continuous')
dict_vars['r_'+str(id(phi))+'_t_'+str(phi.horizon)] = rvar
#Initialize model
if OPTIMIZE_ROBUSTNESS:
opt_model = plp.LpProblem("MIP Model", plp.LpMaximize)
opt_model += rvar
else:
opt_model = plp.LpProblem("MIP Model")
#We want to optimize a signal. The lower and upperbounds are specified by the random area.
s = plp.LpVariable.dicts("s",(range(phi.horizon+1),range(NB_DIMENSIONS)),rand_area[0],rand_area[1],plp.LpContinuous)
#the start is specified
opt_model += s[0][0] == start[0]
opt_model += s[0][1] == start[1]
#basic control policy, i.e. how much can move in 1 time stamp
#\forall t \in [0,T], |s[t]-s[t+1]| < U \pm \epsilon
for t in range(0,phi.horizon):
opt_model += s[t+1][0]-s[t][0] <= random.uniform(U-epsilon,U+epsilon)
opt_model += -(s[t+1][0]-s[t][0]) <= random.uniform(U-epsilon,U+epsilon)
opt_model += s[t+1][1]-s[t][1] <= random.uniform(U-epsilon,U+epsilon)
opt_model += -(s[t+1][1]-s[t][1]) <= random.uniform(U-epsilon,U+epsilon)
#recursive function
def model_phi(phi,t,opt_model):
if isinstance(phi, STLFormula.Predicate):
try:
rvar = dict_vars['r_'+str(id(phi))+'_t_'+str(t)]
except KeyError:
rvar = plp.LpVariable('r_'+str(id(phi))+'_t_'+str(t),cat='Continuous')
dict_vars['r_'+str(id(phi))+'_t_'+str(t)] = rvar
if phi.operator == operatorclass.gt or phi.operator == operatorclass.ge:
opt_model += s[t][phi.pi_index_signal] - phi.mu == rvar
else:
opt_model += -s[t][phi.pi_index_signal] + phi.mu == rvar
elif isinstance(phi, STLFormula.TrueF):
try:
rvar = dict_vars['r_'+str(id(phi))+'_t_'+str(t)]
except KeyError:
rvar = plp.LpVariable('r_'+str(id(phi))+'_t_'+str(t),cat='Continuous')
dict_vars['r_'+str(id(phi))+'_t_'+str(t)] = rvar
opt_model += rvar >= M
elif isinstance(phi, STLFormula.FalseF):
try:
rvar = dict_vars['r_'+str(id(phi))+'_t_'+str(t)]
except KeyError:
rvar = plp.LpVariable('r_'+str(id(phi))+'_t_'+str(t),cat='Continuous')
dict_vars['r_'+str(id(phi))+'_t_'+str(t)] = rvar
opt_model += rvar <= -M
elif isinstance(phi, STLFormula.Conjunction):
model_phi(phi.first_formula,t,opt_model)
model_phi(phi.second_formula,t,opt_model)
try:
pvar1 = dict_vars['p_'+str(id(phi.first_formula))+'_t_'+str(t)]
except KeyError:
pvar1 = plp.LpVariable('p_'+str(id(phi.first_formula))+'_t_'+str(t),cat='Binary')
dict_vars['p_'+str(id(phi.first_formula))+'_t_'+str(t)] = pvar1
try:
pvar2 = dict_vars['p_'+str(id(phi.second_formula))+'_t_'+str(t)]
except KeyError:
pvar2 = plp.LpVariable('p_'+str(id(phi.second_formula))+'_t_'+str(t),cat='Binary')
dict_vars['p_'+str(id(phi.second_formula))+'_t_'+str(t)] = pvar2
try:
rvar = dict_vars['r_'+str(id(phi))+'_t_'+str(t)]
except KeyError:
rvar = plp.LpVariable('r_'+str(id(phi))+'_t_'+str(t),cat='Continuous')
dict_vars['r_'+str(id(phi))+'_t_'+str(t)] = rvar
opt_model += pvar1+pvar2 == 1 #(3)
opt_model += rvar <= dict_vars['r_'+str(id(phi.first_formula))+'_t_'+str(t)] #(4)
opt_model += rvar <= dict_vars['r_'+str(id(phi.second_formula))+'_t_'+str(t)] #(4)
opt_model += dict_vars['r_'+str(id(phi.first_formula))+'_t_'+str(t)] - (1 - pvar1)*M <= rvar <= dict_vars['r_'+str(id(phi.first_formula))+'_t_'+str(t)] + (1 - pvar1)*M #(5)
opt_model += dict_vars['r_'+str(id(phi.second_formula))+'_t_'+str(t)] - (1 - pvar2)*M <= rvar <= dict_vars['r_'+str(id(phi.second_formula))+'_t_'+str(t)] + (1 - pvar2)*M #(5)
elif isinstance(phi, STLFormula.Disjunction):
model_phi(phi.first_formula,t,opt_model)
model_phi(phi.second_formula,t,opt_model)
try:
pvar1 = dict_vars['p_'+str(id(phi.first_formula))+'_t_'+str(t)]
except KeyError:
pvar1 = plp.LpVariable('p_'+str(id(phi.first_formula))+'_t_'+str(t),cat='Binary')
dict_vars['p_'+str(id(phi.first_formula))+'_t_'+str(t)] = pvar1
try:
pvar2 = dict_vars['p_'+str(id(phi.second_formula))+'_t_'+str(t)]
except KeyError:
pvar2 = plp.LpVariable('p_'+str(id(phi.second_formula))+'_t_'+str(t),cat='Binary')
dict_vars['p_'+str(id(phi.second_formula))+'_t_'+str(t)] = pvar2
try:
rvar = dict_vars['r_'+str(id(phi))+'_t_'+str(t)]
except KeyError:
rvar = plp.LpVariable('r_'+str(id(phi))+'_t_'+str(t),cat='Continuous')
dict_vars['r_'+str(id(phi))+'_t_'+str(t)] = rvar
opt_model += pvar1+pvar2 == 1 #(3)
opt_model += rvar >= dict_vars['r_'+str(id(phi.first_formula))+'_t_'+str(t)] #(4)
opt_model += rvar >= dict_vars['r_'+str(id(phi.second_formula))+'_t_'+str(t)] #(4)
opt_model += dict_vars['r_'+str(id(phi.first_formula))+'_t_'+str(t)] - (1 - pvar1)*M <= rvar <= dict_vars['r_'+str(id(phi.first_formula))+'_t_'+str(t)] + (1 - pvar1)*M #(5)
opt_model += dict_vars['r_'+str(id(phi.second_formula))+'_t_'+str(t)] - (1 - pvar2)*M <= rvar <= dict_vars['r_'+str(id(phi.second_formula))+'_t_'+str(t)] + (1 - pvar2)*M #(5)
elif isinstance(phi,STLFormula.Always):
try:
rvar = dict_vars['r_'+str(id(phi))+'_t_'+str(t)]
except KeyError:
rvar = plp.LpVariable('r_'+str(id(phi))+'_t_'+str(t),cat='Continuous')
dict_vars['r_'+str(id(phi))+'_t_'+str(t)] = rvar
for t_i in range(phi.t1,phi.t2+1):
model_phi(phi.formula,t_i,opt_model)
try:
pvar_i = dict_vars['p_'+str(id(phi.formula))+'_t_'+str(t_i)]
except KeyError:
pvar_i = plp.LpVariable('p_'+str(id(phi.formula))+'_t_'+str(t_i),cat='Binary')
dict_vars['p_'+str(id(phi.formula))+'_t_'+str(t_i)] = pvar_i
opt_model += rvar <= dict_vars['r_'+str(id(phi.formula))+'_t_'+str(t_i)] #(4)
opt_model += dict_vars['r_'+str(id(phi.formula))+'_t_'+str(t_i)] - (1 - pvar_i)*M <= rvar <= dict_vars['r_'+str(id(phi.formula))+'_t_'+str(t_i)] + (1 - pvar_i)*M #(5)
opt_model += plp.lpSum([dict_vars['p_'+str(id(phi.formula))+'_t_'+str(t_i)] for t_i in range(phi.t1,phi.t2+1)]) == 1 #(3)
elif isinstance(phi,STLFormula.Eventually):
try:
rvar = dict_vars['r_'+str(id(phi))+'_t_'+str(t)]
except KeyError:
rvar = plp.LpVariable('r_'+str(id(phi))+'_t_'+str(t),cat='Continuous')
dict_vars['r_'+str(id(phi))+'_t_'+str(t)] = rvar
for t_i in range(phi.t1,phi.t2+1):
model_phi(phi.formula,t_i,opt_model)
try:
pvar_i = dict_vars['p_'+str(id(phi.formula))+'_t_'+str(t_i)]
except KeyError:
pvar_i = plp.LpVariable('p_'+str(id(phi.formula))+'_t_'+str(t_i),cat='Binary')
dict_vars['p_'+str(id(phi.formula))+'_t_'+str(t_i)] = pvar_i
opt_model += rvar >= dict_vars['r_'+str(id(phi.formula))+'_t_'+str(t_i)] #(4)
opt_model += dict_vars['r_'+str(id(phi.formula))+'_t_'+str(t_i)] - (1 - pvar_i)*M <= rvar <= dict_vars['r_'+str(id(phi.formula))+'_t_'+str(t_i)] + (1 - pvar_i)*M #(5)
opt_model += plp.lpSum([dict_vars['p_'+str(id(phi.formula))+'_t_'+str(t_i)] for t_i in range(phi.t1,phi.t2+1)]) == 1 #(3)
elif isinstance(phi,STLFormula.Negation):
try:
rvar = dict_vars['r_'+str(id(phi))+'_t_'+str(t)]
except KeyError:
rvar = plp.LpVariable('r_'+str(id(phi))+'_t_'+str(t),cat='Continuous')
dict_vars['r_'+str(id(phi))+'_t_'+str(t)] = rvar
model_phi(phi.formula,t,opt_model)
try:
rvar_i = dict_vars['p_'+str(id(phi.formula))+'_t_'+str(t)]
except KeyError:
rvar_i = plp.LpVariable('p_'+str(id(phi.formula))+'_t_'+str(t),cat='Binary')
dict_vars['p_'+str(id(phi.formula))+'_t_'+str(t)] = rvar_i
opt_model += rvar == -rvar_i
model_phi(phi,phi.horizon,opt_model)
rvar = dict_vars['r_'+str(id(phi))+'_t_'+str(phi.horizon)]
opt_model += rvar >= 0
opt_model.solve(plp.GUROBI_CMD(msg=False))
if s[0][0].varValue is None:
raise Exception("")
return [[s[j][i].varValue for i in range(NB_DIMENSIONS)] for j in range(phi.horizon+1)]
def generate_signal_milp_boolean(phi,start,rand_area,U,epsilon):
"""
Function generating a signal satisfying an STL Formula.
Takes as input:
* phi: an STL Formula
* start: a vector of the form [x0,y0] for the starting point coordinates
* rand_area: the domain on which signals are generated. rand_area = [lb,ub] where lb is the lower bound and ub the upper bound of the domain.
* U: a basic control policy standing for how much can move in 1 time stamp, i.e. \forall t \in [0,T], |s[t]-s[t+1]| < U \pm \epsilon
* epsilon: basic control policy parameter
The encoding details of the MILP optimization problem follow the boolean encoding of Raman et al., "Model predictive control with signal temporal logic specifications" in 53rd IEEE Conference on Decision and Control. IEEE, 2014, pp. 81–87.
"""
dict_vars = {}
#satisfaction of phi
zvar1 = plp.LpVariable('z1_'+str(id(phi))+'_t_'+str(phi.horizon),cat='Binary')
dict_vars['z1_'+str(id(phi))+'_t_'+str(phi.horizon)] = zvar1
opt_model = plp.LpProblem("MIP Model")
#We want to optimize a signal. The lower and upperbounds are specified by the random area.
s = plp.LpVariable.dicts("s",(range(phi.horizon+1),range(NB_DIMENSIONS)),rand_area[0],rand_area[1],plp.LpContinuous)
#the start is specified
opt_model += s[0][0] == start[0]
opt_model += s[0][1] == start[1]
#control policy
for t in range(0,phi.horizon):
opt_model += s[t+1][0]-s[t][0] <= random.uniform(U-epsilon,U+epsilon)
opt_model += -(s[t+1][0]-s[t][0]) <= random.uniform(U-epsilon,U+epsilon)
opt_model += s[t+1][1]-s[t][1] <= random.uniform(U-epsilon,U+epsilon)
opt_model += -(s[t+1][1]-s[t][1]) <= random.uniform(U-epsilon,U+epsilon)
#recursive function
def model_phi1(phi,t,opt_model):
if isinstance(phi, STLFormula.TrueF):
try:
zvar = dict_vars['z1_'+str(id(phi))+'_t_'+str(t)]
except KeyError:
zvar = plp.LpVariable('z1_'+str(id(phi))+'_t_'+str(t),cat='Binary')
dict_vars['z1_'+str(id(phi))+'_t_'+str(t)] = zvar
opt_model += zvar == 1
elif isinstance(phi, STLFormula.FalseF):
try:
zvar = dict_vars['z1_'+str(id(phi))+'_t_'+str(t)]
except KeyError:
zvar = plp.LpVariable('z1_'+str(id(phi))+'_t_'+str(t),cat='Binary')
dict_vars['z1_'+str(id(phi))+'_t_'+str(t)] = zvar
opt_model += zvar == 0
if isinstance(phi, STLFormula.Predicate):
try:
zvar = dict_vars['z1_'+str(id(phi))+'_t_'+str(t)]
except KeyError:
zvar = plp.LpVariable('z1_'+str(id(phi))+'_t_'+str(t),cat='Binary')
dict_vars['z1_'+str(id(phi))+'_t_'+str(t)] = zvar
if phi.operator == operatorclass.gt or phi.operator == operatorclass.ge:
opt_model += s[t][phi.pi_index_signal] - phi.mu <= M_up*zvar-M_low
opt_model += -(s[t][phi.pi_index_signal] - phi.mu) <= M_up*(1-zvar)-M_low
else:
opt_model += -s[t][phi.pi_index_signal] + phi.mu <= M_up*zvar-M_low
opt_model += -(-s[t][phi.pi_index_signal] + phi.mu) <= M_up*(1-zvar)-M_low
elif isinstance(phi, STLFormula.Negation):
model_phi1(phi.formula,t,opt_model)
try:
zvar1 = dict_vars['z1_'+str(id(phi.formula))+'_t_'+str(t)]
except KeyError:
zvar1 = plp.LpVariable('z1_'+str(id(phi.formula))+'_t_'+str(t),cat='Binary')
dict_vars['z1_'+str(id(phi.formula))+'_t_'+str(t)] = zvar1
try:
zvar = dict_vars['z1_'+str(id(phi))+'_t_'+str(t)]
except KeyError:
zvar = plp.LpVariable('z1_'+str(id(phi))+'_t_'+str(t),cat='Binary')
dict_vars['z1_'+str(id(phi))+'_t_'+str(t)] = zvar
opt_model += zvar == 1-zvar1
elif isinstance(phi, STLFormula.Conjunction):
model_phi1(phi.first_formula,t,opt_model)
model_phi1(phi.second_formula,t,opt_model)
try:
zvar1 = dict_vars['z1_'+str(id(phi.first_formula))+'_t_'+str(t)]
except KeyError:
zvar1 = plp.LpVariable('z1_'+str(id(phi.first_formula))+'_t_'+str(t),cat='Binary')
dict_vars['z1_'+str(id(phi.first_formula))+'_t_'+str(t)] = zvar1
try:
zvar2 = dict_vars['z1_'+str(id(phi.second_formula))+'_t_'+str(t)]
except KeyError:
zvar2 = plp.LpVariable('z1_'+str(id(phi.second_formula))+'_t_'+str(t),cat='Binary')
dict_vars['z1_'+str(id(phi.second_formula))+'_t_'+str(t)] = zvar2
try:
zvar = dict_vars['z1_'+str(id(phi))+'_t_'+str(t)]
except KeyError:
zvar = plp.LpVariable('z1_'+str(id(phi))+'_t_'+str(t),cat='Binary')
dict_vars['z1_'+str(id(phi))+'_t_'+str(t)] = zvar
opt_model += zvar <= zvar1
opt_model += zvar <= zvar2
opt_model += zvar >= 1-2+zvar1+zvar2
elif isinstance(phi, STLFormula.Disjunction):
model_phi1(phi.first_formula,t,opt_model)
model_phi1(phi.second_formula,t,opt_model)
try:
zvar1 = dict_vars['z1_'+str(id(phi.first_formula))+'_t_'+str(t)]
except KeyError:
zvar1 = plp.LpVariable('z1_'+str(id(phi.first_formula))+'_t_'+str(t),cat='Binary')
dict_vars['z1_'+str(id(phi.first_formula))+'_t_'+str(t)] = zvar1
try:
zvar2 = dict_vars['z1_'+str(id(phi.second_formula))+'_t_'+str(t)]
except KeyError:
zvar2 = plp.LpVariable('z1_'+str(id(phi.second_formula))+'_t_'+str(t),cat='Binary')
dict_vars['z1_'+str(id(phi.second_formula))+'_t_'+str(t)] = zvar2
try:
zvar = dict_vars['z1_'+str(id(phi))+'_t_'+str(t)]
except KeyError:
zvar = plp.LpVariable('z1_'+str(id(phi))+'_t_'+str(t),cat='Binary')
dict_vars['z1_'+str(id(phi))+'_t_'+str(t)] = zvar
opt_model += zvar >= zvar1
opt_model += zvar >= zvar2
opt_model += zvar <= zvar1+zvar2
elif isinstance(phi,STLFormula.Always):
try:
zvar = dict_vars['z1_'+str(id(phi))+'_t_'+str(t)]
except KeyError:
zvar = plp.LpVariable('z1_'+str(id(phi))+'_t_'+str(t),cat='Binary')
dict_vars['z1_'+str(id(phi))+'_t_'+str(t)] = zvar
for t_i in range(phi.t1,phi.t2+1):
model_phi1(phi.formula,t_i,opt_model)
try:
zvar_i = dict_vars['z1_'+str(id(phi.formula))+'_t_'+str(t_i)]
except KeyError:
zvar_i = plp.LpVariable('z1_'+str(id(phi.formula))+'_t_'+str(t_i),cat='Binary')
dict_vars['z1_'+str(id(phi.formula))+'_t_'+str(t_i)] = zvar_i
opt_model += zvar <= zvar_i
opt_model += zvar >= 1 - (phi.t2+1-phi.t1) + plp.lpSum([dict_vars['z1_'+str(id(phi.formula))+'_t_'+str(t_i)] for t_i in range(phi.t1,phi.t2+1)])
elif isinstance(phi,STLFormula.Eventually):
try:
zvar = dict_vars['z1_'+str(id(phi))+'_t_'+str(t)]
except KeyError:
zvar = plp.LpVariable('z1_'+str(id(phi))+'_t_'+str(t),cat='Binary')
dict_vars['z1_'+str(id(phi))+'_t_'+str(t)] = zvar
for t_i in range(phi.t1,phi.t2+1):
model_phi1(phi.formula,t_i,opt_model)
try:
zvar_i = dict_vars['z1_'+str(id(phi.formula))+'_t_'+str(t_i)]
except KeyError:
zvar_i = plp.LpVariable('z1_'+str(id(phi.formula))+'_t_'+str(t_i),cat='Binary')
dict_vars['z1_'+str(id(phi.formula))+'_t_'+str(t_i)] = zvar_i
opt_model += zvar >= zvar_i
opt_model += zvar <= plp.lpSum([dict_vars['z1_'+str(id(phi.formula))+'_t_'+str(t_i)] for t_i in range(phi.t1,phi.t2+1)])
model_phi1(phi,phi.horizon,opt_model)
opt_model += zvar1 == 1
opt_model.solve(plp.GUROBI_CMD(msg=False))
if s[0][0].varValue is None:
raise Exception("")
return [[s[j][i].varValue for i in range(NB_DIMENSIONS)] for j in range(phi.horizon+1)]
if __name__ == '__main__':
#CONSTANTS
INDEX_X = 0
INDEX_Y = 1
#Definition of STL Formulae
#Phi1
predicate_x_gt_min2 = STLFormula.Predicate('x',operatorclass.gt,-2.25,INDEX_X)
predicate_x_le_2 = STLFormula.Predicate('x',operatorclass.le,2.25,INDEX_X)
predicate_y_gt_min1 = STLFormula.Predicate('y',operatorclass.gt,-1.25,INDEX_Y)
predicate_y_le_1 = STLFormula.Predicate('y',operatorclass.le,1.25,INDEX_Y)
phi1 = STLFormula.toNegationNormalForm(STLFormula.Always(STLFormula.Negation(STLFormula.Conjunction(STLFormula.Conjunction(predicate_x_gt_min2,predicate_x_le_2),STLFormula.Conjunction(predicate_y_gt_min1,predicate_y_le_1))),0,100),False)
#Phi2
predicate_x_gt_min6 = STLFormula.Predicate('x',operatorclass.gt,-6,INDEX_X)
predicate_x_le_min2 = STLFormula.Predicate('x',operatorclass.le,-2,INDEX_X)
predicate_y_gt_min4 = STLFormula.Predicate('y',operatorclass.gt,-4,INDEX_Y)
predicate_y_le_min3 = STLFormula.Predicate('y',operatorclass.le,-3,INDEX_Y)
phi2 = STLFormula.Always(STLFormula.Conjunction(STLFormula.Conjunction(predicate_x_gt_min6,predicate_x_le_min2),STLFormula.Conjunction(predicate_y_gt_min4,predicate_y_le_min3)),10,15)
#Phi3
predicate_x_gt_6 = STLFormula.Predicate('x',operatorclass.gt,6,INDEX_X)
predicate_x_le_8 = STLFormula.Predicate('x',operatorclass.le,8,INDEX_X)
predicate_y_gt_min6 = STLFormula.Predicate('y',operatorclass.gt,-6,INDEX_Y)
predicate_y_le_min2 = STLFormula.Predicate('y',operatorclass.le,-2,INDEX_Y)
phi3 = STLFormula.Always(STLFormula.Conjunction(STLFormula.Conjunction(predicate_x_gt_6,predicate_x_le_8),STLFormula.Conjunction(predicate_y_gt_min6,predicate_y_le_min2)),25,30)
#Phi4
predicate_x_gt_min6 = STLFormula.Predicate('x',operatorclass.gt,-6,INDEX_X)
predicate_x_le_min4 = STLFormula.Predicate('x',operatorclass.le,-4,INDEX_X)
predicate_y_gt_3 = STLFormula.Predicate('y',operatorclass.gt,3,INDEX_Y)
predicate_y_le_4 = STLFormula.Predicate('y',operatorclass.le,4,INDEX_Y)
phi4 = STLFormula.Eventually(STLFormula.Conjunction(STLFormula.Conjunction(predicate_x_gt_min6,predicate_x_le_min4),STLFormula.Conjunction(predicate_y_gt_3,predicate_y_le_4)),40,80)
#Phi5
predicate_x_gt_1 = STLFormula.Predicate('x',operatorclass.gt,1,INDEX_X)
predicate_x_le_4 = STLFormula.Predicate('x',operatorclass.le,4,INDEX_X)
predicate_y_gt_2 = STLFormula.Predicate('y',operatorclass.gt,2,INDEX_Y)
predicate_y_le_4 = STLFormula.Predicate('y',operatorclass.le,4,INDEX_Y)
phi5 = STLFormula.Eventually(STLFormula.Conjunction(STLFormula.Conjunction(predicate_x_gt_1,predicate_x_le_4),STLFormula.Conjunction(predicate_y_gt_2,predicate_y_le_4)),50,70)
#Phi6
predicate_x_gt_min3 = STLFormula.Predicate('x',operatorclass.gt,-3,INDEX_X)
predicate_x_le_min1 = STLFormula.Predicate('x',operatorclass.le,-1,INDEX_X)
predicate_y_gt_5 = STLFormula.Predicate('y',operatorclass.gt,5,INDEX_Y)
predicate_y_le_8 = STLFormula.Predicate('y',operatorclass.le,8,INDEX_Y)
phi6 = STLFormula.Always(STLFormula.Conjunction(STLFormula.Conjunction(predicate_x_gt_min3,predicate_x_le_min1),STLFormula.Conjunction(predicate_y_gt_5,predicate_y_le_8)),85,90)
#Phi7
predicate_x_gt_1 = STLFormula.Predicate('x',operatorclass.gt,1,INDEX_X)
predicate_x_le_3 = STLFormula.Predicate('x',operatorclass.le,3,INDEX_X)
predicate_y_gt_5 = STLFormula.Predicate('y',operatorclass.gt,5,INDEX_Y)
predicate_y_le_8 = STLFormula.Predicate('y',operatorclass.le,8,INDEX_Y)
phi7 = STLFormula.Always(STLFormula.Conjunction(STLFormula.Conjunction(predicate_x_gt_1,predicate_x_le_3),STLFormula.Conjunction(predicate_y_gt_5,predicate_y_le_8)),95,100)
#The different classes
c1 = STLFormula.Conjunction(STLFormula.Conjunction(phi1,phi2),phi6)
c2 = STLFormula.Conjunction(STLFormula.Conjunction(phi1,phi3),STLFormula.Conjunction(phi4,phi7))
c3 = STLFormula.Conjunction(STLFormula.Conjunction(phi2,phi5),phi7)
c1_c2 = STLFormula.Conjunction(STLFormula.Conjunction(STLFormula.Conjunction(phi1,phi2),phi3),STLFormula.Conjunction(STLFormula.Conjunction(phi4,phi6),phi7))
c1_c3 = STLFormula.Conjunction(STLFormula.Conjunction(STLFormula.Conjunction(phi1,phi2),phi5),STLFormula.Conjunction(phi6,phi7))
c2_c3 = STLFormula.Conjunction(STLFormula.Conjunction(STLFormula.Conjunction(phi1,phi2),STLFormula.Conjunction(phi3,phi4)),STLFormula.Conjunction(phi5,phi7))
c1_c2_c3 = STLFormula.Conjunction(STLFormula.Conjunction(STLFormula.Conjunction(phi1,phi2),STLFormula.Conjunction(phi3,phi4)),STLFormula.Conjunction(STLFormula.Conjunction(phi5,phi6),phi7))
#parameters
start=[0, -7]
rand_area=[-7.1, 8]
U = 0.4
epsilon = 0.1
#generation of 3 trajectories (quantitative no maximization, quantitative with maximization, boolean)
# trajectory1 = generate_signal_milp_quantitative(c1,start,rand_area,U,epsilon,False)
# trajectory2 = generate_signal_milp_quantitative(c1,start,rand_area,U,epsilon,True)
# trajectory1 = generate_signal_milp_boolean(c1,start,rand_area,U,epsilon)
trajectory1 = []
trajectory2 = []
trajectory3 = []
trajectory4 = []
trajectory5 = []
trajectory6 = []
trajectory7 = []
while not trajectory1:
try:
trajectory1 = generate_signal_milp_quantitative(c1,start,rand_area,U,epsilon,True)
print("found t1")
except Exception:
pass
while not trajectory2:
try:
trajectory2 = generate_signal_milp_quantitative(c2,start,rand_area,U,epsilon,True)
print("found t2")
except Exception:
pass
while not trajectory3:
try:
trajectory3 = generate_signal_milp_quantitative(c3,start,rand_area,U,epsilon,True)
print("found t3")
except Exception:
pass
while not trajectory4:
try:
# trajectory4 = generate_signal_milp_quantitative(c1_c2,start,rand_area,0.8,epsilon,True)
trajectory4 = generate_signal_milp_boolean(c1_c2,start,rand_area,0.8,epsilon)
print("found t4")
except Exception:
pass
while not trajectory5:
try:
# trajectory5 = generate_signal_milp_quantitative(c1_c3,start,rand_area,0.7,epsilon,True)
trajectory5 = generate_signal_milp_boolean(c1_c3,start,rand_area,0.7,epsilon)
print("found t5")
except Exception:
pass
while not trajectory6:
try:
trajectory6 = generate_signal_milp_quantitative(c2_c3,start,rand_area,0.8,epsilon,True)
print("found t6")
except Exception:
pass
while not trajectory7:
try:
# trajectory7 = generate_signal_milp_quantitative(c1_c2_c3,start,rand_area,0.8,epsilon,True)
trajectory7 = generate_signal_milp_boolean(c1_c2_c3,start,rand_area,0.8,epsilon)
print("found t7")
except Exception:
pass
# trajectory2 = generate_signal_milp_quantitative(c2,start,rand_area,U,epsilon,True)
# trajectory3 = generate_signal_milp_quantitative(c3,start,rand_area,U,epsilon,True)
# trajectory4 = generate_signal_milp_quantitative(c1_c2,start,rand_area,0.9,epsilon,True)
# trajectory5 = generate_signal_milp_quantitative(c1_c3,start,rand_area,0.8,epsilon,True)
# trajectory6 = generate_signal_milp_quantitative(c2_c3,start,rand_area,0.8,epsilon,True)
# trajectory7 = generate_signal_milp_quantitative(c1_c2_c3,start,rand_area,0.8,epsilon,True)
# trajectory1 = generate_signal_milp_quantitative(c1,start,rand_area,U,epsilon,False)
# trajectory2 = generate_signal_milp_quantitative(c2,start,rand_area,U,epsilon,False)
# trajectory3 = generate_signal_milp_quantitative(c3,start,rand_area,U,epsilon,False)
# trajectory4 = generate_signal_milp_quantitative(c1_c2,start,rand_area,0.9,epsilon,False)
# trajectory5 = generate_signal_milp_quantitative(c1_c3,start,rand_area,0.8,epsilon,False)
# trajectory6 = generate_signal_milp_quantitative(c2_c3,start,rand_area,0.8,epsilon,False)
# trajectory7 = generate_signal_milp_quantitative(c1_c2_c3,start,rand_area,0.8,epsilon,False)
# trajectory1 = generate_signal_milp_boolean(c1,start,rand_area,U,epsilon)
# trajectory2 = generate_signal_milp_boolean(c2,start,rand_area,U,epsilon)
# trajectory3 = generate_signal_milp_boolean(c3,start,rand_area,U,epsilon)
# trajectory4 = generate_signal_milp_boolean(c1_c2,start,rand_area,0.9,epsilon)
# trajectory5 = generate_signal_milp_boolean(c1_c3,start,rand_area,0.8,epsilon)
# trajectory6 = generate_signal_milp_boolean(c2_c3,start,rand_area,0.8,epsilon)
# trajectory7 = generate_signal_milp_boolean(c1_c2_c3,start,rand_area,0.8,epsilon)
#Plot
plt.clf()
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(111)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_xticks(list(range(-10,11,2)))
ax.set_yticks(list(range(-10,11,2)))
fig.tight_layout()
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
def label(xy, text):
y = xy[1] - 0.15 # shift y-value for label so that it's below the artist
plt.text(xy[0], y, text, ha="center", family='sans-serif', size=14)
path_phi2 = [
(-6., -4.), # left, bottom
(-6., -3.), # left, top
(-2., -3.), # right, top
(-2., -4.), # right, bottom
(0., 0.), # ignored
]
path4_2 = Path(path_phi2, codes)
patch4_2 = patches.PathPatch(path4_2, facecolor='darkgreen',lw=0)
ax.add_patch(patch4_2)
plt.text(-8, -4.6,'$\phi_2=\Box_{[10,15]}$')
path_phi3 = [
(6., -6.), # left, bottom
(6., -2.), # left, top
(8., -2.), # right, top
(8., -6.), # right, bottom
(0., 0.), # ignored
]
path4_3 = Path(path_phi3, codes)
patch4_3 = patches.PathPatch(path4_3, facecolor='darkgreen',lw=0)
ax.add_patch(patch4_3)
plt.text(6, -6.6,'$\phi_3=\Box_{[25,30]}$')
path_phi4 = [
(-6., 3.), # left, bottom
(-6., 4.), # left, top
(-4., 4.), # right, top
(-4., 3.), # right, bottom
(0., 0.), # ignored
]
path4_4 = Path(path_phi4, codes)
patch4_4 = patches.PathPatch(path4_4, facecolor='palegreen',lw=0)
ax.add_patch(patch4_4)
plt.text(-10, 3.25,'$\phi_4=\diamondsuit_{[40,80]}$')
path_phi5 = [
(1., 2.), # left, bottom
(1., 4.), # left, top
(4., 4.), # right, top
(4., 2.), # right, bottom
(0., 0.), # ignored
]
path4_5 = Path(path_phi5, codes)
patch4_5 = patches.PathPatch(path4_5, facecolor='palegreen',lw=0)
ax.add_patch(patch4_5)
plt.text(4.1, 2.1,'$\phi_5=\diamondsuit_{[50,70]}$')
path_phi6 = [
(-3., 5.), # left, bottom
(-3., 8.), # left, top
(-1., 8.), # right, top
(-1., 5.), # right, bottom
(0., 0.), # ignored
]
path4_6 = Path(path_phi6, codes)
patch4_6 = patches.PathPatch(path4_6, facecolor='darkgreen',lw=0)
ax.add_patch(patch4_6)
plt.text(-6.5, 8.2,'$\phi_6=\Box_{[85,90]}$')
path_phi7 = [
(1., 5.), # left, bottom
(1., 8.), # left, top
(3., 8.), # right, top
(3., 5.), # right, bottom
(0., 0.), # ignored
]
path4_7 = Path(path_phi7, codes)
patch4_7 = patches.PathPatch(path4_7, facecolor='darkgreen',lw=0)
ax.add_patch(patch4_7)
plt.text(3, 8.2,'$\phi_7=\Box_{[95,100]}$')
path_phi1 = [
(-2., -1.), # left, bottom
(-2., 1.), # left, top
(2., 1.), # right, top
(2., -1.), # right, bottom
(0., 0.), # ignored
]
path4_1 = Path(path_phi1, codes)
patch4_1 = patches.PathPatch(path4_1, facecolor='mistyrose',lw=0)
ax.add_patch(patch4_1)
plt.text(-1.95, -0.25,'$\phi_1=\Box_{[0,100]}\\neg$')
ax.plot([0], [-7], '-r', marker='X')
plt.gcf().canvas.mpl_connect('key_release_event',
lambda event: [exit(0) if event.key == 'escape' else None])
plt.axis([-10.2, 10.2, -10.2, 10.2])
plt.grid(True)
overlapping = 0.5
# ax.plot([x for (x, y) in trajectory1], [y for (x, y) in trajectory1], '-g', marker='o', label=r'quantitave $\rho='+str(round(c1.robustness(trajectory1,0),3))+'$')
ax.plot([x for (x, y) in trajectory1], [y for (x, y) in trajectory1], '-g', label=r'$\sigma_1 \in c_1$', alpha=overlapping)
plt.grid(True)
ax.plot([x for (x, y) in trajectory2],[y for (x, y) in trajectory2], '-b', label=r'$\sigma_2 \in c_2$', alpha=overlapping)
plt.grid(True)
ax.plot([x for (x, y) in trajectory3],[y for (x, y) in trajectory3], '-r', label=r'$\sigma_3 \in c_3$', alpha=overlapping)
plt.grid(True)
ax.plot([x for (x, y) in trajectory4],[y for (x, y) in trajectory4], '-c', label=r'$\sigma_4 \in \{c_1,c_2\}$', alpha=overlapping)
plt.grid(True)
ax.plot([x for (x, y) in trajectory5],[y for (x, y) in trajectory5], '-m', label=r'$\sigma_5 \in \{c_1,c_3\}$', alpha=overlapping)
plt.grid(True)
ax.plot([x for (x, y) in trajectory6],[y for (x, y) in trajectory6], '-y', label=r'$\sigma_6 \in \{c_2,c_3\}$', alpha=overlapping)
plt.grid(True)
ax.plot([x for (x, y) in trajectory7],[y for (x, y) in trajectory7], '-k', label=r'$\sigma_7 \in \{c_1,c_2,c_3\}$', alpha=overlapping)
plt.grid(True)
print("len t1",len(trajectory1))
print(trajectory1)
print("len t2",len(trajectory2))
print(trajectory2)
print("len t3",len(trajectory3))
print(trajectory3)
print("len t4",len(trajectory4))
print(trajectory4)
print("len t5",len(trajectory5))
print(trajectory5)
print("len t6",len(trajectory6))
print(trajectory6)
print("len t7",len(trajectory7))
print(trajectory7)
lgd = ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.075), ncol=3, shadow=True)
plt.savefig('synthetic_data.pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
| 48.831187 | 257 | 0.599889 |
aef45d31653564aa7938773007cc5e2370e40c7f | 1,095 | py | Python | simpleMoveExample/moveGlyphWinow.py | typemytype/RoboFontExamples | cc5668de3d27ce996cff5eadb32d0b905c2aff65 | [
"MIT"
] | 12 | 2015-01-03T12:34:18.000Z | 2022-01-24T13:11:35.000Z | simpleMoveExample/moveGlyphWinow.py | typemytype/RoboFontExamples | cc5668de3d27ce996cff5eadb32d0b905c2aff65 | [
"MIT"
] | null | null | null | simpleMoveExample/moveGlyphWinow.py | typemytype/RoboFontExamples | cc5668de3d27ce996cff5eadb32d0b905c2aff65 | [
"MIT"
] | 3 | 2015-01-02T13:38:09.000Z | 2016-05-06T21:36:01.000Z | from vanilla import *
class MoveGlyphWindow:
def __init__(self, glyph):
if glyph is None:
print "There should be a glyph window selected!!"
return
self.glyph = glyph
self.moveX = 0
self.moveY = 0
self.w = Window((200, 60), "Move %s" %self.glyph.name)
self.w.hs = Slider((10, 10, -10, 22), value=0,
maxValue=200,
minValue=-200,
callback=self.adjust)
self.w.vs = Slider((10, 30, -10, 22), value=0,
maxValue=200,
minValue=-200,
callback=self.adjust)
self.w.open()
def adjust(self, sender):
hValue = self.w.hs.get()
vValue = self.w.vs.get()
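# move the glyph by the offset between the previous and the current slider
# values, then remember the current values for the next callback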
x = self.moveX - hValue
y = self.moveY - vValue
self.moveX = hValue
self.moveY = vValue
self.glyph.move((x, y))
OpenWindow(MoveGlyphWindow, CurrentGlyph()) | 28.076923 | 62 | 0.447489 |
82a8045fc9226dd4c97954c5243f820b65670d08 | 669 | py | Python | frequently/tests/urls.py | bitlabstudio/django-frequently | 93c76af62325afd1f09487dd1bb527fdd238ec8e | [
"MIT"
] | 5 | 2016-12-08T21:40:54.000Z | 2020-04-08T07:05:22.000Z | frequently/tests/urls.py | bitlabstudio/django-frequently | 93c76af62325afd1f09487dd1bb527fdd238ec8e | [
"MIT"
] | null | null | null | frequently/tests/urls.py | bitlabstudio/django-frequently | 93c76af62325afd1f09487dd1bb527fdd238ec8e | [
"MIT"
] | 1 | 2019-11-29T13:35:05.000Z | 2019-11-29T13:35:05.000Z | """
This ``urls.py`` is only used when running the tests via ``runtests.py``.
As you know, every app must be hooked into your main ``urls.py`` so that
you can actually reach the app's views (provided it has any views, of course).
"""
from django.urls import include, path
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.views.generic import TemplateView
admin.autodiscover()
urlpatterns = [
path('admin/', admin.site.urls),
path('faq/', include('frequently.urls')),
path('test/', TemplateView.as_view(template_name='tag_test.html')),
]
urlpatterns += staticfiles_urlpatterns()
| 29.086957 | 78 | 0.735426 |
0efb0d3957d60399c035f4577df48d0e8efb7ca4 | 92,357 | py | Python | superset/viz.py | ITV/incubator-superset | 5814bdf699f1ff3da0e6f24ee803a611c5059752 | [
"Apache-2.0"
] | 1 | 2020-11-07T14:08:16.000Z | 2020-11-07T14:08:16.000Z | superset/viz.py | ITV/incubator-superset | 5814bdf699f1ff3da0e6f24ee803a611c5059752 | [
"Apache-2.0"
] | 7 | 2020-03-24T18:00:40.000Z | 2022-03-29T22:28:01.000Z | superset/viz.py | ITV/incubator-superset | 5814bdf699f1ff3da0e6f24ee803a611c5059752 | [
"Apache-2.0"
] | 1 | 2020-02-11T14:42:57.000Z | 2020-02-11T14:42:57.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
"""This module contains the 'Viz' objects
These objects represent the backend of all the visualizations that
Superset can render.
"""
import copy
import hashlib
import inspect
import logging
import math
import pickle as pkl
import re
import uuid
from collections import defaultdict, OrderedDict
from datetime import datetime, timedelta
from functools import reduce
from itertools import product
from typing import Any, Dict, List, Optional
import geohash
import numpy as np
import pandas as pd
import polyline
import simplejson as json
from dateutil import relativedelta as rdelta
from flask import request
from flask_babel import lazy_gettext as _
from geopy.point import Point
from markdown import markdown
from pandas.tseries.frequencies import to_offset
from superset import app, cache, get_css_manifest_files
from superset.constants import NULL_STRING
from superset.exceptions import NullValueException, SpatialException
from superset.utils import core as utils
from superset.utils.core import (
DTTM_ALIAS,
JS_MAX_INTEGER,
merge_extra_filters,
to_adhoc,
)
config = app.config
stats_logger = config["STATS_LOGGER"]
relative_start = config["DEFAULT_RELATIVE_START_TIME"]
relative_end = config["DEFAULT_RELATIVE_END_TIME"]
METRIC_KEYS = [
"metric",
"metrics",
"percent_metrics",
"metric_2",
"secondary_metric",
"x",
"y",
"size",
]
class BaseViz(object):
"""All visualizations derive this base class"""
viz_type: Optional[str] = None
verbose_name = "Base Viz"
credits = ""
is_timeseries = False
cache_type = "df"
enforce_numerical_metrics = True
def __init__(self, datasource, form_data, force=False):
if not datasource:
raise Exception(_("Viz is missing a datasource"))
self.datasource = datasource
self.request = request
self.viz_type = form_data.get("viz_type")
self.form_data = form_data
self.query = ""
self.token = self.form_data.get("token", "token_" + uuid.uuid4().hex[:8])
self.groupby = self.form_data.get("groupby") or []
self.time_shift = timedelta()
self.status = None
self.error_msg = ""
self.results = None
self.error_message = None
self.force = force
# Keeping track of whether some data came from cache
# this is useful to trigger the <CachedLabel /> when
# in the cases where visualization have many queries
# (FilterBox for instance)
self._some_from_cache = False
self._any_cache_key = None
self._any_cached_dttm = None
self._extra_chart_data = []
self.process_metrics()
def process_metrics(self):
# metrics in TableViz is order sensitive, so metric_dict should be
# OrderedDict
self.metric_dict = OrderedDict()
fd = self.form_data
for mkey in METRIC_KEYS:
val = fd.get(mkey)
if val:
if not isinstance(val, list):
val = [val]
for o in val:
label = utils.get_metric_name(o)
self.metric_dict[label] = o
# Cast to list needed to return serializable object in py3
self.all_metrics = list(self.metric_dict.values())
self.metric_labels = list(self.metric_dict.keys())
@staticmethod
def handle_js_int_overflow(data):
for d in data.get("records", dict()):
for k, v in list(d.items()):
if isinstance(v, int):
# if an int is too big for Java Script to handle
# convert it to a string
if abs(v) > JS_MAX_INTEGER:
d[k] = str(v)
return data
def run_extra_queries(self):
"""Lifecycle method to use when more than one query is needed
In rare-ish cases, a visualization may need to execute multiple
queries. That is the case for FilterBox or for time comparison
in Line chart for instance.
In those cases, we need to make sure these queries run before the
main `get_payload` method gets called, so that the overall caching
metadata can be right. The way it works here is that if any of
the previous `get_df_payload` calls hit the cache, the main
payload's metadata will reflect that.
The multi-query support may need more work to become a first class
use case in the framework, and for the UI to reflect the subtleties
(show that only some of the queries were served from cache for
instance). In the meantime, since multi-query is rare, we treat
it with a bit of a hack. Note that the hack became necessary
when moving from caching the visualization's data itself, to caching
the underlying query(ies).
"""
pass
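# Subclasses that need several queries (e.g. FilterBox, or time comparison in
# Line charts) override this method and call get_df_payload() for each extra
# query, so that any cache hit there is reflected in the main payload's metadata.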
def get_samples(self):
query_obj = self.query_obj()
query_obj.update(
{
"groupby": [],
"metrics": [],
"row_limit": 1000,
"columns": [o.column_name for o in self.datasource.columns],
}
)
df = self.get_df(query_obj)
return df.to_dict(orient="records")
def get_df(
self, query_obj: Optional[Dict[str, Any]] = None
) -> Optional[pd.DataFrame]:
"""Returns a pandas dataframe based on the query object"""
if not query_obj:
query_obj = self.query_obj()
if not query_obj:
return None
self.error_msg = ""
timestamp_format = None
if self.datasource.type == "table":
dttm_col = self.datasource.get_col(query_obj["granularity"])
if dttm_col:
timestamp_format = dttm_col.python_date_format
# The datasource here can be a different backend but the interface is common
self.results = self.datasource.query(query_obj)
self.query = self.results.query
self.status = self.results.status
self.error_message = self.results.error_message
df = self.results.df
# Transform the timestamp we received from database to pandas supported
# datetime format. If no python_date_format is specified, the pattern will
# be considered as the default ISO date format
# If the datetime format is unix, the parse will use the corresponding
# parsing logic.
if df is not None and not df.empty:
if DTTM_ALIAS in df.columns:
if timestamp_format in ("epoch_s", "epoch_ms"):
# Column has already been formatted as a timestamp.
dttm_col = df[DTTM_ALIAS]
one_ts_val = dttm_col[0]
# convert time column to pandas Timestamp, but different
# ways to convert depending on string or int types
try:
int(one_ts_val)
is_integral = True
except (ValueError, TypeError):
is_integral = False
if is_integral:
unit = "s" if timestamp_format == "epoch_s" else "ms"
df[DTTM_ALIAS] = pd.to_datetime(
dttm_col, utc=False, unit=unit, origin="unix"
)
else:
df[DTTM_ALIAS] = dttm_col.apply(pd.Timestamp)
else:
df[DTTM_ALIAS] = pd.to_datetime(
df[DTTM_ALIAS], utc=False, format=timestamp_format
)
if self.datasource.offset:
df[DTTM_ALIAS] += timedelta(hours=self.datasource.offset)
df[DTTM_ALIAS] += self.time_shift
if self.enforce_numerical_metrics:
self.df_metrics_to_num(df)
df.replace([np.inf, -np.inf], np.nan, inplace=True)
return df
def df_metrics_to_num(self, df):
"""Converting metrics to numeric when pandas.read_sql cannot"""
metrics = self.metric_labels
for col, dtype in df.dtypes.items():
if dtype.type == np.object_ and col in metrics:
df[col] = pd.to_numeric(df[col], errors="coerce")
def process_query_filters(self):
utils.convert_legacy_filters_into_adhoc(self.form_data)
merge_extra_filters(self.form_data)
utils.split_adhoc_filters_into_base_filters(self.form_data)
def query_obj(self):
"""Building a query object"""
form_data = self.form_data
self.process_query_filters()
gb = form_data.get("groupby") or []
metrics = self.all_metrics or []
columns = form_data.get("columns") or []
groupby = []
for o in gb + columns:
if o not in groupby:
groupby.append(o)
is_timeseries = self.is_timeseries
if DTTM_ALIAS in groupby:
groupby.remove(DTTM_ALIAS)
is_timeseries = True
granularity = form_data.get("granularity") or form_data.get("granularity_sqla")
limit = int(form_data.get("limit") or 0)
timeseries_limit_metric = form_data.get("timeseries_limit_metric")
row_limit = int(form_data.get("row_limit") or config["ROW_LIMIT"])
# default order direction
order_desc = form_data.get("order_desc", True)
since, until = utils.get_since_until(
relative_start=relative_start,
relative_end=relative_end,
time_range=form_data.get("time_range"),
since=form_data.get("since"),
until=form_data.get("until"),
)
time_shift = form_data.get("time_shift", "")
self.time_shift = utils.parse_past_timedelta(time_shift)
from_dttm = None if since is None else (since - self.time_shift)
to_dttm = None if until is None else (until - self.time_shift)
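# Both bounds are pushed back by the optional time_shift (e.g. "1 week"),
# so the queried window itself moves into the past by that amount.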
if from_dttm and to_dttm and from_dttm > to_dttm:
raise Exception(_("From date cannot be larger than to date"))
self.from_dttm = from_dttm
self.to_dttm = to_dttm
# extras are used to query elements specific to a datasource type
# for instance the extra where clause that applies only to Tables
extras = {
"druid_time_origin": form_data.get("druid_time_origin", ""),
"having": form_data.get("having", ""),
"having_druid": form_data.get("having_filters", []),
"time_grain_sqla": form_data.get("time_grain_sqla", ""),
"time_range_endpoints": form_data.get("time_range_endpoints"),
"where": form_data.get("where", ""),
}
d = {
"granularity": granularity,
"from_dttm": from_dttm,
"to_dttm": to_dttm,
"is_timeseries": is_timeseries,
"groupby": groupby,
"metrics": metrics,
"row_limit": row_limit,
"filter": self.form_data.get("filters", []),
"timeseries_limit": limit,
"extras": extras,
"timeseries_limit_metric": timeseries_limit_metric,
"order_desc": order_desc,
}
return d
@property
def cache_timeout(self):
if self.form_data.get("cache_timeout") is not None:
return int(self.form_data.get("cache_timeout"))
if self.datasource.cache_timeout is not None:
return self.datasource.cache_timeout
if (
hasattr(self.datasource, "database")
and self.datasource.database.cache_timeout is not None
):
return self.datasource.database.cache_timeout
return config["CACHE_DEFAULT_TIMEOUT"]
def get_json(self):
return json.dumps(
self.get_payload(), default=utils.json_int_dttm_ser, ignore_nan=True
)
def cache_key(self, query_obj, **extra):
"""
The cache key is made out of the key/values in `query_obj`, plus any
other key/values in `extra`.
We remove datetime bounds that are hard values, and replace them with
the user-provided inputs to bounds, which may be time-relative (as in
"5 days ago" or "now").
The `extra` arguments are currently used by time shift queries, since
different time shifts will differ only in the `from_dttm` and `to_dttm`
values which are stripped.
"""
cache_dict = copy.copy(query_obj)
cache_dict.update(extra)
for k in ["from_dttm", "to_dttm"]:
del cache_dict[k]
cache_dict["time_range"] = self.form_data.get("time_range")
cache_dict["datasource"] = self.datasource.uid
cache_dict["extra_cache_keys"] = self.datasource.get_extra_cache_keys(query_obj)
json_data = self.json_dumps(cache_dict, sort_keys=True)
return hashlib.md5(json_data.encode("utf-8")).hexdigest()
def get_payload(self, query_obj=None):
"""Returns a payload of metadata and data"""
self.run_extra_queries()
payload = self.get_df_payload(query_obj)
df = payload.get("df")
if self.status != utils.QueryStatus.FAILED:
if df is not None and df.empty:
payload["error"] = "No data"
else:
payload["data"] = self.get_data(df)
if "df" in payload:
del payload["df"]
return payload
def get_df_payload(self, query_obj=None, **kwargs):
"""Handles caching around the df payload retrieval"""
if not query_obj:
query_obj = self.query_obj()
cache_key = self.cache_key(query_obj, **kwargs) if query_obj else None
logging.info("Cache key: {}".format(cache_key))
is_loaded = False
stacktrace = None
df = None
cached_dttm = datetime.utcnow().isoformat().split(".")[0]
if cache_key and cache and not self.force:
cache_value = cache.get(cache_key)
if cache_value:
stats_logger.incr("loaded_from_cache")
try:
cache_value = pkl.loads(cache_value)
df = cache_value["df"]
self.query = cache_value["query"]
self._any_cached_dttm = cache_value["dttm"]
self._any_cache_key = cache_key
self.status = utils.QueryStatus.SUCCESS
is_loaded = True
except Exception as e:
logging.exception(e)
logging.error(
"Error reading cache: " + utils.error_msg_from_exception(e)
)
logging.info("Serving from cache")
if query_obj and not is_loaded:
try:
df = self.get_df(query_obj)
if self.status != utils.QueryStatus.FAILED:
stats_logger.incr("loaded_from_source")
is_loaded = True
except Exception as e:
logging.exception(e)
if not self.error_message:
self.error_message = "{}".format(e)
self.status = utils.QueryStatus.FAILED
stacktrace = utils.get_stacktrace()
if (
is_loaded
and cache_key
and cache
and self.status != utils.QueryStatus.FAILED
):
try:
cache_value = dict(
dttm=cached_dttm,
df=df if df is not None else None,
query=self.query,
)
cache_value = pkl.dumps(cache_value, protocol=pkl.HIGHEST_PROTOCOL)
logging.info(
"Caching {} chars at key {}".format(len(cache_value), cache_key)
)
stats_logger.incr("set_cache_key")
cache.set(cache_key, cache_value, timeout=self.cache_timeout)
except Exception as e:
# cache.set call can fail if the backend is down or if
# the key is too large or whatever other reasons
logging.warning("Could not cache key {}".format(cache_key))
logging.exception(e)
cache.delete(cache_key)
return {
"cache_key": self._any_cache_key,
"cached_dttm": self._any_cached_dttm,
"cache_timeout": self.cache_timeout,
"df": df,
"error": self.error_message,
"form_data": self.form_data,
"is_cached": self._any_cache_key is not None,
"query": self.query,
"status": self.status,
"stacktrace": stacktrace,
"rowcount": len(df.index) if df is not None else 0,
}
def json_dumps(self, obj, sort_keys=False):
return json.dumps(
obj, default=utils.json_int_dttm_ser, ignore_nan=True, sort_keys=sort_keys
)
def payload_json_and_has_error(self, payload):
has_error = (
payload.get("status") == utils.QueryStatus.FAILED
or payload.get("error") is not None
)
return self.json_dumps(payload), has_error
@property
def data(self):
"""This is the data object serialized to the js layer"""
content = {
"form_data": self.form_data,
"token": self.token,
"viz_name": self.viz_type,
"filter_select_enabled": self.datasource.filter_select_enabled,
}
return content
def get_csv(self):
df = self.get_df()
include_index = not isinstance(df.index, pd.RangeIndex)
return df.to_csv(index=include_index, **config["CSV_EXPORT"])
def get_data(self, df):
return df.to_dict(orient="records")
@property
def json_data(self):
return json.dumps(self.data)
class TableViz(BaseViz):
"""A basic html table that is sortable and searchable"""
viz_type = "table"
verbose_name = _("Table View")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = False
enforce_numerical_metrics = False
def should_be_timeseries(self):
fd = self.form_data
# TODO handle datasource-type-specific code in datasource
conditions_met = (fd.get("granularity") and fd.get("granularity") != "all") or (
fd.get("granularity_sqla") and fd.get("time_grain_sqla")
)
if fd.get("include_time") and not conditions_met:
raise Exception(
_("Pick a granularity in the Time section or " "uncheck 'Include Time'")
)
return fd.get("include_time")
def query_obj(self):
d = super().query_obj()
fd = self.form_data
if fd.get("all_columns") and (fd.get("groupby") or fd.get("metrics")):
raise Exception(
_(
"Choose either fields to [Group By] and [Metrics] or "
"[Columns], not both"
)
)
sort_by = fd.get("timeseries_limit_metric")
if fd.get("all_columns"):
d["columns"] = fd.get("all_columns")
d["groupby"] = []
order_by_cols = fd.get("order_by_cols") or []
d["orderby"] = [json.loads(t) for t in order_by_cols]
elif sort_by:
sort_by_label = utils.get_metric_name(sort_by)
if sort_by_label not in utils.get_metric_names(d["metrics"]):
d["metrics"] += [sort_by]
d["orderby"] = [(sort_by, not fd.get("order_desc", True))]
# Add all percent metrics that are not already in the list
if "percent_metrics" in fd:
d["metrics"] = d["metrics"] + list(
filter(lambda m: m not in d["metrics"], fd["percent_metrics"] or [])
)
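# e.g. with plain string metrics, metrics=["count"] and
# percent_metrics=["count", "sum__num"] yields d["metrics"] == ["count", "sum__num"]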
d["is_timeseries"] = self.should_be_timeseries()
return d
def get_data(self, df):
fd = self.form_data
if not self.should_be_timeseries() and df is not None and DTTM_ALIAS in df:
del df[DTTM_ALIAS]
# Sum up and compute percentages for all percent metrics
percent_metrics = fd.get("percent_metrics") or []
percent_metrics = [utils.get_metric_name(m) for m in percent_metrics]
if len(percent_metrics):
percent_metrics = list(filter(lambda m: m in df, percent_metrics))
metric_sums = {
m: reduce(lambda a, b: a + b, df[m]) for m in percent_metrics
}
metric_percents = {
m: list(
map(
lambda a: None if metric_sums[m] == 0 else a / metric_sums[m],
df[m],
)
)
for m in percent_metrics
}
for m in percent_metrics:
m_name = "%" + m
df[m_name] = pd.Series(metric_percents[m], name=m_name)
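# e.g. a percent metric "sum__num" adds a "%sum__num" column holding
# value / column_total for each row (None when the column total is 0)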
# Remove metrics that are not in the main metrics list
metrics = fd.get("metrics") or []
metrics = [utils.get_metric_name(m) for m in metrics]
for m in filter(
lambda m: m not in metrics and m in df.columns, percent_metrics
):
del df[m]
data = self.handle_js_int_overflow(
dict(records=df.to_dict(orient="records"), columns=list(df.columns))
)
return data
def json_dumps(self, obj, sort_keys=False):
return json.dumps(
obj, default=utils.json_iso_dttm_ser, sort_keys=sort_keys, ignore_nan=True
)
class TimeTableViz(BaseViz):
"""A data table with rich time-series related columns"""
viz_type = "time_table"
verbose_name = _("Time Table View")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = True
def query_obj(self):
d = super().query_obj()
fd = self.form_data
if not fd.get("metrics"):
raise Exception(_("Pick at least one metric"))
if fd.get("groupby") and len(fd.get("metrics")) > 1:
raise Exception(
_("When using 'Group By' you are limited to use a single metric")
)
return d
def get_data(self, df):
fd = self.form_data
columns = None
values = self.metric_labels
if fd.get("groupby"):
values = self.metric_labels[0]
columns = fd.get("groupby")
pt = df.pivot_table(index=DTTM_ALIAS, columns=columns, values=values)
pt.index = pt.index.map(str)
pt = pt.sort_index()
return dict(
records=pt.to_dict(orient="index"),
columns=list(pt.columns),
is_group_by=len(fd.get("groupby")) > 0,
)
class PivotTableViz(BaseViz):
"""A pivot table view, define your rows, columns and metrics"""
viz_type = "pivot_table"
verbose_name = _("Pivot Table")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = False
def query_obj(self):
d = super().query_obj()
groupby = self.form_data.get("groupby")
columns = self.form_data.get("columns")
metrics = self.form_data.get("metrics")
transpose = self.form_data.get("transpose_pivot")
if not columns:
columns = []
if not groupby:
groupby = []
if not groupby:
raise Exception(_("Please choose at least one 'Group by' field "))
if transpose and not columns:
raise Exception(
_(
(
"Please choose at least one 'Columns' field when "
"select 'Transpose Pivot' option"
)
)
)
if not metrics:
raise Exception(_("Please choose at least one metric"))
if any(v in groupby for v in columns) or any(v in columns for v in groupby):
raise Exception(_("Group By' and 'Columns' can't overlap"))
return d
def get_data(self, df):
if self.form_data.get("granularity") == "all" and DTTM_ALIAS in df:
del df[DTTM_ALIAS]
aggfunc = self.form_data.get("pandas_aggfunc") or "sum"
# Ensure that Pandas's sum function mimics that of SQL.
if aggfunc == "sum":
aggfunc = lambda x: x.sum(min_count=1)
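# (min_count=1 makes an all-NULL group aggregate to NaN rather than 0,
# matching what SQL's SUM returns for all-NULL input)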
groupby = self.form_data.get("groupby")
columns = self.form_data.get("columns")
if self.form_data.get("transpose_pivot"):
groupby, columns = columns, groupby
df = df.pivot_table(
index=groupby,
columns=columns,
values=[utils.get_metric_name(m) for m in self.form_data.get("metrics")],
aggfunc=aggfunc,
margins=self.form_data.get("pivot_margins"),
)
# Display metrics side by side with each column
if self.form_data.get("combine_metric"):
df = df.stack(0).unstack()
return dict(
columns=list(df.columns),
html=df.to_html(
na_rep="null",
classes=(
"dataframe table table-striped table-bordered "
"table-condensed table-hover"
).split(" "),
),
)
class MarkupViz(BaseViz):
"""Use html or markdown to create a free form widget"""
viz_type = "markup"
verbose_name = _("Markup")
is_timeseries = False
def query_obj(self):
return None
def get_df(
self, query_obj: Optional[Dict[str, Any]] = None
) -> Optional[pd.DataFrame]:
return None
def get_data(self, df):
markup_type = self.form_data.get("markup_type")
code = self.form_data.get("code", "")
if markup_type == "markdown":
code = markdown(code)
return dict(html=code, theme_css=get_css_manifest_files("theme"))
class SeparatorViz(MarkupViz):
"""Use to create section headers in a dashboard, similar to `Markup`"""
viz_type = "separator"
verbose_name = _("Separator")
class WordCloudViz(BaseViz):
"""Build a colorful word cloud
Uses the nice library at:
https://github.com/jasondavies/d3-cloud
"""
viz_type = "word_cloud"
verbose_name = _("Word Cloud")
is_timeseries = False
def query_obj(self):
d = super().query_obj()
d["groupby"] = [self.form_data.get("series")]
return d
class TreemapViz(BaseViz):
"""Tree map visualisation for hierarchical data."""
viz_type = "treemap"
verbose_name = _("Treemap")
credits = '<a href="https://d3js.org">d3.js</a>'
is_timeseries = False
def _nest(self, metric, df):
nlevels = df.index.nlevels
if nlevels == 1:
result = [{"name": n, "value": v} for n, v in zip(df.index, df[metric])]
else:
result = [
{"name": l, "children": self._nest(metric, df.loc[l])}
for l in df.index.levels[0]
]
return result
def get_data(self, df):
df = df.set_index(self.form_data.get("groupby"))
chart_data = [
{"name": metric, "children": self._nest(metric, df)}
for metric in df.columns
]
return chart_data
class CalHeatmapViz(BaseViz):
"""Calendar heatmap."""
viz_type = "cal_heatmap"
verbose_name = _("Calendar Heatmap")
credits = "<a href=https://github.com/wa0x6e/cal-heatmap>cal-heatmap</a>"
is_timeseries = True
def get_data(self, df):
form_data = self.form_data
data = {}
records = df.to_dict("records")
for metric in self.metric_labels:
values = {}
for obj in records:
v = obj[DTTM_ALIAS]
if hasattr(v, "value"):
v = v.value
values[str(v / 10 ** 9)] = obj.get(metric)
data[metric] = values
start, end = utils.get_since_until(
relative_start=relative_start,
relative_end=relative_end,
time_range=form_data.get("time_range"),
since=form_data.get("since"),
until=form_data.get("until"),
)
if not start or not end:
raise Exception("Please provide both time bounds (Since and Until)")
domain = form_data.get("domain_granularity")
diff_delta = rdelta.relativedelta(end, start)
diff_secs = (end - start).total_seconds()
if domain == "year":
range_ = diff_delta.years + 1
elif domain == "month":
range_ = diff_delta.years * 12 + diff_delta.months + 1
elif domain == "week":
range_ = diff_delta.years * 53 + diff_delta.weeks + 1
elif domain == "day":
range_ = diff_secs // (24 * 60 * 60) + 1
else:
range_ = diff_secs // (60 * 60) + 1
return {
"data": data,
"start": start,
"domain": domain,
"subdomain": form_data.get("subdomain_granularity"),
"range": range_,
}
def query_obj(self):
d = super().query_obj()
fd = self.form_data
d["metrics"] = fd.get("metrics")
return d
class NVD3Viz(BaseViz):
"""Base class for all nvd3 vizs"""
credits = '<a href="http://nvd3.org/">NVD3.org</a>'
viz_type: Optional[str] = None
verbose_name = "Base NVD3 Viz"
is_timeseries = False
class BoxPlotViz(NVD3Viz):
"""Box plot viz from ND3"""
viz_type = "box_plot"
verbose_name = _("Box Plot")
sort_series = False
is_timeseries = True
def to_series(self, df, classed="", title_suffix=""):
label_sep = " - "
chart_data = []
for index_value, row in zip(df.index, df.to_dict(orient="records")):
if isinstance(index_value, tuple):
index_value = label_sep.join(index_value)
boxes = defaultdict(dict)
for (label, key), value in row.items():
if key == "nanmedian":
key = "Q2"
boxes[label][key] = value
for label, box in boxes.items():
if len(self.form_data.get("metrics")) > 1:
# need to render data labels with metrics
chart_label = label_sep.join([index_value, label])
else:
chart_label = index_value
chart_data.append({"label": chart_label, "values": box})
return chart_data
def get_data(self, df):
form_data = self.form_data
# conform to NVD3 names
def Q1(series): # need to be named functions - can't use lambdas
return np.nanpercentile(series, 25)
def Q3(series):
return np.nanpercentile(series, 75)
whisker_type = form_data.get("whisker_options")
if whisker_type == "Tukey":
def whisker_high(series):
upper_outer_lim = Q3(series) + 1.5 * (Q3(series) - Q1(series))
return series[series <= upper_outer_lim].max()
def whisker_low(series):
lower_outer_lim = Q1(series) - 1.5 * (Q3(series) - Q1(series))
return series[series >= lower_outer_lim].min()
elif whisker_type == "Min/max (no outliers)":
def whisker_high(series):
return series.max()
def whisker_low(series):
return series.min()
elif " percentiles" in whisker_type:
low, high = whisker_type.replace(" percentiles", "").split("/")
def whisker_high(series):
return np.nanpercentile(series, int(high))
def whisker_low(series):
return np.nanpercentile(series, int(low))
else:
raise ValueError("Unknown whisker type: {}".format(whisker_type))
def outliers(series):
above = series[series > whisker_high(series)]
below = series[series < whisker_low(series)]
# pandas sometimes doesn't like getting lists back here
return set(above.tolist() + below.tolist())
aggregate = [Q1, np.nanmedian, Q3, whisker_high, whisker_low, outliers]
df = df.groupby(form_data.get("groupby")).agg(aggregate)
chart_data = self.to_series(df)
return chart_data
class BubbleViz(NVD3Viz):
"""Based on the NVD3 bubble chart"""
viz_type = "bubble"
verbose_name = _("Bubble Chart")
is_timeseries = False
def query_obj(self):
form_data = self.form_data
d = super().query_obj()
d["groupby"] = [form_data.get("entity")]
if form_data.get("series"):
d["groupby"].append(form_data.get("series"))
self.x_metric = form_data.get("x")
self.y_metric = form_data.get("y")
self.z_metric = form_data.get("size")
self.entity = form_data.get("entity")
self.series = form_data.get("series") or self.entity
d["row_limit"] = form_data.get("limit")
d["metrics"] = [self.z_metric, self.x_metric, self.y_metric]
if len(set(self.metric_labels)) < 3:
raise Exception(_("Please use 3 different metric labels"))
if not all(d["metrics"] + [self.entity]):
raise Exception(_("Pick a metric for x, y and size"))
return d
def get_data(self, df):
df["x"] = df[[utils.get_metric_name(self.x_metric)]]
df["y"] = df[[utils.get_metric_name(self.y_metric)]]
df["size"] = df[[utils.get_metric_name(self.z_metric)]]
df["shape"] = "circle"
df["group"] = df[[self.series]]
series = defaultdict(list)
for row in df.to_dict(orient="records"):
series[row["group"]].append(row)
chart_data = []
for k, v in series.items():
chart_data.append({"key": k, "values": v})
return chart_data
class BulletViz(NVD3Viz):
"""Based on the NVD3 bullet chart"""
viz_type = "bullet"
verbose_name = _("Bullet Chart")
is_timeseries = False
def query_obj(self):
form_data = self.form_data
d = super().query_obj()
self.metric = form_data.get("metric")
def as_strings(field):
value = form_data.get(field)
return value.split(",") if value else []
def as_floats(field):
return [float(x) for x in as_strings(field)]
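# e.g. a "ranges" value of "0,50,100" becomes [0.0, 50.0, 100.0]; empty fields become []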
self.ranges = as_floats("ranges")
self.range_labels = as_strings("range_labels")
self.markers = as_floats("markers")
self.marker_labels = as_strings("marker_labels")
self.marker_lines = as_floats("marker_lines")
self.marker_line_labels = as_strings("marker_line_labels")
d["metrics"] = [self.metric]
if not self.metric:
raise Exception(_("Pick a metric to display"))
return d
def get_data(self, df):
df["metric"] = df[[utils.get_metric_name(self.metric)]]
values = df["metric"].values
return {
"measures": values.tolist(),
"ranges": self.ranges or [0, values.max() * 1.1],
"rangeLabels": self.range_labels or None,
"markers": self.markers or None,
"markerLabels": self.marker_labels or None,
"markerLines": self.marker_lines or None,
"markerLineLabels": self.marker_line_labels or None,
}
class BigNumberViz(BaseViz):
"""Put emphasis on a single metric with this big number viz"""
viz_type = "big_number"
verbose_name = _("Big Number with Trendline")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = True
def query_obj(self):
d = super().query_obj()
metric = self.form_data.get("metric")
if not metric:
raise Exception(_("Pick a metric!"))
d["metrics"] = [self.form_data.get("metric")]
self.form_data["metric"] = metric
return d
class BigNumberTotalViz(BaseViz):
"""Put emphasis on a single metric with this big number viz"""
viz_type = "big_number_total"
verbose_name = _("Big Number")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = False
def query_obj(self):
d = super().query_obj()
metric = self.form_data.get("metric")
if not metric:
raise Exception(_("Pick a metric!"))
d["metrics"] = [self.form_data.get("metric")]
self.form_data["metric"] = metric
# Limiting rows is not required as only one cell is returned
d["row_limit"] = None
return d
class NVD3TimeSeriesViz(NVD3Viz):
"""A rich line chart component with tons of options"""
viz_type = "line"
verbose_name = _("Time Series - Line Chart")
sort_series = False
is_timeseries = True
pivot_fill_value: Optional[int] = None
def to_series(self, df, classed="", title_suffix=""):
cols = []
for col in df.columns:
if col == "":
cols.append("N/A")
elif col is None:
cols.append("NULL")
else:
cols.append(col)
df.columns = cols
series = df.to_dict("series")
chart_data = []
for name in df.T.index.tolist():
ys = series[name]
if df[name].dtype.kind not in "biufc":
continue
if isinstance(name, list):
series_title = [str(title) for title in name]
elif isinstance(name, tuple):
series_title = tuple(str(title) for title in name)
else:
series_title = str(name)
if (
isinstance(series_title, (list, tuple))
and len(series_title) > 1
and len(self.metric_labels) == 1
):
# Removing metric from series name if only one metric
series_title = series_title[1:]
if title_suffix:
if isinstance(series_title, str):
series_title = (series_title, title_suffix)
elif isinstance(series_title, (list, tuple)):
series_title = series_title + (title_suffix,)
values = []
non_nan_cnt = 0
for ds in df.index:
if ds in ys:
d = {"x": ds, "y": ys[ds]}
if not np.isnan(ys[ds]):
non_nan_cnt += 1
else:
d = {}
values.append(d)
if non_nan_cnt == 0:
continue
d = {"key": series_title, "values": values}
if classed:
d["classed"] = classed
chart_data.append(d)
return chart_data
def process_data(self, df, aggregate=False):
fd = self.form_data
if fd.get("granularity") == "all":
raise Exception(_("Pick a time granularity for your time series"))
if aggregate:
df = df.pivot_table(
index=DTTM_ALIAS,
columns=fd.get("groupby"),
values=self.metric_labels,
fill_value=0,
aggfunc=sum,
)
else:
df = df.pivot_table(
index=DTTM_ALIAS,
columns=fd.get("groupby"),
values=self.metric_labels,
fill_value=self.pivot_fill_value,
)
rule = fd.get("resample_rule")
method = fd.get("resample_method")
if rule and method:
df = getattr(df.resample(rule), method)()
if self.sort_series:
dfs = df.sum()
dfs.sort_values(ascending=False, inplace=True)
df = df[dfs.index]
rolling_type = fd.get("rolling_type")
rolling_periods = int(fd.get("rolling_periods") or 0)
min_periods = int(fd.get("min_periods") or 0)
if rolling_type in ("mean", "std", "sum") and rolling_periods:
kwargs = dict(window=rolling_periods, min_periods=min_periods)
if rolling_type == "mean":
df = df.rolling(**kwargs).mean()
elif rolling_type == "std":
df = df.rolling(**kwargs).std()
elif rolling_type == "sum":
df = df.rolling(**kwargs).sum()
elif rolling_type == "cumsum":
df = df.cumsum()
if min_periods:
df = df[min_periods:]
if fd.get("contribution"):
dft = df.T
df = (dft / dft.sum()).T
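# contribution mode: each row is divided by its row total, so the series
# values at every timestamp sum to 1 (e.g. [30, 70] -> [0.3, 0.7])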
return df
def run_extra_queries(self):
fd = self.form_data
time_compare = fd.get("time_compare") or []
# backwards compatibility
if not isinstance(time_compare, list):
time_compare = [time_compare]
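# For each requested shift (e.g. "1 week ago"), re-run the same query with both
# bounds moved back by the parsed delta, then shift the returned timestamps
# forward again so the shifted series lines up with the main one.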
for option in time_compare:
query_object = self.query_obj()
delta = utils.parse_past_timedelta(option)
query_object["inner_from_dttm"] = query_object["from_dttm"]
query_object["inner_to_dttm"] = query_object["to_dttm"]
if not query_object["from_dttm"] or not query_object["to_dttm"]:
raise Exception(
_(
"`Since` and `Until` time bounds should be specified "
"when using the `Time Shift` feature."
)
)
query_object["from_dttm"] -= delta
query_object["to_dttm"] -= delta
df2 = self.get_df_payload(query_object, time_compare=option).get("df")
if df2 is not None and DTTM_ALIAS in df2:
label = "{} offset".format(option)
df2[DTTM_ALIAS] += delta
df2 = self.process_data(df2)
self._extra_chart_data.append((label, df2))
def get_data(self, df):
fd = self.form_data
comparison_type = fd.get("comparison_type") or "values"
df = self.process_data(df)
if comparison_type == "values":
# Filter out series with all NaN
chart_data = self.to_series(df.dropna(axis=1, how="all"))
for i, (label, df2) in enumerate(self._extra_chart_data):
chart_data.extend(
self.to_series(
df2, classed="time-shift-{}".format(i), title_suffix=label
)
)
else:
chart_data = []
for i, (label, df2) in enumerate(self._extra_chart_data):
# reindex df2 onto the union of both indexes, interpolate over time, then align to df's index
combined_index = df.index.union(df2.index)
df2 = (
df2.reindex(combined_index)
.interpolate(method="time")
.reindex(df.index)
)
if comparison_type == "absolute":
diff = df - df2
elif comparison_type == "percentage":
diff = (df - df2) / df2
elif comparison_type == "ratio":
diff = df / df2
else:
raise Exception(
"Invalid `comparison_type`: {0}".format(comparison_type)
)
# remove leading/trailing NaNs from the time shift difference
diff = diff[diff.first_valid_index() : diff.last_valid_index()]
chart_data.extend(
self.to_series(
diff, classed="time-shift-{}".format(i), title_suffix=label
)
)
if not self.sort_series:
chart_data = sorted(chart_data, key=lambda x: tuple(x["key"]))
return chart_data
class MultiLineViz(NVD3Viz):
"""Pile on multiple line charts"""
viz_type = "line_multi"
verbose_name = _("Time Series - Multiple Line Charts")
is_timeseries = True
def query_obj(self):
return None
def get_data(self, df):
fd = self.form_data
# Late imports to avoid circular import issues
from superset.models.slice import Slice
from superset import db
slice_ids1 = fd.get("line_charts")
slices1 = db.session.query(Slice).filter(Slice.id.in_(slice_ids1)).all()
slice_ids2 = fd.get("line_charts_2")
slices2 = db.session.query(Slice).filter(Slice.id.in_(slice_ids2)).all()
return {
"slices": {
"axis1": [slc.data for slc in slices1],
"axis2": [slc.data for slc in slices2],
}
}
class NVD3DualLineViz(NVD3Viz):
"""A rich line chart with dual axis"""
viz_type = "dual_line"
verbose_name = _("Time Series - Dual Axis Line Chart")
sort_series = False
is_timeseries = True
def query_obj(self):
d = super().query_obj()
m1 = self.form_data.get("metric")
m2 = self.form_data.get("metric_2")
d["metrics"] = [m1, m2]
if not m1:
raise Exception(_("Pick a metric for left axis!"))
if not m2:
raise Exception(_("Pick a metric for right axis!"))
if m1 == m2:
raise Exception(
_("Please choose different metrics" " on left and right axis")
)
return d
def to_series(self, df, classed=""):
cols = []
for col in df.columns:
if col == "":
cols.append("N/A")
elif col is None:
cols.append("NULL")
else:
cols.append(col)
df.columns = cols
series = df.to_dict("series")
chart_data = []
metrics = [self.form_data.get("metric"), self.form_data.get("metric_2")]
for i, m in enumerate(metrics):
m = utils.get_metric_name(m)
ys = series[m]
if df[m].dtype.kind not in "biufc":
continue
series_title = m
d = {
"key": series_title,
"classed": classed,
"values": [
{"x": ds, "y": ys[ds] if ds in ys else None} for ds in df.index
],
"yAxis": i + 1,
"type": "line",
}
chart_data.append(d)
return chart_data
def get_data(self, df):
fd = self.form_data
if self.form_data.get("granularity") == "all":
raise Exception(_("Pick a time granularity for your time series"))
metric = utils.get_metric_name(fd.get("metric"))
metric_2 = utils.get_metric_name(fd.get("metric_2"))
df = df.pivot_table(index=DTTM_ALIAS, values=[metric, metric_2])
chart_data = self.to_series(df)
return chart_data
class NVD3TimeSeriesBarViz(NVD3TimeSeriesViz):
"""A bar chart where the x axis is time"""
viz_type = "bar"
sort_series = True
verbose_name = _("Time Series - Bar Chart")
class NVD3TimePivotViz(NVD3TimeSeriesViz):
"""Time Series - Periodicity Pivot"""
viz_type = "time_pivot"
sort_series = True
verbose_name = _("Time Series - Period Pivot")
def query_obj(self):
d = super().query_obj()
d["metrics"] = [self.form_data.get("metric")]
return d
def get_data(self, df):
fd = self.form_data
df = self.process_data(df)
freq = to_offset(fd.get("freq"))
try:
freq = type(freq)(freq.n, normalize=True, **freq.kwds)
except ValueError:
freq = type(freq)(freq.n, **freq.kwds)
df.index.name = None
df[DTTM_ALIAS] = df.index.map(freq.rollback)
df["ranked"] = df[DTTM_ALIAS].rank(method="dense", ascending=False) - 1
df.ranked = df.ranked.map(int)
df["series"] = "-" + df.ranked.map(str)
df["series"] = df["series"].str.replace("-0", "current")
rank_lookup = {
row["series"]: row["ranked"] for row in df.to_dict(orient="records")
}
max_ts = df[DTTM_ALIAS].max()
max_rank = df["ranked"].max()
df[DTTM_ALIAS] = df.index + (max_ts - df[DTTM_ALIAS])
df = df.pivot_table(
index=DTTM_ALIAS,
columns="series",
values=utils.get_metric_name(fd.get("metric")),
)
chart_data = self.to_series(df)
for serie in chart_data:
serie["rank"] = rank_lookup[serie["key"]]
serie["perc"] = 1 - (serie["rank"] / (max_rank + 1))
return chart_data
class NVD3CompareTimeSeriesViz(NVD3TimeSeriesViz):
"""A line chart component where you can compare the % change over time"""
viz_type = "compare"
verbose_name = _("Time Series - Percent Change")
class NVD3TimeSeriesStackedViz(NVD3TimeSeriesViz):
"""A rich stack area chart"""
viz_type = "area"
verbose_name = _("Time Series - Stacked")
sort_series = True
pivot_fill_value = 0
class DistributionPieViz(NVD3Viz):
"""Annoy visualization snobs with this controversial pie chart"""
viz_type = "pie"
verbose_name = _("Distribution - NVD3 - Pie Chart")
is_timeseries = False
def get_data(self, df):
metric = self.metric_labels[0]
df = df.pivot_table(index=self.groupby, values=[metric])
df.sort_values(by=metric, ascending=False, inplace=True)
df = df.reset_index()
df.columns = ["x", "y"]
return df.to_dict(orient="records")
class HistogramViz(BaseViz):
"""Histogram"""
viz_type = "histogram"
verbose_name = _("Histogram")
is_timeseries = False
def query_obj(self):
"""Returns the query object for this visualization"""
d = super().query_obj()
d["row_limit"] = self.form_data.get("row_limit", int(config["VIZ_ROW_LIMIT"]))
numeric_columns = self.form_data.get("all_columns_x")
if numeric_columns is None:
raise Exception(_("Must have at least one numeric column specified"))
self.columns = numeric_columns
d["columns"] = numeric_columns + self.groupby
# override groupby entry to avoid aggregation
d["groupby"] = []
return d
def labelify(self, keys, column):
if isinstance(keys, str):
keys = (keys,)
# removing undesirable characters
labels = [re.sub(r"\W+", r"_", k) for k in keys]
if len(self.columns) > 1 or not self.groupby:
# Only show numeric column in label if there are many
labels = [column] + labels
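# e.g. keys=("US", "web") and column="num_users" -> "num_users__US__web" when
# more than one numeric column is selected (or there is no group-by)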
return "__".join(labels)
def get_data(self, df):
"""Returns the chart data"""
chart_data = []
if len(self.groupby) > 0:
groups = df.groupby(self.groupby)
else:
groups = [((), df)]
for keys, data in groups:
chart_data.extend(
[
{
"key": self.labelify(keys, column),
"values": data[column].tolist(),
}
for column in self.columns
]
)
return chart_data
class DistributionBarViz(DistributionPieViz):
"""A good old bar chart"""
viz_type = "dist_bar"
verbose_name = _("Distribution - Bar Chart")
is_timeseries = False
def query_obj(self):
d = super().query_obj()
fd = self.form_data
if len(d["groupby"]) < len(fd.get("groupby") or []) + len(
fd.get("columns") or []
):
raise Exception(_("Can't have overlap between Series and Breakdowns"))
if not fd.get("metrics"):
raise Exception(_("Pick at least one metric"))
if not fd.get("groupby"):
raise Exception(_("Pick at least one field for [Series]"))
return d
def get_data(self, df):
fd = self.form_data
metrics = self.metric_labels
columns = fd.get("columns") or []
# pandas will throw away nulls when grouping/pivoting,
# so we substitute NULL_STRING for any nulls in the necessary columns
filled_cols = self.groupby + columns
df[filled_cols] = df[filled_cols].fillna(value=NULL_STRING)
row = df.groupby(self.groupby).sum()[metrics[0]].copy()
row.sort_values(ascending=False, inplace=True)
pt = df.pivot_table(index=self.groupby, columns=columns, values=metrics)
if fd.get("contribution"):
pt = pt.T
pt = (pt / pt.sum()).T
pt = pt.reindex(row.index)
chart_data = []
for name, ys in pt.items():
if pt[name].dtype.kind not in "biufc" or name in self.groupby:
continue
if isinstance(name, str):
series_title = name
else:
offset = 0 if len(metrics) > 1 else 1
series_title = ", ".join([str(s) for s in name[offset:]])
values = []
for i, v in ys.items():
x = i
if isinstance(x, (tuple, list)):
x = ", ".join([str(s) for s in x])
else:
x = str(x)
values.append({"x": x, "y": v})
d = {"key": series_title, "values": values}
chart_data.append(d)
return chart_data
class SunburstViz(BaseViz):
"""A multi level sunburst chart"""
viz_type = "sunburst"
verbose_name = _("Sunburst")
is_timeseries = False
credits = (
"Kerry Rodden "
'@<a href="https://bl.ocks.org/kerryrodden/7090426">bl.ocks.org</a>'
)
def get_data(self, df):
fd = self.form_data
cols = fd.get("groupby")
metric = utils.get_metric_name(fd.get("metric"))
secondary_metric = utils.get_metric_name(fd.get("secondary_metric"))
if metric == secondary_metric or secondary_metric is None:
df.columns = cols + ["m1"]
df["m2"] = df["m1"]
return json.loads(df.to_json(orient="values"))
def query_obj(self):
qry = super().query_obj()
fd = self.form_data
qry["metrics"] = [fd["metric"]]
secondary_metric = fd.get("secondary_metric")
if secondary_metric and secondary_metric != fd["metric"]:
qry["metrics"].append(secondary_metric)
return qry
class SankeyViz(BaseViz):
"""A Sankey diagram that requires a parent-child dataset"""
viz_type = "sankey"
verbose_name = _("Sankey")
is_timeseries = False
credits = '<a href="https://www.npmjs.com/package/d3-sankey">d3-sankey on npm</a>'
def query_obj(self):
qry = super().query_obj()
if len(qry["groupby"]) != 2:
raise Exception(_("Pick exactly 2 columns as [Source / Target]"))
qry["metrics"] = [self.form_data["metric"]]
return qry
def get_data(self, df):
df.columns = ["source", "target", "value"]
df["source"] = df["source"].astype(str)
df["target"] = df["target"].astype(str)
recs = df.to_dict(orient="records")
hierarchy = defaultdict(set)
for row in recs:
hierarchy[row["source"]].add(row["target"])
def find_cycle(g):
"""Whether there's a cycle in a directed graph"""
path = set()
def visit(vertex):
path.add(vertex)
for neighbour in g.get(vertex, ()):
if neighbour in path or visit(neighbour):
return (vertex, neighbour)
path.remove(vertex)
for v in g:
cycle = visit(v)
if cycle:
return cycle
cycle = find_cycle(hierarchy)
if cycle:
raise Exception(
_(
"There's a loop in your Sankey, please provide a tree. "
"Here's a faulty link: {}"
).format(cycle)
)
return recs
class DirectedForceViz(BaseViz):
"""An animated directed force layout graph visualization"""
viz_type = "directed_force"
verbose_name = _("Directed Force Layout")
credits = 'd3noob @<a href="http://bl.ocks.org/d3noob/5141278">bl.ocks.org</a>'
is_timeseries = False
def query_obj(self):
qry = super().query_obj()
if len(self.form_data["groupby"]) != 2:
raise Exception(_("Pick exactly 2 columns to 'Group By'"))
qry["metrics"] = [self.form_data["metric"]]
return qry
def get_data(self, df):
df.columns = ["source", "target", "value"]
return df.to_dict(orient="records")
class ChordViz(BaseViz):
"""A Chord diagram"""
viz_type = "chord"
verbose_name = _("Directed Force Layout")
credits = '<a href="https://github.com/d3/d3-chord">Bostock</a>'
is_timeseries = False
def query_obj(self):
qry = super().query_obj()
fd = self.form_data
qry["groupby"] = [fd.get("groupby"), fd.get("columns")]
qry["metrics"] = [utils.get_metric_name(fd.get("metric"))]
return qry
def get_data(self, df):
df.columns = ["source", "target", "value"]
# Preparing a symmetrical matrix like d3.chords calls for
nodes = list(set(df["source"]) | set(df["target"]))
matrix = {}
for source, target in product(nodes, nodes):
matrix[(source, target)] = 0
for source, target, value in df.to_records(index=False):
matrix[(source, target)] = value
m = [[matrix[(n1, n2)] for n1 in nodes] for n2 in nodes]
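# m[i][j] holds the flow from nodes[j] to nodes[i] (0 where no edge exists)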
return {"nodes": list(nodes), "matrix": m}
class CountryMapViz(BaseViz):
"""A country centric"""
viz_type = "country_map"
verbose_name = _("Country Map")
is_timeseries = False
credits = "From bl.ocks.org By john-guerra"
def query_obj(self):
qry = super().query_obj()
qry["metrics"] = [self.form_data["metric"]]
qry["groupby"] = [self.form_data["entity"]]
return qry
def get_data(self, df):
fd = self.form_data
cols = [fd.get("entity")]
metric = self.metric_labels[0]
cols += [metric]
ndf = df[cols]
df = ndf
df.columns = ["country_id", "metric"]
d = df.to_dict(orient="records")
return d
class WorldMapViz(BaseViz):
"""A country centric world map"""
viz_type = "world_map"
verbose_name = _("World Map")
is_timeseries = False
credits = 'datamaps on <a href="https://www.npmjs.com/package/datamaps">npm</a>'
def query_obj(self):
qry = super().query_obj()
qry["groupby"] = [self.form_data["entity"]]
return qry
def get_data(self, df):
from superset.examples import countries
fd = self.form_data
cols = [fd.get("entity")]
metric = utils.get_metric_name(fd.get("metric"))
secondary_metric = utils.get_metric_name(fd.get("secondary_metric"))
columns = ["country", "m1", "m2"]
if metric == secondary_metric:
ndf = df[cols]
ndf["m1"] = df[metric]
ndf["m2"] = ndf["m1"]
else:
if secondary_metric:
cols += [metric, secondary_metric]
else:
cols += [metric]
columns = ["country", "m1"]
ndf = df[cols]
df = ndf
df.columns = columns
d = df.to_dict(orient="records")
for row in d:
country = None
if isinstance(row["country"], str):
country = countries.get(fd.get("country_fieldtype"), row["country"])
if country:
row["country"] = country["cca3"]
row["latitude"] = country["lat"]
row["longitude"] = country["lng"]
row["name"] = country["name"]
else:
row["country"] = "XXX"
return d
class FilterBoxViz(BaseViz):
"""A multi filter, multi-choice filter box to make dashboards interactive"""
viz_type = "filter_box"
verbose_name = _("Filters")
is_timeseries = False
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
cache_type = "get_data"
filter_row_limit = 1000
def query_obj(self):
return None
def run_extra_queries(self):
qry = super().query_obj()
filters = self.form_data.get("filter_configs") or []
qry["row_limit"] = self.filter_row_limit
self.dataframes = {}
for flt in filters:
col = flt.get("column")
if not col:
raise Exception(
_("Invalid filter configuration, please select a column")
)
qry["groupby"] = [col]
metric = flt.get("metric")
qry["metrics"] = [metric] if metric else []
df = self.get_df_payload(query_obj=qry).get("df")
self.dataframes[col] = df
def get_data(self, df):
filters = self.form_data.get("filter_configs") or []
d = {}
for flt in filters:
col = flt.get("column")
metric = flt.get("metric")
df = self.dataframes.get(col)
if df is not None:
if metric:
df = df.sort_values(
utils.get_metric_name(metric), ascending=flt.get("asc")
)
d[col] = [
{"id": row[0], "text": row[0], "metric": row[1]}
for row in df.itertuples(index=False)
]
else:
df = df.sort_values(col, ascending=flt.get("asc"))
d[col] = [
{"id": row[0], "text": row[0]}
for row in df.itertuples(index=False)
]
return d
class IFrameViz(BaseViz):
"""You can squeeze just about anything in this iFrame component"""
viz_type = "iframe"
verbose_name = _("iFrame")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = False
def query_obj(self):
return None
def get_df(self, query_obj: Optional[Dict[str, Any]] = None) -> Optional[pd.DataFrame]:
return None
def get_data(self, df):
return {}
class ParallelCoordinatesViz(BaseViz):
"""Interactive parallel coordinate implementation
Uses this amazing javascript library
https://github.com/syntagmatic/parallel-coordinates
"""
viz_type = "para"
verbose_name = _("Parallel Coordinates")
credits = (
'<a href="https://syntagmatic.github.io/parallel-coordinates/">'
"Syntagmatic's library</a>"
)
is_timeseries = False
def query_obj(self):
d = super().query_obj()
fd = self.form_data
d["groupby"] = [fd.get("series")]
return d
def get_data(self, df):
return df.to_dict(orient="records")
class HeatmapViz(BaseViz):
"""A nice heatmap visualization that support high density through canvas"""
viz_type = "heatmap"
verbose_name = _("Heatmap")
is_timeseries = False
credits = (
'inspired from mbostock @<a href="http://bl.ocks.org/mbostock/3074470">'
"bl.ocks.org</a>"
)
def query_obj(self):
d = super().query_obj()
fd = self.form_data
d["metrics"] = [fd.get("metric")]
d["groupby"] = [fd.get("all_columns_x"), fd.get("all_columns_y")]
return d
def get_data(self, df):
fd = self.form_data
x = fd.get("all_columns_x")
y = fd.get("all_columns_y")
v = self.metric_labels[0]
if x == y:
df.columns = ["x", "y", "v"]
else:
df = df[[x, y, v]]
df.columns = ["x", "y", "v"]
norm = fd.get("normalize_across")
overall = False
max_ = df.v.max()
min_ = df.v.min()
if norm == "heatmap":
overall = True
else:
gb = df.groupby(norm, group_keys=False)
if len(gb) <= 1:
overall = True
else:
df["perc"] = gb.apply(
lambda x: (x.v - x.v.min()) / (x.v.max() - x.v.min())
)
df["rank"] = gb.apply(lambda x: x.v.rank(pct=True))
if overall:
df["perc"] = (df.v - min_) / (max_ - min_)
df["rank"] = df.v.rank(pct=True)
return {"records": df.to_dict(orient="records"), "extents": [min_, max_]}
class HorizonViz(NVD3TimeSeriesViz):
"""Horizon chart
https://www.npmjs.com/package/d3-horizon-chart
"""
viz_type = "horizon"
verbose_name = _("Horizon Charts")
credits = (
'<a href="https://www.npmjs.com/package/d3-horizon-chart">'
"d3-horizon-chart</a>"
)
class MapboxViz(BaseViz):
"""Rich maps made with Mapbox"""
viz_type = "mapbox"
verbose_name = _("Mapbox")
is_timeseries = False
credits = "<a href=https://www.mapbox.com/mapbox-gl-js/api/>Mapbox GL JS</a>"
def query_obj(self):
d = super().query_obj()
fd = self.form_data
label_col = fd.get("mapbox_label")
if not fd.get("groupby"):
if fd.get("all_columns_x") is None or fd.get("all_columns_y") is None:
raise Exception(_("[Longitude] and [Latitude] must be set"))
d["columns"] = [fd.get("all_columns_x"), fd.get("all_columns_y")]
if label_col and len(label_col) >= 1:
if label_col[0] == "count":
raise Exception(
_(
"Must have a [Group By] column to have 'count' as the "
+ "[Label]"
)
)
d["columns"].append(label_col[0])
if fd.get("point_radius") != "Auto":
d["columns"].append(fd.get("point_radius"))
d["columns"] = list(set(d["columns"]))
else:
# Ensuring columns chosen are all in group by
if (
label_col
and len(label_col) >= 1
and label_col[0] != "count"
and label_col[0] not in fd.get("groupby")
):
raise Exception(_("Choice of [Label] must be present in [Group By]"))
if fd.get("point_radius") != "Auto" and fd.get(
"point_radius"
) not in fd.get("groupby"):
raise Exception(
_("Choice of [Point Radius] must be present in [Group By]")
)
if fd.get("all_columns_x") not in fd.get("groupby") or fd.get(
"all_columns_y"
) not in fd.get("groupby"):
raise Exception(
_(
"[Longitude] and [Latitude] columns must be present in "
+ "[Group By]"
)
)
return d
def get_data(self, df):
if df is None:
return None
fd = self.form_data
label_col = fd.get("mapbox_label")
has_custom_metric = label_col is not None and len(label_col) > 0
metric_col = [None] * len(df.index)
if has_custom_metric:
if label_col[0] == fd.get("all_columns_x"):
metric_col = df[fd.get("all_columns_x")]
elif label_col[0] == fd.get("all_columns_y"):
metric_col = df[fd.get("all_columns_y")]
else:
metric_col = df[label_col[0]]
point_radius_col = (
[None] * len(df.index)
if fd.get("point_radius") == "Auto"
else df[fd.get("point_radius")]
)
# limiting geo precision as long decimal values trigger issues
# around json-bignumber in Mapbox
GEO_PRECISION = 10
# using geoJSON formatting
geo_json = {
"type": "FeatureCollection",
"features": [
{
"type": "Feature",
"properties": {"metric": metric, "radius": point_radius},
"geometry": {
"type": "Point",
"coordinates": [
round(lon, GEO_PRECISION),
round(lat, GEO_PRECISION),
],
},
}
for lon, lat, metric, point_radius in zip(
df[fd.get("all_columns_x")],
df[fd.get("all_columns_y")],
metric_col,
point_radius_col,
)
],
}
x_series, y_series = df[fd.get("all_columns_x")], df[fd.get("all_columns_y")]
south_west = [x_series.min(), y_series.min()]
north_east = [x_series.max(), y_series.max()]
return {
"geoJSON": geo_json,
"hasCustomMetric": has_custom_metric,
"mapboxApiKey": config["MAPBOX_API_KEY"],
"mapStyle": fd.get("mapbox_style"),
"aggregatorName": fd.get("pandas_aggfunc"),
"clusteringRadius": fd.get("clustering_radius"),
"pointRadiusUnit": fd.get("point_radius_unit"),
"globalOpacity": fd.get("global_opacity"),
"bounds": [south_west, north_east],
"renderWhileDragging": fd.get("render_while_dragging"),
"tooltip": fd.get("rich_tooltip"),
"color": fd.get("mapbox_color"),
}
class DeckGLMultiLayer(BaseViz):
"""Pile on multiple DeckGL layers"""
viz_type = "deck_multi"
verbose_name = _("Deck.gl - Multiple Layers")
is_timeseries = False
credits = '<a href="https://uber.github.io/deck.gl/">deck.gl</a>'
def query_obj(self):
return None
def get_data(self, df):
fd = self.form_data
# Late imports to avoid circular import issues
from superset.models.slice import Slice
from superset import db
slice_ids = fd.get("deck_slices")
slices = db.session.query(Slice).filter(Slice.id.in_(slice_ids)).all()
return {
"mapboxApiKey": config["MAPBOX_API_KEY"],
"slices": [slc.data for slc in slices],
}
class BaseDeckGLViz(BaseViz):
"""Base class for deck.gl visualizations"""
is_timeseries = False
credits = '<a href="https://uber.github.io/deck.gl/">deck.gl</a>'
spatial_control_keys: List[str] = []
def get_metrics(self):
self.metric = self.form_data.get("size")
return [self.metric] if self.metric else []
def process_spatial_query_obj(self, key, group_by):
group_by.extend(self.get_spatial_columns(key))
def get_spatial_columns(self, key):
spatial = self.form_data.get(key)
if spatial is None:
raise ValueError(_("Bad spatial key"))
if spatial.get("type") == "latlong":
return [spatial.get("lonCol"), spatial.get("latCol")]
elif spatial.get("type") == "delimited":
return [spatial.get("lonlatCol")]
elif spatial.get("type") == "geohash":
return [spatial.get("geohashCol")]
@staticmethod
def parse_coordinates(s):
if not s:
return None
try:
p = Point(s)
return (p.latitude, p.longitude) # pylint: disable=no-member
except Exception:
raise SpatialException(_("Invalid spatial point encountered: %s" % s))
@staticmethod
def reverse_geohash_decode(geohash_code):
lat, lng = geohash.decode(geohash_code)
return (lng, lat)
@staticmethod
def reverse_latlong(df, key):
df[key] = [tuple(reversed(o)) for o in df[key] if isinstance(o, (list, tuple))]
def process_spatial_data_obj(self, key, df):
spatial = self.form_data.get(key)
if spatial is None:
raise ValueError(_("Bad spatial key"))
if spatial.get("type") == "latlong":
df[key] = list(
zip(
pd.to_numeric(df[spatial.get("lonCol")], errors="coerce"),
pd.to_numeric(df[spatial.get("latCol")], errors="coerce"),
)
)
elif spatial.get("type") == "delimited":
lon_lat_col = spatial.get("lonlatCol")
df[key] = df[lon_lat_col].apply(self.parse_coordinates)
del df[lon_lat_col]
elif spatial.get("type") == "geohash":
df[key] = df[spatial.get("geohashCol")].map(self.reverse_geohash_decode)
del df[spatial.get("geohashCol")]
if spatial.get("reverseCheckbox"):
self.reverse_latlong(df, key)
if df.get(key) is None:
raise NullValueException(
_(
"Encountered invalid NULL spatial entry, \
please consider filtering those out"
)
)
return df
def add_null_filters(self):
fd = self.form_data
spatial_columns = set()
for key in self.spatial_control_keys:
for column in self.get_spatial_columns(key):
spatial_columns.add(column)
if fd.get("adhoc_filters") is None:
fd["adhoc_filters"] = []
line_column = fd.get("line_column")
if line_column:
spatial_columns.add(line_column)
for column in sorted(spatial_columns):
filter_ = to_adhoc({"col": column, "op": "IS NOT NULL", "val": ""})
fd["adhoc_filters"].append(filter_)
def query_obj(self):
fd = self.form_data
# add NULL filters
if fd.get("filter_nulls", True):
self.add_null_filters()
d = super().query_obj()
gb = []
for key in self.spatial_control_keys:
self.process_spatial_query_obj(key, gb)
if fd.get("dimension"):
gb += [fd.get("dimension")]
if fd.get("js_columns"):
gb += fd.get("js_columns")
metrics = self.get_metrics()
gb = list(set(gb))
if metrics:
d["groupby"] = gb
d["metrics"] = metrics
d["columns"] = []
else:
d["columns"] = gb
return d
def get_js_columns(self, d):
cols = self.form_data.get("js_columns") or []
return {col: d.get(col) for col in cols}
def get_data(self, df):
if df is None:
return None
# Processing spatial info
for key in self.spatial_control_keys:
df = self.process_spatial_data_obj(key, df)
features = []
for d in df.to_dict(orient="records"):
feature = self.get_properties(d)
extra_props = self.get_js_columns(d)
if extra_props:
feature["extraProps"] = extra_props
features.append(feature)
return {
"features": features,
"mapboxApiKey": config["MAPBOX_API_KEY"],
"metricLabels": self.metric_labels,
}
def get_properties(self, d):
raise NotImplementedError()
class DeckScatterViz(BaseDeckGLViz):
"""deck.gl's ScatterLayer"""
viz_type = "deck_scatter"
verbose_name = _("Deck.gl - Scatter plot")
spatial_control_keys = ["spatial"]
is_timeseries = True
def query_obj(self):
fd = self.form_data
self.is_timeseries = bool(fd.get("time_grain_sqla") or fd.get("granularity"))
self.point_radius_fixed = fd.get("point_radius_fixed") or {
"type": "fix",
"value": 500,
}
return super().query_obj()
def get_metrics(self):
self.metric = None
if self.point_radius_fixed.get("type") == "metric":
self.metric = self.point_radius_fixed.get("value")
return [self.metric]
return None
def get_properties(self, d):
return {
"metric": d.get(self.metric_label),
"radius": self.fixed_value
if self.fixed_value
else d.get(self.metric_label),
"cat_color": d.get(self.dim) if self.dim else None,
"position": d.get("spatial"),
DTTM_ALIAS: d.get(DTTM_ALIAS),
}
def get_data(self, df):
fd = self.form_data
self.metric_label = utils.get_metric_name(self.metric) if self.metric else None
self.point_radius_fixed = fd.get("point_radius_fixed")
self.fixed_value = None
self.dim = self.form_data.get("dimension")
if self.point_radius_fixed.get("type") != "metric":
self.fixed_value = self.point_radius_fixed.get("value")
return super().get_data(df)
class DeckScreengrid(BaseDeckGLViz):
"""deck.gl's ScreenGridLayer"""
viz_type = "deck_screengrid"
verbose_name = _("Deck.gl - Screen Grid")
spatial_control_keys = ["spatial"]
is_timeseries = True
def query_obj(self):
fd = self.form_data
self.is_timeseries = fd.get("time_grain_sqla") or fd.get("granularity")
return super().query_obj()
def get_properties(self, d):
return {
"position": d.get("spatial"),
"weight": d.get(self.metric_label) or 1,
"__timestamp": d.get(DTTM_ALIAS) or d.get("__time"),
}
def get_data(self, df):
self.metric_label = utils.get_metric_name(self.metric)
return super().get_data(df)
class DeckGrid(BaseDeckGLViz):
"""deck.gl's DeckLayer"""
viz_type = "deck_grid"
verbose_name = _("Deck.gl - 3D Grid")
spatial_control_keys = ["spatial"]
def get_properties(self, d):
return {"position": d.get("spatial"), "weight": d.get(self.metric_label) or 1}
def get_data(self, df):
self.metric_label = utils.get_metric_name(self.metric)
return super().get_data(df)
def geohash_to_json(geohash_code):
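# Return the geohash cell's bounding box as a closed polygon ring of
# [lon, lat] pairs: NW, NE, SE, SW and back to NW.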
p = geohash.bbox(geohash_code)
return [
[p.get("w"), p.get("n")],
[p.get("e"), p.get("n")],
[p.get("e"), p.get("s")],
[p.get("w"), p.get("s")],
[p.get("w"), p.get("n")],
]
class DeckPathViz(BaseDeckGLViz):
"""deck.gl's PathLayer"""
viz_type = "deck_path"
verbose_name = _("Deck.gl - Paths")
deck_viz_key = "path"
is_timeseries = True
deser_map = {
"json": json.loads,
"polyline": polyline.decode,
"geohash": geohash_to_json,
}
def query_obj(self):
fd = self.form_data
self.is_timeseries = fd.get("time_grain_sqla") or fd.get("granularity")
d = super().query_obj()
self.metric = fd.get("metric")
line_col = fd.get("line_column")
if d["metrics"]:
self.has_metrics = True
d["groupby"].append(line_col)
else:
self.has_metrics = False
d["columns"].append(line_col)
return d
def get_properties(self, d):
fd = self.form_data
line_type = fd.get("line_type")
deser = self.deser_map[line_type]
line_column = fd.get("line_column")
path = deser(d[line_column])
if fd.get("reverse_long_lat"):
path = [(o[1], o[0]) for o in path]
d[self.deck_viz_key] = path
if line_type != "geohash":
del d[line_column]
d["__timestamp"] = d.get(DTTM_ALIAS) or d.get("__time")
return d
def get_data(self, df):
self.metric_label = utils.get_metric_name(self.metric)
return super().get_data(df)
class DeckPolygon(DeckPathViz):
"""deck.gl's Polygon Layer"""
viz_type = "deck_polygon"
deck_viz_key = "polygon"
verbose_name = _("Deck.gl - Polygon")
def query_obj(self):
fd = self.form_data
self.elevation = fd.get("point_radius_fixed") or {"type": "fix", "value": 500}
return super().query_obj()
def get_metrics(self):
metrics = [self.form_data.get("metric")]
if self.elevation.get("type") == "metric":
metrics.append(self.elevation.get("value"))
return [metric for metric in metrics if metric]
def get_properties(self, d):
super().get_properties(d)
fd = self.form_data
elevation = fd["point_radius_fixed"]["value"]
type_ = fd["point_radius_fixed"]["type"]
d["elevation"] = (
d.get(utils.get_metric_name(elevation)) if type_ == "metric" else elevation
)
return d
class DeckHex(BaseDeckGLViz):
"""deck.gl's DeckLayer"""
viz_type = "deck_hex"
verbose_name = _("Deck.gl - 3D HEX")
spatial_control_keys = ["spatial"]
def get_properties(self, d):
return {"position": d.get("spatial"), "weight": d.get(self.metric_label) or 1}
def get_data(self, df):
self.metric_label = utils.get_metric_name(self.metric)
return super(DeckHex, self).get_data(df)
class DeckGeoJson(BaseDeckGLViz):
"""deck.gl's GeoJSONLayer"""
viz_type = "deck_geojson"
verbose_name = _("Deck.gl - GeoJSON")
def query_obj(self):
d = super().query_obj()
d["columns"] += [self.form_data.get("geojson")]
d["metrics"] = []
d["groupby"] = []
return d
def get_properties(self, d):
geojson = d.get(self.form_data.get("geojson"))
return json.loads(geojson)
class DeckArc(BaseDeckGLViz):
"""deck.gl's Arc Layer"""
viz_type = "deck_arc"
verbose_name = _("Deck.gl - Arc")
spatial_control_keys = ["start_spatial", "end_spatial"]
is_timeseries = True
def query_obj(self):
fd = self.form_data
self.is_timeseries = bool(fd.get("time_grain_sqla") or fd.get("granularity"))
return super().query_obj()
def get_properties(self, d):
dim = self.form_data.get("dimension")
return {
"sourcePosition": d.get("start_spatial"),
"targetPosition": d.get("end_spatial"),
"cat_color": d.get(dim) if dim else None,
DTTM_ALIAS: d.get(DTTM_ALIAS),
}
def get_data(self, df):
d = super().get_data(df)
return {"features": d["features"], "mapboxApiKey": config["MAPBOX_API_KEY"]}
class EventFlowViz(BaseViz):
"""A visualization to explore patterns in event sequences"""
viz_type = "event_flow"
verbose_name = _("Event flow")
credits = 'from <a href="https://github.com/williaster/data-ui">@data-ui</a>'
is_timeseries = True
def query_obj(self):
query = super().query_obj()
form_data = self.form_data
event_key = form_data.get("all_columns_x")
entity_key = form_data.get("entity")
meta_keys = [
col
for col in form_data.get("all_columns")
if col != event_key and col != entity_key
]
query["columns"] = [event_key, entity_key] + meta_keys
if form_data["order_by_entity"]:
query["orderby"] = [(entity_key, True)]
return query
def get_data(self, df):
return df.to_dict(orient="records")
class PairedTTestViz(BaseViz):
"""A table displaying paired t-test values"""
viz_type = "paired_ttest"
verbose_name = _("Time Series - Paired t-test")
sort_series = False
is_timeseries = True
def get_data(self, df):
"""
Transform received data frame into an object of the form:
{
'metric1': [
{
groups: ('groupA', ... ),
values: [ {x, y}, ... ],
}, ...
], ...
}
"""
fd = self.form_data
groups = fd.get("groupby")
metrics = self.metric_labels
df = df.pivot_table(index=DTTM_ALIAS, columns=groups, values=metrics)
cols = []
# Replace falsy column keys ("" or None) with readable labels
for col in df.columns:
if col == "":
cols.append("N/A")
elif col is None:
cols.append("NULL")
else:
cols.append(col)
df.columns = cols
data = {}
series = df.to_dict("series")
for nameSet in df.columns:
# If no groups are defined, nameSet will be the metric name
hasGroup = not isinstance(nameSet, str)
Y = series[nameSet]
d = {
"group": nameSet[1:] if hasGroup else "All",
"values": [{"x": t, "y": Y[t] if t in Y else None} for t in df.index],
}
key = nameSet[0] if hasGroup else nameSet
if key in data:
data[key].append(d)
else:
data[key] = [d]
return data
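# Illustrative shape of the value returned above (metric/group names and the
# numbers are made up; only the structure mirrors the docstring and code):
#
#     {
#         "sum__num": [
#             {"group": ("boy",), "values": [{"x": 1325376000000, "y": 4.0}, ...]},
#             {"group": ("girl",), "values": [{"x": 1325376000000, "y": 3.0}, ...]},
#         ],
#     }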
class RoseViz(NVD3TimeSeriesViz):
viz_type = "rose"
verbose_name = _("Time Series - Nightingale Rose Chart")
sort_series = False
is_timeseries = True
def get_data(self, df):
data = super().get_data(df)
result = {}
for datum in data:
key = datum["key"]
for val in datum["values"]:
timestamp = val["x"].value
if not result.get(timestamp):
result[timestamp] = []
value = 0 if math.isnan(val["y"]) else val["y"]
result[timestamp].append(
{
"key": key,
"value": value,
"name": ", ".join(key) if isinstance(key, list) else key,
"time": val["x"],
}
)
return result
class PartitionViz(NVD3TimeSeriesViz):
"""
A hierarchical data visualization with support for time series.
"""
viz_type = "partition"
verbose_name = _("Partition Diagram")
def query_obj(self):
query_obj = super().query_obj()
time_op = self.form_data.get("time_series_option", "not_time")
# Return time series data if the user specifies so
query_obj["is_timeseries"] = time_op != "not_time"
return query_obj
def levels_for(self, time_op, groups, df):
"""
Compute the partition at each `level` from the dataframe.
"""
levels = {}
for i in range(0, len(groups) + 1):
agg_df = df.groupby(groups[:i]) if i else df
levels[i] = (
agg_df.mean()
if time_op == "agg_mean"
else agg_df.sum(numeric_only=True)
)
return levels
def levels_for_diff(self, time_op, groups, df):
# Obtain a unique list of the time grains
times = list(set(df[DTTM_ALIAS]))
times.sort()
until = times[len(times) - 1]
since = times[0]
# Function describing how to calculate the difference
func = {
"point_diff": [pd.Series.sub, lambda a, b, fill_value: a - b],
"point_factor": [pd.Series.div, lambda a, b, fill_value: a / float(b)],
"point_percent": [
lambda a, b, fill_value=0: a.div(b, fill_value=fill_value) - 1,
lambda a, b, fill_value: a / float(b) - 1,
],
}[time_op]
agg_df = df.groupby(DTTM_ALIAS).sum()
levels = {
0: pd.Series(
{
m: func[1](agg_df[m][until], agg_df[m][since], 0)
for m in agg_df.columns
}
)
}
for i in range(1, len(groups) + 1):
agg_df = df.groupby([DTTM_ALIAS] + groups[:i]).sum()
levels[i] = pd.DataFrame(
{
m: func[0](agg_df[m][until], agg_df[m][since], fill_value=0)
for m in agg_df.columns
}
)
return levels
def levels_for_time(self, groups, df):
procs = {}
for i in range(0, len(groups) + 1):
self.form_data["groupby"] = groups[:i]
df_drop = df.drop(groups[i:], 1)
procs[i] = self.process_data(df_drop, aggregate=True)
self.form_data["groupby"] = groups
return procs
def nest_values(self, levels, level=0, metric=None, dims=()):
"""
        Nest values at each level on the back-end by direct access and
        assignment, instead of summing from the bottom up.
"""
if not level:
return [
{
"name": m,
"val": levels[0][m],
"children": self.nest_values(levels, 1, m),
}
for m in levels[0].index
]
if level == 1:
return [
{
"name": i,
"val": levels[1][metric][i],
"children": self.nest_values(levels, 2, metric, (i,)),
}
for i in levels[1][metric].index
]
if level >= len(levels):
return []
return [
{
"name": i,
"val": levels[level][metric][dims][i],
"children": self.nest_values(levels, level + 1, metric, dims + (i,)),
}
for i in levels[level][metric][dims].index
]
def nest_procs(self, procs, level=-1, dims=(), time=None):
if level == -1:
return [
{"name": m, "children": self.nest_procs(procs, 0, (m,))}
for m in procs[0].columns
]
if not level:
return [
{
"name": t,
"val": procs[0][dims[0]][t],
"children": self.nest_procs(procs, 1, dims, t),
}
for t in procs[0].index
]
if level >= len(procs):
return []
return [
{
"name": i,
"val": procs[level][dims][i][time],
"children": self.nest_procs(procs, level + 1, dims + (i,), time),
}
for i in procs[level][dims].columns
]
def get_data(self, df):
fd = self.form_data
groups = fd.get("groupby", [])
time_op = fd.get("time_series_option", "not_time")
if not len(groups):
raise ValueError("Please choose at least one groupby")
if time_op == "not_time":
levels = self.levels_for("agg_sum", groups, df)
elif time_op in ["agg_sum", "agg_mean"]:
levels = self.levels_for(time_op, groups, df)
elif time_op in ["point_diff", "point_factor", "point_percent"]:
levels = self.levels_for_diff(time_op, groups, df)
elif time_op == "adv_anal":
procs = self.levels_for_time(groups, df)
return self.nest_procs(procs)
else:
levels = self.levels_for("agg_sum", [DTTM_ALIAS] + groups, df)
return self.nest_values(levels)
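# Dispatch summary for get_data above (descriptive only):
#   "not_time"                                  -> levels_for("agg_sum", groups, df)
#   "agg_sum" / "agg_mean"                      -> levels_for(time_op, groups, df)
#   "point_diff"/"point_factor"/"point_percent" -> levels_for_diff(time_op, groups, df)
#   "adv_anal"                                  -> levels_for_time(groups, df) + nest_procs
#   anything else                               -> levels_for("agg_sum", [DTTM_ALIAS] + groups, df)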
viz_types = {
o.viz_type: o
for o in globals().values()
if (
inspect.isclass(o)
and issubclass(o, BaseViz)
and o.viz_type not in config["VIZ_TYPE_BLACKLIST"]
)
}
| 33.126614 | 88 | 0.552367 |
d4547d513cdf3e4bc0352d9652d44729dd151c85 | 2,817 | py | Python | dataset/loader.py | yilmazbaysal/CBDNet-pytorch | 6e4aa5021710177a2f7400e4dc9ffa780ccfc802 | [
"MIT"
] | 120 | 2019-05-29T03:55:56.000Z | 2022-03-21T04:55:12.000Z | dataset/loader.py | yilmazbaysal/CBDNet-pytorch | 6e4aa5021710177a2f7400e4dc9ffa780ccfc802 | [
"MIT"
] | 27 | 2019-06-03T07:03:31.000Z | 2021-12-17T07:19:28.000Z | dataset/loader.py | yilmazbaysal/CBDNet-pytorch | 6e4aa5021710177a2f7400e4dc9ffa780ccfc802 | [
"MIT"
] | 25 | 2019-06-17T10:52:35.000Z | 2022-03-07T06:52:10.000Z | import os
import random
import torch
import numpy as np
import glob
from torch.utils.data import Dataset
from utils import read_img, hwc_to_chw
def get_patch(imgs, patch_size):
H = imgs[0].shape[0]
W = imgs[0].shape[1]
ps_temp = min(H, W, patch_size)
xx = np.random.randint(0, W-ps_temp) if W > ps_temp else 0
yy = np.random.randint(0, H-ps_temp) if H > ps_temp else 0
for i in range(len(imgs)):
imgs[i] = imgs[i][yy:yy+ps_temp, xx:xx+ps_temp, :]
if np.random.randint(2, size=1)[0] == 1:
for i in range(len(imgs)):
imgs[i] = np.flip(imgs[i], axis=1)
if np.random.randint(2, size=1)[0] == 1:
for i in range(len(imgs)):
imgs[i] = np.flip(imgs[i], axis=0)
if np.random.randint(2, size=1)[0] == 1:
for i in range(len(imgs)):
imgs[i] = np.transpose(imgs[i], (1, 0, 2))
return imgs
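# Minimal usage sketch for get_patch (the arrays here are placeholders):
#
#     noisy = np.random.rand(256, 256, 3)
#     clean = np.random.rand(256, 256, 3)
#     noisy_patch, clean_patch = get_patch([noisy, clean], patch_size=128)
#
# All images in the list share the same crop window and the same random
# flips/transpose, so paired supervision stays aligned.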
class Real(Dataset):
def __init__(self, root_dir, sample_num, patch_size=128):
self.patch_size = patch_size
folders = glob.glob(root_dir + '/*')
folders.sort()
self.clean_fns = [None] * sample_num
for i in range(sample_num):
self.clean_fns[i] = []
for ind, folder in enumerate(folders):
clean_imgs = glob.glob(folder + '/*GT_SRGB*')
clean_imgs.sort()
for clean_img in clean_imgs:
self.clean_fns[ind % sample_num].append(clean_img)
def __len__(self):
l = len(self.clean_fns)
return l
def __getitem__(self, idx):
clean_fn = random.choice(self.clean_fns[idx])
clean_img = read_img(clean_fn)
noise_img = read_img(clean_fn.replace('GT_SRGB', 'NOISY_SRGB'))
if self.patch_size > 0:
[clean_img, noise_img] = get_patch([clean_img, noise_img], self.patch_size)
return hwc_to_chw(noise_img), hwc_to_chw(clean_img), np.zeros((3, self.patch_size, self.patch_size)), np.zeros((3, self.patch_size, self.patch_size))
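# Illustrative wiring with a DataLoader (the dataset path and sample_num are
# assumptions, not values used by this repository):
#
#     train_set = Real('./data/SIDD_Medium_Srgb', sample_num=100, patch_size=128)
#     train_loader = DataLoader(train_set, batch_size=32, shuffle=True, num_workers=4)
#     for noise, clean, sigma, flag in train_loader:
#         pass  # CHW float arrays; for Real, sigma and flag are all-zero placeholders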
class Syn(Dataset):
def __init__(self, root_dir, sample_num, patch_size=128):
self.patch_size = patch_size
folders = glob.glob(root_dir + '/*')
folders.sort()
self.clean_fns = [None] * sample_num
for i in range(sample_num):
self.clean_fns[i] = []
for ind, folder in enumerate(folders):
clean_imgs = glob.glob(folder + '/*GT_SRGB*')
clean_imgs.sort()
for clean_img in clean_imgs:
self.clean_fns[ind % sample_num].append(clean_img)
def __len__(self):
l = len(self.clean_fns)
return l
def __getitem__(self, idx):
clean_fn = random.choice(self.clean_fns[idx])
clean_img = read_img(clean_fn)
noise_img = read_img(clean_fn.replace('GT_SRGB', 'NOISY_SRGB'))
sigma_img = read_img(clean_fn.replace('GT_SRGB', 'SIGMA_SRGB')) / 15. # inverse scaling
if self.patch_size > 0:
[clean_img, noise_img, sigma_img] = get_patch([clean_img, noise_img, sigma_img], self.patch_size)
return hwc_to_chw(noise_img), hwc_to_chw(clean_img), hwc_to_chw(sigma_img), np.ones((3, self.patch_size, self.patch_size)) | 27.617647 | 151 | 0.695421 |
246a55f45e853815176effffcf34ec510ab7cd54 | 9,767 | py | Python | Lib/site-packages/pyqode/core/backend/workers.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
] | null | null | null | Lib/site-packages/pyqode/core/backend/workers.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
] | 20 | 2021-05-03T18:02:23.000Z | 2022-03-12T12:01:04.000Z | Lib/site-packages/pyqode/core/backend/workers.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
] | 1 | 2021-12-15T20:07:45.000Z | 2021-12-15T20:07:45.000Z | # -*- coding: utf-8 -*-
"""
This module contains the worker functions/classes used on the server side.
A worker is a function or a callable which receive one single argument (the
decoded json object) and returns a tuple made up of a status (bool) and a
response object (json serializable).
A worker is always tightly coupled with its caller, so are the data.
.. warning::
This module should keep its dependencies as low as possible and fully
supports python2 syntax. This is badly needed since the server might be run
with a python2 interpreter. We don't want to force the user to install all
the pyqode dependencies twice (if the user choose to run the server with
python2, which might happen in pyqode.python to support python2 syntax).
"""
import os
import re
import sys
import traceback
def echo_worker(data):
"""
Example of worker that simply echoes back the received data.
:param data: Request data dict.
:returns: True, data
"""
return data
class CodeCompletionWorker(object):
"""
This is the worker associated with the code completion mode.
The worker does not actually do anything smart, the real work of collecting
code completions is accomplished by the completion providers (see the
:class:`pyqode.core.backend.workers.CodeCompletionWorker.Provider`
interface) listed in
:attr:`pyqode.core.backend.workers.CompletionWorker.providers`.
Completion providers must be installed on the CodeCompletionWorker
at the beginning of the main server script, e.g.::
from pyqode.core.backend import CodeCompletionWorker
CodeCompletionWorker.providers.insert(0, MyProvider())
"""
#: The list of code completion provider to run on each completion request.
providers = []
class Provider(object):
"""
This class describes the expected interface for code completion
providers.
You can inherit from this class but this is not required as long as you
implement a ``complete`` method which returns the list of completions
and have the expected signature::
def complete(self, code, line, column, path, encoding, prefix):
pass
"""
def complete(self, code, line, column, path,
encoding, prefix, triggered_by_symbol):
"""
Returns a list of completions.
A completion is dictionary with the following keys:
- 'name': name of the completion, this the text displayed and
inserted when the user select a completion in the list
- 'icon': an optional icon file name
- 'tooltip': an optional tooltip string
:param code: code string
:param line: line number (0 based)
:param column: column number (0 based)
:param path: file path
:param encoding: file encoding
:param prefix: completion prefix (text before cursor)
:param triggered_by_symbol: True if the completion was triggered
by typing a completion character such as '.'
:returns: A list of completion dicts as described above.
:rtype: list
"""
raise NotImplementedError()
def __call__(self, data):
"""
Do the work (this will be called in the child process by the
SubprocessServer).
"""
code = data['code']
line = data['line']
column = data['column']
path = data['path']
encoding = data['encoding']
prefix = data['prefix']
req_id = data['request_id']
triggered_by_symbol = data['triggered_by_symbol']
completions = []
for prov in CodeCompletionWorker.providers:
try:
results = prov.complete(
code,
line,
column,
path,
encoding,
prefix,
triggered_by_symbol
)
completions.append(results)
if len(completions):
break
except:
sys.stderr.write('Failed to get completions from provider %r'
% prov)
exc1, exc2, exc3 = sys.exc_info()
traceback.print_exception(exc1, exc2, exc3, file=sys.stderr)
return [(line, column, req_id)] + completions
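# A minimal provider sketch, mirroring the interface documented above (the
# class name and the returned entries are hypothetical):
#
#     class StaticProvider(object):
#         def complete(self, code, line, column, path, encoding, prefix,
#                      triggered_by_symbol):
#             return [{'name': 'example_completion',
#                      'tooltip': 'hypothetical entry'}]
#
#     CodeCompletionWorker.providers.insert(0, StaticProvider())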
class DocumentWordsProvider(object):
"""
Provides completions based on the document words
"""
words = {}
# word separators
separators = [
'~', '!', '@', '#', '$', '%', '^', '&', '*', '(', ')', '+', '{',
'}', '|', ':', '"', "'", "<", ">", "?", ",", ".", "/", ";", '[',
']', '\\', '\n', '\t', '=', '-', ' '
]
@staticmethod
def split(txt, seps):
"""
Splits a text in a meaningful list of words based on a list of word
separators (define in pyqode.core.settings)
:param txt: Text to split
:param seps: List of words separators
:return: A **set** of words found in the document (excluding
punctuations, numbers, ...)
"""
# replace all possible separators with a default sep
default_sep = seps[0]
for sep in seps[1:]:
if sep:
txt = txt.replace(sep, default_sep)
# now we can split using the default_sep
raw_words = txt.split(default_sep)
words = set()
for word in raw_words:
# w = w.strip()
if word.replace('_', '').isalpha():
words.add(word)
return sorted(words)
def complete(self, code, *args):
"""
Provides completions based on the document words.
:param code: code to complete
:param args: additional (unused) arguments.
"""
completions = []
for word in self.split(code, self.separators):
completions.append({'name': word})
return completions
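# Quick sanity check of the splitting behaviour (easy to verify by hand):
#
#     DocumentWordsProvider.split("foo_bar = baz(42)",
#                                 DocumentWordsProvider.separators)
#     # -> ['baz', 'foo_bar']   (numeric/punctuation tokens are dropped)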
def finditer_noregex(string, sub, whole_word):
"""
Search occurrences using str.find instead of regular expressions.
:param string: string to parse
:param sub: search string
:param whole_word: True to select whole words only
"""
start = 0
while True:
start = string.find(sub, start)
if start == -1:
return
if whole_word:
if start:
pchar = string[start - 1]
else:
pchar = ' '
try:
nchar = string[start + len(sub)]
except IndexError:
nchar = ' '
if nchar in DocumentWordsProvider.separators and \
pchar in DocumentWordsProvider.separators:
yield start
start += len(sub)
else:
yield start
start += 1
def findalliter(string, sub, regex=False, case_sensitive=False,
whole_word=False):
"""
Generator that finds all occurrences of ``sub`` in ``string``
:param string: string to parse
:param sub: string to search
:param regex: True to search using regex
:param case_sensitive: True to match case, False to ignore case
:param whole_word: True to returns only whole words
:return:
"""
if not sub:
return
if regex:
flags = re.MULTILINE
if not case_sensitive:
flags |= re.IGNORECASE
for val in re.finditer(sub, string, flags):
yield val.span()
else:
if not case_sensitive:
string = string.lower()
sub = sub.lower()
for val in finditer_noregex(string, sub, whole_word):
yield val, val + len(sub)
def findall(data):
"""
Worker that finds all occurrences of a given string (or regex)
in a given text.
:param data: Request data dict::
{
            'string': text to search in
            'sub': string to search for
'regex': True to consider string as a regular expression
'whole_word': True to match whole words only.
'case_sensitive': True to match case, False to ignore case
}
:return: list of occurrence positions in text
"""
return list(findalliter(
data['string'], data['sub'], regex=data['regex'],
whole_word=data['whole_word'], case_sensitive=data['case_sensitive']))
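# Minimal request sketch for the worker above (the text and search term are
# made up; keys follow the docstring):
#
#     findall({'string': 'spam ham spam', 'sub': 'spam', 'regex': False,
#              'whole_word': True, 'case_sensitive': False})
#     # -> [(0, 4), (9, 13)]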
_image_annotations = {}
def _meaningful_code(code):
"""Strips code of comments and trailing whitespace. This avoids image
annotations from vanishing after trivial changes to the code. Markdown
images (# ![]) are preserverd because they are part of captured output.
"""
code = code.replace('# ![]', '![]')
code = re.sub(r'(?m)#.*\n?', '\n', code)
code = '\n'.join([line.rstrip() for line in code.splitlines()])
return code
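# For example (values can be checked by hand):
#
#     _meaningful_code("x = 1  # set x\n# ![](out.png)\n")
#     # -> 'x = 1\n![](out.png)'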
def image_annotations(data):
"""Returns a list of image annotations."""
haystack = _meaningful_code(data['code'])
ret_val = []
for needle, paths in _image_annotations.get(data['path'], {}).items():
for path in paths:
if not os.path.exists(path):
continue
prev_pos = 0
while True:
pos = haystack.find(_meaningful_code(needle), prev_pos)
if pos < 0:
break
prev_pos = pos + 1
line = haystack[:pos].count('\n')
ret_val.append(('Image', 0, line, None, None, None, path))
return ret_val
def set_image_annotations(data):
"""Sets the image annotation data."""
global _image_annotations
_image_annotations = data
| 32.996622 | 79 | 0.57981 |
e80aa1572085cf7e13a3007de5b046390d085107 | 7,830 | py | Python | test/unit/test_objectstore.py | mysticmirages/GalaxyProject | 6a47d2d3a7f27b659e5de5fb1b37c7c1f9d04302 | [
"CC-BY-3.0"
] | null | null | null | test/unit/test_objectstore.py | mysticmirages/GalaxyProject | 6a47d2d3a7f27b659e5de5fb1b37c7c1f9d04302 | [
"CC-BY-3.0"
] | null | null | null | test/unit/test_objectstore.py | mysticmirages/GalaxyProject | 6a47d2d3a7f27b659e5de5fb1b37c7c1f9d04302 | [
"CC-BY-3.0"
] | null | null | null | import os
from shutil import rmtree
from string import Template
from tempfile import mkdtemp
try:
from galaxy import objectstore
except ImportError:
from lwr import objectstore
from contextlib import contextmanager
DISK_TEST_CONFIG = """<?xml version="1.0"?>
<object_store type="disk">
<files_dir path="${temp_directory}/files1"/>
<extra_dir type="temp" path="${temp_directory}/tmp1"/>
<extra_dir type="job_work" path="${temp_directory}/job_working_directory1"/>
</object_store>
"""
def test_disk_store():
with TestConfig(DISK_TEST_CONFIG) as (directory, object_store):
# Test no dataset with id 1 exists.
absent_dataset = MockDataset(1)
assert not object_store.exists(absent_dataset)
        # Write empty dataset 2 to disk, ensure it is empty and exists.
empty_dataset = MockDataset(2)
directory.write(b"", "files1/000/dataset_2.dat")
assert object_store.exists(empty_dataset)
assert object_store.empty(empty_dataset)
        # Write a non-empty dataset, test it is not empty & exists.
hello_world_dataset = MockDataset(3)
directory.write(b"Hello World!", "files1/000/dataset_3.dat")
assert object_store.exists(hello_world_dataset)
assert not object_store.empty(hello_world_dataset)
# Test get_data
data = object_store.get_data(hello_world_dataset)
assert data == b"Hello World!"
data = object_store.get_data(hello_world_dataset, start=1, count=6)
assert data == b"ello W"
# Test Size
# Test absent and empty datasets yield size of 0.
assert object_store.size(absent_dataset) == 0
assert object_store.size(empty_dataset) == 0
        # Otherwise
assert object_store.size(hello_world_dataset) > 0 # Should this always be the number of bytes?
# Test percent used (to some degree)
percent_store_used = object_store.get_store_usage_percent()
assert percent_store_used > 0.0
assert percent_store_used < 100.0
# Test update_from_file test
output_dataset = MockDataset(4)
output_real_path = os.path.join(directory.temp_directory, "files1", "000", "dataset_4.dat")
assert not os.path.exists(output_real_path)
output_working_path = directory.write(b"NEW CONTENTS", "job_working_directory1/example_output")
object_store.update_from_file(output_dataset, file_name=output_working_path, create=True)
assert os.path.exists(output_real_path)
# Test delete
to_delete_dataset = MockDataset(5)
to_delete_real_path = directory.write(b"content to be deleted!", "files1/000/dataset_5.dat")
assert object_store.exists(to_delete_dataset)
assert object_store.delete(to_delete_dataset)
assert not object_store.exists(to_delete_dataset)
assert not os.path.exists(to_delete_real_path)
HIERARCHICAL_TEST_CONFIG = """<?xml version="1.0"?>
<object_store type="hierarchical">
<backends>
<backend id="files1" type="disk" weight="1" order="0">
<files_dir path="${temp_directory}/files1"/>
<extra_dir type="temp" path="${temp_directory}/tmp1"/>
<extra_dir type="job_work" path="${temp_directory}/job_working_directory1"/>
</backend>
<backend id="files2" type="disk" weight="1" order="1">
<files_dir path="${temp_directory}/files2"/>
<extra_dir type="temp" path="${temp_directory}/tmp2"/>
<extra_dir type="job_work" path="${temp_directory}/job_working_directory2"/>
</backend>
</backends>
</object_store>
"""
def test_hierarchical_store():
with TestConfig(HIERARCHICAL_TEST_CONFIG) as (directory, object_store):
# Test no dataset with id 1 exists.
assert not object_store.exists(MockDataset(1))
# Write empty dataset 2 in second backend, ensure it is empty and
# exists.
directory.write("", "files2/000/dataset_2.dat")
assert object_store.exists(MockDataset(2))
assert object_store.empty(MockDataset(2))
        # Write non-empty dataset in backend 1, test it is not empty & exists.
directory.write("Hello World!", "files1/000/dataset_3.dat")
assert object_store.exists(MockDataset(3))
assert not object_store.empty(MockDataset(3))
# Assert creation always happens in first backend.
for i in range(100):
dataset = MockDataset(100 + i)
object_store.create(dataset)
assert object_store.get_filename(dataset).find("files1") > 0
DISTRIBUTED_TEST_CONFIG = """<?xml version="1.0"?>
<object_store type="distributed">
<backends>
<backend id="files1" type="disk" weight="2" order="0">
<files_dir path="${temp_directory}/files1"/>
<extra_dir type="temp" path="${temp_directory}/tmp1"/>
<extra_dir type="job_work" path="${temp_directory}/job_working_directory1"/>
</backend>
<backend id="files2" type="disk" weight="1" order="1">
<files_dir path="${temp_directory}/files2"/>
<extra_dir type="temp" path="${temp_directory}/tmp2"/>
<extra_dir type="job_work" path="${temp_directory}/job_working_directory2"/>
</backend>
</backends>
</object_store>
"""
def test_distributed_store():
with TestConfig(DISTRIBUTED_TEST_CONFIG) as (directory, object_store):
with __stubbed_persistence() as persisted_ids:
for i in range(100):
dataset = MockDataset(100 + i)
object_store.create(dataset)
# Test distributes datasets between backends according to weights
backend_1_count = len([v for v in persisted_ids.values() if v == "files1"])
backend_2_count = len([v for v in persisted_ids.values() if v == "files2"])
assert backend_1_count > 0
assert backend_2_count > 0
assert backend_1_count > backend_2_count
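# (With weights 2:1 over 100 creations the first backend should receive roughly
# two thirds of the datasets, assuming weighted selection, hence the strict
# backend_1_count > backend_2_count check above.)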
class TestConfig(object):
def __init__(self, config_xml):
self.temp_directory = mkdtemp()
self.write(config_xml, "store.xml")
config = MockConfig(self.temp_directory)
self.object_store = objectstore.build_object_store_from_config(config)
def __enter__(self):
return self, self.object_store
def __exit__(self, type, value, tb):
rmtree(self.temp_directory)
def write(self, contents, name):
path = os.path.join(self.temp_directory, name)
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory)
expanded_contents = Template(contents).safe_substitute(temp_directory=self.temp_directory)
open(path, "w").write(expanded_contents)
return path
class MockConfig(object):
def __init__(self, temp_directory):
self.file_path = temp_directory
self.object_store_config_file = os.path.join(temp_directory, "store.xml")
self.object_store_check_old_style = False
self.job_working_directory = temp_directory
self.new_file_path = temp_directory
self.umask = 0000
class MockDataset(object):
def __init__(self, id):
self.id = id
self.object_store_id = None
# Poor man's mocking. Need to get a real mocking library as a real Galaxy development
# dependency.
PERSIST_METHOD_NAME = "create_object_in_session"
@contextmanager
def __stubbed_persistence():
real_method = getattr(objectstore, PERSIST_METHOD_NAME)
try:
persisted_ids = {}
def persist(object):
persisted_ids[object.id] = object.object_store_id
setattr(objectstore, PERSIST_METHOD_NAME, persist)
yield persisted_ids
finally:
setattr(objectstore, PERSIST_METHOD_NAME, real_method)
| 37.109005 | 103 | 0.673691 |
9110bd72dd2200f5dde0bea3efc183b2f23a004a | 669 | py | Python | FOREX/USECASE2/producer.py | kareemv46/Project3 | eeb63d69d69c7a3ff909a284e27740beb083ff10 | [
"MIT"
] | 1 | 2021-12-14T14:41:02.000Z | 2021-12-14T14:41:02.000Z | FOREX/USECASE2/producer.py | 6618karan/Stock-Analysis | 6a4008acc8f48abc5dc4f33cbe9f329451343459 | [
"MIT"
] | null | null | null | FOREX/USECASE2/producer.py | 6618karan/Stock-Analysis | 6a4008acc8f48abc5dc4f33cbe9f329451343459 | [
"MIT"
] | 3 | 2021-09-28T09:50:49.000Z | 2021-10-01T11:35:41.000Z | #This file is going to be used for producer
#first create a topic name as 'project3'
from kafka import KafkaProducer
import requests
from json import dumps
import time
kafka_data_producers = KafkaProducer(bootstrap_servers=['localhost:9092'],value_serializer=lambda x: dumps(x).encode('utf-8') )
while True:
response_data = requests.get("https://api.tiingo.com/tiingo/fx/top?tickers=audusd,eurusd&token=a3a48dc2dc4244210c167423d69942bcc5595b76")
#response_data=response_data.json()
data = {'Lagos' : response_data.json()}
data=data['Lagos'][0]
kafka_data_producers.send('project3', value=data)
print(data)
print()
time.sleep(10)
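# A matching consumer sketch (topic and broker taken from above; the group id
# and deserializer are assumptions):
#
#     from kafka import KafkaConsumer
#     from json import loads
#     consumer = KafkaConsumer('project3', bootstrap_servers=['localhost:9092'],
#                              group_id='project3-readers',
#                              value_deserializer=lambda m: loads(m.decode('utf-8')))
#     for message in consumer:
#         print(message.value)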
| 33.45 | 141 | 0.745889 |
091bad48d29d767c2c09ae65d8c6ffd6c0d44cd9 | 4,564 | py | Python | charcoal/utils.py | ctb/2020-charcoal-metagenome-abundance-foo | dc99f7774c20c942a2966186496351d7e575e34a | [
"BSD-3-Clause"
] | null | null | null | charcoal/utils.py | ctb/2020-charcoal-metagenome-abundance-foo | dc99f7774c20c942a2966186496351d7e575e34a | [
"BSD-3-Clause"
] | null | null | null | charcoal/utils.py | ctb/2020-charcoal-metagenome-abundance-foo | dc99f7774c20c942a2966186496351d7e575e34a | [
"BSD-3-Clause"
] | null | null | null | """
utility functions for charcoal.
"""
import math
import numpy as np
from numpy import genfromtxt
import screed
from sourmash.lca import lca_utils
def load_hashset(filename):
"Load set of hashes from a file."
with open(filename, 'rt') as fp:
hashes = set([ int(x.strip()) for x in fp if x.strip() ])
return hashes
def load_matrix_csv(filename):
mat = genfromtxt(filename, delimiter=',')
return mat
def make_distance_matrix(mat, delete_empty=False):
"""
Construct distance matrix from metagenome x hash matrices.
"""
n_hashes = mat.shape[1]
n_orig_hashes = n_hashes
# go through and normalize all the sample-presence vectors for each hash;
# track those with all 0s for later removal.
to_delete = []
for i in range(n_hashes):
if sum(mat[:, i]):
mat[:, i] /= math.sqrt(np.dot(mat[:, i], mat[:, i]))
else:
to_delete.append(i)
if delete_empty:
# remove all columns with zeros
print('removing {} null presence vectors'.format(len(to_delete)))
for row_n in reversed(to_delete):
mat = np.delete(mat, row_n, 1)
assert mat.shape[1] == n_hashes - len(to_delete)
n_hashes = mat.shape[1]
    # construct the pairwise matrix; entries are angular similarities (1 = identical)
D = np.zeros((n_hashes, n_hashes))
for i in range(n_hashes):
for j in range(n_hashes):
cos_sim = np.dot(mat[:, i], mat[:, j])
cos_sim = min(cos_sim, 1.0)
ang_sim = 1 - 2*math.acos(cos_sim) / math.pi
D[i][j] = ang_sim
# done!
return D, n_orig_hashes
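# Toy sanity check (a 2-sample x 2-hash presence matrix; note the function
# normalizes `mat` in place, hence the copy):
#
#     toy = np.array([[1.0, 0.0],
#                     [1.0, 1.0]])
#     D, n = make_distance_matrix(toy.copy())
#     # D[0][0] ~= 1.0 (identical presence vectors), D[0][1] ~= 0.5, n == 2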
def is_lineage_match(lin_a, lin_b, rank):
"""
check to see if two lineages are a match down to given rank.
"""
for a, b in zip(lin_a, lin_b):
assert a.rank == b.rank
if a.rank == rank:
if a == b:
return 1
if a != b:
return 0
return 0
def pop_to_rank(lin, rank):
"Remove lineage tuples from given lineage `lin` until `rank` is reached."
lin = list(lin)
txl = lca_utils.taxlist()
before_rank = []
for txl_rank in txl:
if txl_rank != rank:
before_rank.append(txl_rank)
else:
break
# are we already above rank?
if lin and lin[-1].rank in before_rank:
return tuple(lin)
while lin and lin[-1].rank != rank:
lin.pop()
return tuple(lin)
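# Small illustration (using a stand-in namedtuple; real callers pass
# sourmash LineagePair-style objects, which also expose .rank):
#
#     from collections import namedtuple
#     LinPair = namedtuple('LinPair', ['rank', 'name'])
#     lin = (LinPair('superkingdom', 'd__Bacteria'),
#            LinPair('phylum', 'p__Proteobacteria'),
#            LinPair('genus', 'g__Escherichia'))
#     pop_to_rank(lin, 'phylum')
#     # -> the first two pairs only; everything below 'phylum' is popped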
class HashesToTaxonomy(object):
def __init__(self, genome_file, ksize, scaled, fragment_size, lca_db_file):
self.genome_file = genome_file
self.ksize = ksize
self.scaled = scaled
self.fragment_size = fragment_size
self.lca_db_file = lca_db_file
self.d = {}
def __setitem__(self, hashval, lineage):
self.d[hashval] = lineage
def __getitem__(self, hashval):
return self.d[hashval]
def __len__(self):
return len(self.d)
def __iter__(self):
return iter(self.d)
def items(self):
return self.d.items()
class HashesToLengths(object):
def __init__(self, genome_file, ksize, scaled, fragment_size):
self.genome_file = genome_file
self.ksize = ksize
self.scaled = scaled
self.fragment_size = fragment_size
self.d = {}
def __setitem__(self, hashval, length):
self.d[hashval] = length
def __getitem__(self, hashval):
return self.d[hashval]
def __len__(self):
return len(self.d)
def __iter__(self):
return iter(self.d)
def items(self):
return self.d.items()
class MetagenomesMatrix(object):
def __init__(self, genome_file, query_hashlist, query_fragment_size, ksize):
self.genome_file = genome_file
self.query_hashlist = list(sorted(query_hashlist))
self.query_fragment_size = query_fragment_size
self.ksize = ksize
self.mat = None
class GenomeShredder(object):
def __init__(self, genome_file, fragment_size):
self.genome_file = genome_file
self.fragment_size = fragment_size
def __iter__(self):
fragment_size = self.fragment_size
for record in screed.open(self.genome_file):
if not fragment_size:
yield record.name, record.sequence, 0, len(record.sequence)
else:
for start in range(0, len(record.sequence), fragment_size):
seq = record.sequence[start:start + fragment_size]
yield record.name, seq, start, start + len(seq)
| 26.229885 | 80 | 0.611744 |
61800c21aa465a9bba1c4b8488ceedc20d8316fd | 3,818 | py | Python | tests/test_es_utils.py | LandRegistry/digital-register-elasticsearch-updater | 2e7b95d8d5eac70e9bba6612bed09bf58376e781 | [
"MIT"
] | null | null | null | tests/test_es_utils.py | LandRegistry/digital-register-elasticsearch-updater | 2e7b95d8d5eac70e9bba6612bed09bf58376e781 | [
"MIT"
] | 12 | 2015-06-05T15:10:25.000Z | 2016-04-21T15:17:19.000Z | tests/test_es_utils.py | LandRegistry/digital-register-elasticsearch-updater | 2e7b95d8d5eac70e9bba6612bed09bf58376e781 | [
"MIT"
] | 1 | 2021-04-11T06:03:41.000Z | 2021-04-11T06:03:41.000Z | import mock
from service import es_utils
# TODO: partly to be replaced by proper integration tests
class TestEsUtils:
@mock.patch(
'service.es_utils.elasticsearch_client.indices.status',
return_value={'indices': []}
)
@mock.patch('service.es_utils.elasticsearch_client.index')
@mock.patch('service.es_utils.indices_client.put_mapping')
def test_ensure_mapping_exists_creates_index_when_one_not_present(
self, mock_put_mapping, mock_index, mock_status):
index_name = 'index_name'
doc_type = 'doc_type'
mapping = {'mapping': 'properties'}
es_utils.ensure_mapping_exists(index_name, doc_type, mapping)
mock_index.assert_called_once_with(index=index_name, doc_type=doc_type, body={})
mock_put_mapping.assert_called_once_with(
index=index_name, doc_type=doc_type, body=mapping
)
@mock.patch(
'service.es_utils.elasticsearch_client.indices.status',
return_value={'indices': ['index_name']})
@mock.patch('service.es_utils.elasticsearch_client.index')
@mock.patch('service.es_utils.indices_client.put_mapping')
def test_ensure_mapping_exists_does_not_create_index_when_present(
self, mock_put_mapping, mock_index, mock_status):
index_name = 'index_name'
doc_type = 'doc_type'
mapping = {'mapping': 'properties'}
es_utils.ensure_mapping_exists(index_name, doc_type, mapping)
assert mock_index.mock_calls == []
mock_put_mapping.assert_called_once_with(
index=index_name, doc_type=doc_type, body=mapping
)
@mock.patch('service.es_utils.bulk')
def test_execute_elasticsearch_actions_executes_all_given_actions(self, mock_bulk):
actions = [{'action1': '1', 'action2': '2'}]
es_utils.execute_elasticsearch_actions(actions)
mock_bulk.assert_called_once_with(es_utils.elasticsearch_client, actions)
def test_execute_elasticsearch_actions_returns_execution_result(self):
expected_result = (123, ['error1'])
with mock.patch('service.es_utils.bulk', return_value=expected_result):
result = es_utils.execute_elasticsearch_actions([])
assert result == expected_result
@mock.patch('service.es_utils.elasticsearch_client.search')
def test_search_executes_given_query(self, mock_search):
query_dict = {'query': 'dict'}
index_name = 'index_name'
doc_type = 'doc_type'
es_utils.search(query_dict, index_name, doc_type)
mock_search.assert_called_once_with(index=index_name, doc_type=doc_type, body=query_dict)
def test_search_returns_the_hits_from_elasticsearch_result(self):
hits = [{'_source': {'some': 'data'}}]
search_result = {'hits': {'hits': hits}}
with mock.patch(
'service.es_utils.elasticsearch_client.search',
return_value=search_result
):
result = es_utils.search({'query': 'dict'}, 'index_name', 'doc_type')
assert result == hits
def test_get_upsert_action_returns_action_with_the_right_content(self):
result = es_utils.get_upsert_action('idx_name1', 'doc_type1', {'doc': 'body1'}, 'id1')
assert result == {
'doc_as_upsert': True,
'_op_type': 'update',
'_index': 'idx_name1',
'_type': 'doc_type1',
'_id': 'id1',
'doc': {'doc': 'body1'},
}
def test_get_delete_action_returns_action_with_the_right_content(self):
result = es_utils.get_delete_action('index_name1', 'doc_type1', 'id1')
assert result == {
'_op_type': 'delete',
'_index': 'index_name1',
'_type': 'doc_type1',
'_id': 'id1',
}
| 36.018868 | 97 | 0.662651 |
5fdeb55763d07fb63c6a6d253f3a58d84bff195f | 35,999 | py | Python | cvxpy/tests/test_conic_solvers.py | tuelwer/cvxpy | daa2a92eef97c4e8870c280afce71273fc64ea11 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | cvxpy/tests/test_conic_solvers.py | tuelwer/cvxpy | daa2a92eef97c4e8870c280afce71273fc64ea11 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | cvxpy/tests/test_conic_solvers.py | tuelwer/cvxpy | daa2a92eef97c4e8870c280afce71273fc64ea11 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | """
Copyright 2019, the CVXPY developers.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import math
import numpy as np
import scipy.linalg as la
import cvxpy as cp
import unittest
from cvxpy.tests.base_test import BaseTest
from cvxpy.tests.solver_test_helpers import StandardTestECPs, StandardTestSDPs
from cvxpy.tests.solver_test_helpers import StandardTestSOCPs, StandardTestLPs
from cvxpy.reductions.solvers.defines import INSTALLED_SOLVERS
class TestECOS(BaseTest):
def setUp(self):
self.a = cp.Variable(name='a')
self.b = cp.Variable(name='b')
self.c = cp.Variable(name='c')
self.x = cp.Variable(2, name='x')
self.y = cp.Variable(3, name='y')
self.z = cp.Variable(2, name='z')
self.A = cp.Variable((2, 2), name='A')
self.B = cp.Variable((2, 2), name='B')
self.C = cp.Variable((3, 2), name='C')
def test_ecos_options(self):
"""Test that all the ECOS solver options work.
"""
# Test ecos
# feastol, abstol, reltol, feastol_inacc,
# abstol_inacc, and reltol_inacc for tolerance values
# max_iters for the maximum number of iterations,
EPS = 1e-4
prob = cp.Problem(cp.Minimize(cp.norm(self.x, 1) + 1.0), [self.x == 0])
for i in range(2):
prob.solve(solver=cp.ECOS, feastol=EPS, abstol=EPS, reltol=EPS,
feastol_inacc=EPS, abstol_inacc=EPS, reltol_inacc=EPS,
max_iters=20, verbose=True, warm_start=True)
self.assertAlmostEqual(prob.value, 1.0)
self.assertItemsAlmostEqual(self.x.value, [0, 0])
def test_ecos_bb_options(self):
"""Test that all the ECOS BB solver options work.
"""
# 'mi_maxiter'
# maximum number of branch and bound iterations (default: 1000)
# 'mi_abs_eps'
# absolute tolerance between upper and lower bounds (default: 1e-6)
        # 'mi_rel_eps'
        #     relative tolerance between upper and lower bounds
prob = cp.Problem(cp.Minimize(cp.norm(self.x, 1) + 1.0),
[self.x == cp.Variable(2, boolean=True)])
for i in range(2):
prob.solve(solver=cp.ECOS_BB, mi_max_iters=100, mi_abs_eps=1e-6,
mi_rel_eps=1e-5, verbose=True, warm_start=True)
self.assertAlmostEqual(prob.value, 1.0)
self.assertItemsAlmostEqual(self.x.value, [0, 0])
def test_ecos_lp_0(self):
StandardTestLPs.test_lp_0(solver='ECOS')
def test_ecos_lp_1(self):
StandardTestLPs.test_lp_1(solver='ECOS')
def test_ecos_lp_2(self):
StandardTestLPs.test_lp_2(solver='ECOS')
def test_ecos_lp_3(self):
StandardTestLPs.test_lp_3(solver='ECOS')
def test_ecos_lp_4(self):
StandardTestLPs.test_lp_4(solver='ECOS')
def test_ecos_socp_0(self):
StandardTestSOCPs.test_socp_0(solver='ECOS')
def test_ecos_socp_1(self):
StandardTestSOCPs.test_socp_1(solver='ECOS')
def test_ecos_socp_2(self):
StandardTestSOCPs.test_socp_2(solver='ECOS')
def test_ecos_expcone_1(self):
StandardTestECPs.test_expcone_1(solver='ECOS')
class TestSCS(BaseTest):
""" Unit tests for SCS. """
def setUp(self):
self.x = cp.Variable(2, name='x')
self.y = cp.Variable(2, name='y')
self.A = cp.Variable((2, 2), name='A')
self.B = cp.Variable((2, 2), name='B')
self.C = cp.Variable((3, 2), name='C')
# Overridden method to assume lower accuracy.
def assertItemsAlmostEqual(self, a, b, places=2):
super(TestSCS, self).assertItemsAlmostEqual(a, b, places=places)
# Overridden method to assume lower accuracy.
def assertAlmostEqual(self, a, b, places=2):
super(TestSCS, self).assertAlmostEqual(a, b, places=places)
def test_scs_options(self):
"""Test that all the SCS solver options work.
"""
# Test SCS
# MAX_ITERS, EPS, ALPHA, UNDET_TOL, VERBOSE, and NORMALIZE.
# If opts is missing, then the algorithm uses default settings.
# USE_INDIRECT = True
EPS = 1e-4
x = cp.Variable(2, name='x')
prob = cp.Problem(cp.Minimize(cp.norm(x, 1) + 1.0), [x == 0])
for i in range(2):
prob.solve(solver=cp.SCS, max_iters=50, eps=EPS, alpha=EPS,
verbose=True, normalize=True, use_indirect=False)
self.assertAlmostEqual(prob.value, 1.0, places=2)
self.assertItemsAlmostEqual(x.value, [0, 0], places=2)
def test_log_problem(self):
# Log in objective.
obj = cp.Maximize(cp.sum(cp.log(self.x)))
constr = [self.x <= [1, math.e]]
p = cp.Problem(obj, constr)
result = p.solve(solver=cp.SCS)
self.assertAlmostEqual(result, 1)
self.assertItemsAlmostEqual(self.x.value, [1, math.e])
# Log in constraint.
obj = cp.Minimize(sum(self.x))
constr = [cp.log(self.x) >= 0, self.x <= [1, 1]]
p = cp.Problem(obj, constr)
result = p.solve(solver=cp.SCS)
self.assertAlmostEqual(result, 2)
self.assertItemsAlmostEqual(self.x.value, [1, 1])
# Index into log.
obj = cp.Maximize(cp.log(self.x)[1])
constr = [self.x <= [1, math.e]]
p = cp.Problem(obj, constr)
result = p.solve(solver=cp.SCS)
def test_sigma_max(self):
"""Test sigma_max.
"""
const = cp.Constant([[1, 2, 3], [4, 5, 6]])
constr = [self.C == const]
prob = cp.Problem(cp.Minimize(cp.norm(self.C, 2)), constr)
result = prob.solve(solver=cp.SCS)
self.assertAlmostEqual(result, cp.norm(const, 2).value)
self.assertItemsAlmostEqual(self.C.value, const.value)
def test_sdp_var(self):
"""Test sdp var.
"""
const = cp.Constant([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
X = cp.Variable((3, 3), PSD=True)
prob = cp.Problem(cp.Minimize(0), [X == const])
prob.solve(solver=cp.SCS)
self.assertEqual(prob.status, cp.INFEASIBLE)
def test_complex_matrices(self):
"""Test complex matrices.
"""
# Complex-valued matrix
K = np.array(np.random.rand(2, 2) + 1j * np.random.rand(2, 2)) # example matrix
n1 = la.svdvals(K).sum() # trace norm of K
# Dual Problem
X = cp.Variable((2, 2), complex=True)
Y = cp.Variable((2, 2), complex=True)
# X, Y >= 0 so trace is real
objective = cp.Minimize(
cp.real(0.5 * cp.trace(X) + 0.5 * cp.trace(Y))
)
constraints = [
cp.bmat([[X, -K.conj().T], [-K, Y]]) >> 0,
X >> 0,
Y >> 0,
]
problem = cp.Problem(objective, constraints)
sol_scs = problem.solve(solver='SCS')
self.assertEqual(constraints[0].dual_value.shape, (4, 4))
self.assertEqual(constraints[1].dual_value.shape, (2, 2))
self.assertEqual(constraints[2].dual_value.shape, (2, 2))
self.assertAlmostEqual(sol_scs, n1)
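        # (The SDP above is the standard semidefinite characterization of the
        # nuclear norm: minimizing 0.5*(tr X + tr Y) subject to
        # [[X, -K^H], [-K, Y]] >> 0 yields the sum of K's singular values,
        # which is why the result is compared against la.svdvals(K).sum().)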
def test_kl_div(self):
"""Test a problem with kl_div.
"""
kK = 50
kSeed = 10
prng = np.random.RandomState(kSeed)
# Generate a random reference distribution
npSPriors = prng.uniform(0.0, 1.0, (kK, 1))
npSPriors = npSPriors/sum(npSPriors)
# Reference distribution
p_refProb = cp.Parameter((kK, 1), nonneg=True)
# Distribution to be estimated
v_prob = cp.Variable((kK, 1))
objkl = 0.0
for k in range(kK):
objkl += cp.kl_div(v_prob[k, 0], p_refProb[k, 0])
constrs = [sum(v_prob[k, 0] for k in range(kK)) == 1]
klprob = cp.Problem(cp.Minimize(objkl), constrs)
p_refProb.value = npSPriors
klprob.solve(solver=cp.SCS)
self.assertItemsAlmostEqual(v_prob.value, npSPriors)
def test_entr(self):
"""Test a problem with entr.
"""
for n in [5, 10, 25]:
print(n)
x = cp.Variable(n)
obj = cp.Maximize(cp.sum(cp.entr(x)))
p = cp.Problem(obj, [cp.sum(x) == 1])
p.solve(solver=cp.SCS)
self.assertItemsAlmostEqual(x.value, n*[1./n])
def test_exp(self):
"""Test a problem with exp.
"""
for n in [5, 10, 25]:
print(n)
x = cp.Variable(n)
obj = cp.Minimize(cp.sum(cp.exp(x)))
p = cp.Problem(obj, [cp.sum(x) == 1])
p.solve(solver=cp.SCS)
self.assertItemsAlmostEqual(x.value, n*[1./n])
def test_log(self):
"""Test a problem with log.
"""
for n in [5, 10, 25]:
print(n)
x = cp.Variable(n)
obj = cp.Maximize(cp.sum(cp.log(x)))
p = cp.Problem(obj, [cp.sum(x) == 1])
p.solve(solver=cp.SCS)
self.assertItemsAlmostEqual(x.value, n*[1./n])
def test_solve_problem_twice(self):
"""Test a problem with log.
"""
n = 5
x = cp.Variable(n)
obj = cp.Maximize(cp.sum(cp.log(x)))
p = cp.Problem(obj, [cp.sum(x) == 1])
p.solve(solver=cp.SCS)
first_value = x.value
self.assertItemsAlmostEqual(first_value, n*[1./n])
p.solve(solver=cp.SCS)
second_value = x.value
self.assertItemsAlmostEqual(first_value, second_value)
def test_warm_start(self):
"""Test warm starting.
"""
x = cp.Variable(10)
obj = cp.Minimize(cp.sum(cp.exp(x)))
prob = cp.Problem(obj, [cp.sum(x) == 1])
result = prob.solve(solver=cp.SCS, eps=1e-4)
time = prob.solver_stats.solve_time
result2 = prob.solve(solver=cp.SCS, warm_start=True, eps=1e-4)
time2 = prob.solver_stats.solve_time
self.assertAlmostEqual(result2, result, places=2)
print(time > time2)
def test_warm_start_diffcp(self):
"""Test warm starting in diffcvx.
"""
try:
import diffcp
diffcp # for flake8
except ImportError:
self.skipTest("diffcp not installed.")
x = cp.Variable(10)
obj = cp.Minimize(cp.sum(cp.exp(x)))
prob = cp.Problem(obj, [cp.sum(x) == 1])
result = prob.solve(solver=cp.DIFFCP, eps=1e-4)
result2 = prob.solve(solver=cp.DIFFCP, warm_start=True, eps=1e-4)
self.assertAlmostEqual(result2, result, places=2)
def test_psd_constraint(self):
"""Test PSD constraint.
"""
s = cp.Variable((2, 2))
obj = cp.Maximize(cp.minimum(s[0, 1], 10))
const = [s >> 0, cp.diag(s) == np.ones(2)]
prob = cp.Problem(obj, const)
r = prob.solve(solver=cp.SCS)
s = s.value
print(const[0].residual)
print("value", r)
print("s", s)
print("eigs", np.linalg.eig(s + s.T)[0])
eigs = np.linalg.eig(s + s.T)[0]
self.assertEqual(np.all(eigs >= 0), True)
def test_scs_lp_3(self):
StandardTestLPs.test_lp_3(solver='SCS')
def test_scs_lp_4(self):
StandardTestLPs.test_lp_4(solver='SCS')
def test_scs_socp_1(self):
StandardTestSOCPs.test_socp_1(solver='SCS')
def test_scs_sdp_1min(self):
StandardTestSDPs.test_sdp_1min(solver='SCS')
def test_scs_expcone_1(self):
StandardTestECPs.test_expcone_1(solver='SCS')
@unittest.skipUnless('MOSEK' in INSTALLED_SOLVERS, 'MOSEK is not installed.')
class TestMosek(unittest.TestCase):
def test_mosek_lp_0(self):
StandardTestLPs.test_lp_0(solver='MOSEK')
def test_mosek_lp_1(self):
# default settings
StandardTestLPs.test_lp_1(solver='MOSEK')
# require a basic feasible solution
StandardTestLPs.test_lp_1(solver='MOSEK', places=6, bfs=True)
def test_mosek_lp_2(self):
StandardTestLPs.test_lp_2(solver='MOSEK')
def test_mosek_lp_3(self):
StandardTestLPs.test_lp_3(solver='MOSEK')
def test_mosek_lp_4(self):
StandardTestLPs.test_lp_4(solver='MOSEK')
def test_mosek_socp_0(self):
StandardTestSOCPs.test_socp_0(solver='MOSEK')
def test_mosek_socp_1(self):
StandardTestSOCPs.test_socp_1(solver='MOSEK')
def test_mosek_socp_2(self):
StandardTestSOCPs.test_socp_2(solver='MOSEK')
def test_mosek_sdp_1(self):
# minimization
StandardTestSDPs.test_sdp_1min(solver='MOSEK')
# maximization
StandardTestSDPs.test_sdp_1max(solver='MOSEK')
def test_mosek_expcone_1(self):
StandardTestECPs.test_expcone_1(solver='MOSEK')
def test_mosek_mi_lp_0(self):
StandardTestLPs.test_mi_lp_0(solver='MOSEK')
def test_mosek_mi_lp_1(self):
StandardTestLPs.test_mi_lp_1(solver='MOSEK')
def test_mosek_mi_lp_2(self):
StandardTestLPs.test_mi_lp_2(solver='MOSEK')
def test_mosek_mi_socp_1(self):
StandardTestSOCPs.test_mi_socp_1(solver='MOSEK')
def test_mosek_mi_socp_2(self):
StandardTestSOCPs.test_mi_socp_2(solver='MOSEK')
def test_mosek_params(self):
if cp.MOSEK in INSTALLED_SOLVERS:
import mosek
n = 10
m = 4
A = np.random.randn(m, n)
x = np.random.randn(n)
y = A.dot(x)
# Solve a simple basis pursuit problem for testing purposes.
z = cp.Variable(n)
objective = cp.Minimize(cp.norm1(z))
constraints = [A @ z == y]
problem = cp.Problem(objective, constraints)
invalid_mosek_params = {
"dparam.basis_tol_x": "1e-8"
}
with self.assertRaises(ValueError):
problem.solve(solver=cp.MOSEK, mosek_params=invalid_mosek_params)
with self.assertRaises(ValueError):
problem.solve(solver=cp.MOSEK, invalid_kwarg=None)
mosek_params = {
mosek.dparam.basis_tol_x: 1e-8,
"MSK_IPAR_INTPNT_MAX_ITERATIONS": 20
}
problem.solve(solver=cp.MOSEK, mosek_params=mosek_params)
@unittest.skipUnless('SUPER_SCS' in INSTALLED_SOLVERS, 'SUPER_SCS is not installed.')
class TestSuperSCS(BaseTest):
def setUp(self):
self.x = cp.Variable(2, name='x')
self.y = cp.Variable(2, name='y')
self.A = cp.Variable((2, 2), name='A')
self.B = cp.Variable((2, 2), name='B')
self.C = cp.Variable((3, 2), name='C')
    # Overridden method to assume lower accuracy.
    def assertItemsAlmostEqual(self, a, b, places=2):
        super(TestSuperSCS, self).assertItemsAlmostEqual(a, b, places=places)
    # Overridden method to assume lower accuracy.
    def assertAlmostEqual(self, a, b, places=2):
        super(TestSuperSCS, self).assertAlmostEqual(a, b, places=places)
def test_super_scs_lp_0(self):
StandardTestLPs.test_lp_0(solver='SUPER_SCS')
def test_super_scs_lp_1(self):
StandardTestLPs.test_lp_1(solver='SUPER_SCS')
def test_super_scs_lp_2(self):
StandardTestLPs.test_lp_2(solver='SUPER_SCS')
def test_super_scs_lp_3(self):
StandardTestLPs.test_lp_3(solver='SUPER_SCS')
def test_super_scs_lp_4(self):
StandardTestLPs.test_lp_4(solver='SUPER_SCS')
def test_super_scs_socp_0(self):
StandardTestSOCPs.test_socp_0(solver='SUPER_SCS')
def test_super_scs_socp_1(self):
StandardTestSOCPs.test_socp_1(solver='SUPER_SCS')
def test_super_scs_socp_2(self):
StandardTestSOCPs.test_socp_2(solver='SUPER_SCS')
def test_super_scs_sdp_1(self):
# minimization
StandardTestSDPs.test_sdp_1min(solver='SUPER_SCS')
# maximization
StandardTestSDPs.test_sdp_1max(solver='SUPER_SCS')
def test_super_scs_expcone_1(self):
StandardTestECPs.test_expcone_1(solver='SUPER_SCS')
def test_warm_start(self):
if cp.SUPER_SCS in INSTALLED_SOLVERS:
x = cp.Variable(10)
obj = cp.Minimize(cp.sum(cp.exp(x)))
prob = cp.Problem(obj, [cp.sum(x) == 1])
result = prob.solve(solver='SUPER_SCS', eps=1e-4)
result2 = prob.solve(solver='SUPER_SCS', warm_start=True, eps=1e-4)
self.assertAlmostEqual(result2, result, places=2)
@unittest.skipUnless('CVXOPT' in INSTALLED_SOLVERS, 'CVXOPT is not installed.')
class TestCVXOPT(BaseTest):
def setUp(self):
self.a = cp.Variable(name='a')
self.b = cp.Variable(name='b')
self.c = cp.Variable(name='c')
self.x = cp.Variable(2, name='x')
self.y = cp.Variable(3, name='y')
self.z = cp.Variable(2, name='z')
self.A = cp.Variable((2, 2), name='A')
self.B = cp.Variable((2, 2), name='B')
self.C = cp.Variable((3, 2), name='C')
def test_cvxopt_options(self):
"""Test that all the CVXOPT solver options work.
"""
# TODO race condition when changing these values.
# 'maxiters'
# maximum number of iterations (default: 100).
# 'abstol'
# absolute accuracy (default: 1e-7).
# 'reltol'
# relative accuracy (default: 1e-6).
# 'feastol'
# tolerance for feasibility conditions (default: 1e-7).
# 'refinement'
# number of iterative refinement steps when solving KKT equations
# (default: 0 if the problem has no second-order cone
# or matrix inequality constraints; 1 otherwise).
if cp.CVXOPT in INSTALLED_SOLVERS:
EPS = 1e-7
prob = cp.Problem(cp.Minimize(cp.norm(self.x, 1) + 1.0), [self.x == 0])
for i in range(2):
prob.solve(solver=cp.CVXOPT, feastol=EPS, abstol=EPS, reltol=EPS,
max_iters=20, verbose=True, kktsolver="chol",
refinement=2, warm_start=True)
self.assertAlmostEqual(prob.value, 1.0)
self.assertItemsAlmostEqual(self.x.value, [0, 0])
def test_cvxopt_lp_0(self):
StandardTestLPs.test_lp_0(solver='CVXOPT')
def test_cvxopt_lp_1(self):
StandardTestLPs.test_lp_1(solver='CVXOPT')
def test_cvxopt_lp_2(self):
StandardTestLPs.test_lp_2(solver='CVXOPT')
def test_cvxopt_lp_3(self):
StandardTestLPs.test_lp_3(solver='CVXOPT')
def test_cvxopt_lp_4(self):
# default settings
StandardTestLPs.test_lp_4(solver='CVXOPT')
# without a CVXPY-based presolve
StandardTestLPs.test_lp_4(solver='CVXOPT', kktsolver='robust')
def test_cvxopt_socp_0(self):
StandardTestSOCPs.test_socp_0(solver='CVXOPT')
def test_cvxopt_socp_1(self):
StandardTestSOCPs.test_socp_1(solver='CVXOPT')
def test_cvxopt_socp_2(self):
StandardTestSOCPs.test_socp_2(solver='CVXOPT')
def test_cvxopt_sdp_1(self):
# minimization
StandardTestSDPs.test_sdp_1min(solver='CVXOPT')
# maximization
StandardTestSDPs.test_sdp_1max(solver='CVXOPT')
@unittest.skipUnless('CBC' in INSTALLED_SOLVERS, 'CBC is not installed.')
class TestCBC(BaseTest):
def setUp(self):
self.a = cp.Variable(name='a')
self.b = cp.Variable(name='b')
self.c = cp.Variable(name='c')
self.x = cp.Variable(2, name='x')
self.y = cp.Variable(3, name='y')
self.z = cp.Variable(2, name='z')
self.A = cp.Variable((2, 2), name='A')
self.B = cp.Variable((2, 2), name='B')
self.C = cp.Variable((3, 2), name='C')
def test_options(self):
"""Test that all the cvx.CBC solver options work.
"""
prob = cp.Problem(cp.Minimize(cp.norm(self.x, 1)),
[self.x == cp.Variable(2, boolean=True)])
if cp.CBC in INSTALLED_SOLVERS:
for i in range(2):
# Some cut-generators seem to be buggy for now -> set to false
# prob.solve(solver=cvx.CBC, verbose=True, GomoryCuts=True, MIRCuts=True,
# MIRCuts2=True, TwoMIRCuts=True, ResidualCapacityCuts=True,
# KnapsackCuts=True, FlowCoverCuts=True, CliqueCuts=True,
# LiftProjectCuts=True, AllDifferentCuts=False, OddHoleCuts=True,
# RedSplitCuts=False, LandPCuts=False, PreProcessCuts=False,
# ProbingCuts=True, SimpleRoundingCuts=True)
prob.solve(solver=cp.CBC, verbose=True, maximumSeconds=100)
self.assertItemsAlmostEqual(self.x.value, [0, 0])
else:
with self.assertRaises(Exception) as cm:
prob.solve(solver=cp.CBC)
self.assertEqual(str(cm.exception), "The solver %s is not installed." % cp.CBC)
def test_cbc_lp_0(self):
StandardTestLPs.test_lp_0(solver='CBC', duals=False)
def test_cbc_lp_1(self):
StandardTestLPs.test_lp_1(solver='CBC', duals=False)
def test_cbc_lp_2(self):
StandardTestLPs.test_lp_2(solver='CBC', duals=False)
def test_cbc_lp_3(self):
StandardTestLPs.test_lp_3(solver='CBC')
def test_cbc_lp_4(self):
StandardTestLPs.test_lp_4(solver='CBC')
def test_cbc_mi_lp_0(self):
StandardTestLPs.test_mi_lp_0(solver='CBC')
def test_cbc_mi_lp_1(self):
StandardTestLPs.test_mi_lp_1(solver='CBC')
def test_cbc_mi_lp_2(self):
StandardTestLPs.test_mi_lp_2(solver='CBC')
@unittest.skipUnless('GLPK' in INSTALLED_SOLVERS, 'GLPK is not installed.')
class TestGLPK(unittest.TestCase):
def test_glpk_lp_0(self):
StandardTestLPs.test_lp_0(solver='GLPK')
def test_glpk_lp_1(self):
StandardTestLPs.test_lp_1(solver='GLPK')
def test_glpk_lp_2(self):
StandardTestLPs.test_lp_2(solver='GLPK')
def test_glpk_lp_3(self):
StandardTestLPs.test_lp_3(solver='GLPK')
def test_glpk_lp_4(self):
StandardTestLPs.test_lp_4(solver='GLPK')
def test_glpk_mi_lp_0(self):
StandardTestLPs.test_mi_lp_0(solver='GLPK_MI')
def test_glpk_mi_lp_1(self):
StandardTestLPs.test_mi_lp_1(solver='GLPK_MI')
def test_glpk_mi_lp_2(self):
StandardTestLPs.test_mi_lp_2(solver='GLPK_MI')
@unittest.skipUnless('CPLEX' in INSTALLED_SOLVERS, 'CPLEX is not installed.')
class TestCPLEX(BaseTest):
""" Unit tests for solver specific behavior. """
def setUp(self):
self.a = cp.Variable(name='a')
self.b = cp.Variable(name='b')
self.c = cp.Variable(name='c')
self.x = cp.Variable(2, name='x')
self.y = cp.Variable(3, name='y')
self.z = cp.Variable(2, name='z')
self.A = cp.Variable((2, 2), name='A')
self.B = cp.Variable((2, 2), name='B')
self.C = cp.Variable((3, 2), name='C')
def test_cplex_warm_start(self):
"""Make sure that warm starting CPLEX behaves as expected
Note: This only checks output, not whether or not CPLEX is warm starting internally
"""
if cp.CPLEX in INSTALLED_SOLVERS:
A = cp.Parameter((2, 2))
b = cp.Parameter(2)
h = cp.Parameter(2)
c = cp.Parameter(2)
A.value = np.array([[1, 0], [0, 0]])
b.value = np.array([1, 0])
h.value = np.array([2, 2])
c.value = np.array([1, 1])
objective = cp.Maximize(c[0] * self.x[0] + c[1] * self.x[1])
constraints = [self.x[0] <= h[0],
self.x[1] <= h[1],
A @ self.x == b]
prob = cp.Problem(objective, constraints)
result = prob.solve(solver=cp.CPLEX, warm_start=True)
self.assertEqual(result, 3)
self.assertItemsAlmostEqual(self.x.value, [1, 2])
# Change A and b from the original values
A.value = np.array([[0, 0], [0, 1]]) # <----- Changed
b.value = np.array([0, 1]) # <----- Changed
h.value = np.array([2, 2])
c.value = np.array([1, 1])
# Without setting update_eq_constrs = False,
# the results should change to the correct answer
result = prob.solve(solver=cp.CPLEX, warm_start=True)
self.assertEqual(result, 3)
self.assertItemsAlmostEqual(self.x.value, [2, 1])
# Change h from the original values
A.value = np.array([[1, 0], [0, 0]])
b.value = np.array([1, 0])
h.value = np.array([1, 1]) # <----- Changed
c.value = np.array([1, 1])
# Without setting update_ineq_constrs = False,
# the results should change to the correct answer
result = prob.solve(solver=cp.CPLEX, warm_start=True)
self.assertEqual(result, 2)
self.assertItemsAlmostEqual(self.x.value, [1, 1])
# Change c from the original values
A.value = np.array([[1, 0], [0, 0]])
b.value = np.array([1, 0])
h.value = np.array([2, 2])
c.value = np.array([2, 1]) # <----- Changed
# Without setting update_objective = False,
# the results should change to the correct answer
result = prob.solve(solver=cp.CPLEX, warm_start=True)
self.assertEqual(result, 4)
self.assertItemsAlmostEqual(self.x.value, [1, 2])
else:
with self.assertRaises(Exception) as cm:
prob = cp.Problem(cp.Minimize(cp.norm(self.x, 1)), [self.x == 0])
prob.solve(solver=cp.CPLEX, warm_start=True)
self.assertEqual(str(cm.exception), "The solver %s is not installed." % cp.CPLEX)
def test_cplex_params(self):
if cp.CPLEX in INSTALLED_SOLVERS:
n, m = 10, 4
A = np.random.randn(m, n)
x = np.random.randn(n)
y = A.dot(x)
# Solve a simple basis pursuit problem for testing purposes.
z = cp.Variable(n)
objective = cp.Minimize(cp.norm1(z))
constraints = [A @ z == y]
problem = cp.Problem(objective, constraints)
invalid_cplex_params = {
"bogus": "foo"
}
with self.assertRaises(ValueError):
problem.solve(solver=cp.CPLEX,
cplex_params=invalid_cplex_params)
with self.assertRaises(ValueError):
problem.solve(solver=cp.CPLEX, invalid_kwarg=None)
cplex_params = {
"advance": 0, # int param
"simplex.limits.iterations": 1000, # long param
"timelimit": 1000.0, # double param
"workdir": '"mydir"', # string param
}
problem.solve(solver=cp.CPLEX, cplex_params=cplex_params)
def test_cplex_lp_0(self):
StandardTestLPs.test_lp_0(solver='CPLEX')
def test_cplex_lp_1(self):
StandardTestLPs.test_lp_1(solver='CPLEX')
def test_cplex_lp_2(self):
StandardTestLPs.test_lp_2(solver='CPLEX')
def test_cplex_lp_3(self):
StandardTestLPs.test_lp_3(solver='CPLEX')
def test_cplex_lp_4(self):
StandardTestLPs.test_lp_4(solver='CPLEX')
def test_cplex_socp_0(self):
StandardTestSOCPs.test_socp_0(solver='CPLEX')
def test_cplex_socp_1(self):
StandardTestSOCPs.test_socp_1(solver='CPLEX')
def test_cplex_socp_2(self):
StandardTestSOCPs.test_socp_2(solver='CPLEX')
def test_cplex_mi_lp_0(self):
StandardTestLPs.test_mi_lp_0(solver='CPLEX')
def test_cplex_mi_lp_1(self):
StandardTestLPs.test_mi_lp_1(solver='CPLEX')
def test_cplex_mi_lp_2(self):
StandardTestLPs.test_mi_lp_2(solver='CPLEX')
def test_cplex_mi_socp_1(self):
StandardTestSOCPs.test_mi_socp_1(solver='CPLEX', places=3)
def test_cplex_mi_socp_2(self):
StandardTestSOCPs.test_mi_socp_2(solver='CPLEX')
@unittest.skipUnless('GUROBI' in INSTALLED_SOLVERS, 'GUROBI is not installed.')
class TestGUROBI(BaseTest):
""" Unit tests for solver specific behavior. """
def setUp(self):
self.a = cp.Variable(name='a')
self.b = cp.Variable(name='b')
self.c = cp.Variable(name='c')
self.x = cp.Variable(2, name='x')
self.y = cp.Variable(3, name='y')
self.z = cp.Variable(2, name='z')
self.A = cp.Variable((2, 2), name='A')
self.B = cp.Variable((2, 2), name='B')
self.C = cp.Variable((3, 2), name='C')
def test_gurobi_warm_start(self):
"""Make sure that warm starting Gurobi behaves as expected
Note: This only checks output, not whether or not Gurobi is warm starting internally
"""
if cp.GUROBI in INSTALLED_SOLVERS:
import numpy as np
A = cp.Parameter((2, 2))
b = cp.Parameter(2)
h = cp.Parameter(2)
c = cp.Parameter(2)
A.value = np.array([[1, 0], [0, 0]])
b.value = np.array([1, 0])
h.value = np.array([2, 2])
c.value = np.array([1, 1])
objective = cp.Maximize(c[0] * self.x[0] + c[1] * self.x[1])
constraints = [self.x[0] <= h[0],
self.x[1] <= h[1],
A @ self.x == b]
prob = cp.Problem(objective, constraints)
result = prob.solve(solver=cp.GUROBI, warm_start=True)
self.assertEqual(result, 3)
self.assertItemsAlmostEqual(self.x.value, [1, 2])
# Change A and b from the original values
A.value = np.array([[0, 0], [0, 1]]) # <----- Changed
b.value = np.array([0, 1]) # <----- Changed
h.value = np.array([2, 2])
c.value = np.array([1, 1])
# Without setting update_eq_constrs = False,
# the results should change to the correct answer
result = prob.solve(solver=cp.GUROBI, warm_start=True)
self.assertEqual(result, 3)
self.assertItemsAlmostEqual(self.x.value, [2, 1])
# Change h from the original values
A.value = np.array([[1, 0], [0, 0]])
b.value = np.array([1, 0])
h.value = np.array([1, 1]) # <----- Changed
c.value = np.array([1, 1])
# Without setting update_ineq_constrs = False,
# the results should change to the correct answer
result = prob.solve(solver=cp.GUROBI, warm_start=True)
self.assertEqual(result, 2)
self.assertItemsAlmostEqual(self.x.value, [1, 1])
# Change c from the original values
A.value = np.array([[1, 0], [0, 0]])
b.value = np.array([1, 0])
h.value = np.array([2, 2])
c.value = np.array([2, 1]) # <----- Changed
# Without setting update_objective = False,
# the results should change to the correct answer
result = prob.solve(solver=cp.GUROBI, warm_start=True)
self.assertEqual(result, 4)
self.assertItemsAlmostEqual(self.x.value, [1, 2])
else:
with self.assertRaises(Exception) as cm:
prob = cp.Problem(cp.Minimize(cp.norm(self.x, 1)), [self.x == 0])
prob.solve(solver=cp.GUROBI, warm_start=True)
self.assertEqual(str(cm.exception), "The solver %s is not installed." % cp.GUROBI)
def test_gurobi_lp_0(self):
StandardTestLPs.test_lp_0(solver='GUROBI')
def test_gurobi_lp_1(self):
StandardTestLPs.test_lp_1(solver='GUROBI')
def test_gurobi_lp_2(self):
StandardTestLPs.test_lp_2(solver='GUROBI')
def test_gurobi_lp_3(self):
StandardTestLPs.test_lp_3(solver='GUROBI')
def test_gurobi_lp_4(self):
StandardTestLPs.test_lp_4(solver='GUROBI')
def test_gurobi_socp_0(self):
StandardTestSOCPs.test_socp_0(solver='GUROBI')
def test_gurobi_socp_1(self):
StandardTestSOCPs.test_socp_1(solver='GUROBI')
def test_gurobi_socp_2(self):
StandardTestSOCPs.test_socp_2(solver='GUROBI')
def test_gurobi_mi_lp_0(self):
StandardTestLPs.test_mi_lp_0(solver='GUROBI')
def test_gurobi_mi_lp_1(self):
StandardTestLPs.test_mi_lp_1(solver='GUROBI')
def test_gurobi_mi_lp_2(self):
StandardTestLPs.test_mi_lp_2(solver='GUROBI')
def test_gurobi_mi_socp_1(self):
StandardTestSOCPs.test_mi_socp_1(solver='GUROBI', places=3)
def test_gurobi_mi_socp_2(self):
StandardTestSOCPs.test_mi_socp_2(solver='GUROBI')
@unittest.skipUnless('XPRESS' in INSTALLED_SOLVERS, 'XPRESS is not installed.')
class TestXPRESS(unittest.TestCase):
def test_xpress_lp_0(self):
StandardTestLPs.test_lp_0(solver='XPRESS')
def test_xpress_lp_1(self):
StandardTestLPs.test_lp_1(solver='XPRESS')
def test_xpress_lp_2(self):
StandardTestLPs.test_lp_2(solver='XPRESS')
def test_xpress_lp_3(self):
StandardTestLPs.test_lp_3(solver='XPRESS')
def test_xpress_lp_4(self):
StandardTestLPs.test_lp_4(solver='XPRESS')
def test_xpress_socp_0(self):
StandardTestSOCPs.test_socp_0(solver='XPRESS')
def test_xpress_socp_1(self):
StandardTestSOCPs.test_socp_1(solver='XPRESS')
def test_xpress_socp_2(self):
StandardTestSOCPs.test_socp_2(solver='XPRESS')
def test_xpress_mi_lp_0(self):
StandardTestLPs.test_mi_lp_0(solver='XPRESS')
def test_xpress_mi_lp_1(self):
StandardTestLPs.test_mi_lp_1(solver='XPRESS')
def test_xpress_mi_lp_2(self):
StandardTestLPs.test_mi_lp_2(solver='XPRESS')
def test_xpress_mi_socp_1(self):
StandardTestSOCPs.test_mi_socp_1(solver='XPRESS')
def test_xpress_mi_socp_2(self):
StandardTestSOCPs.test_mi_socp_2(solver='XPRESS')
class TestAllSolvers(BaseTest):
def setUp(self):
self.a = cp.Variable(name='a')
self.b = cp.Variable(name='b')
self.c = cp.Variable(name='c')
self.x = cp.Variable(2, name='x')
self.y = cp.Variable(3, name='y')
self.z = cp.Variable(2, name='z')
self.A = cp.Variable((2, 2), name='A')
self.B = cp.Variable((2, 2), name='B')
self.C = cp.Variable((3, 2), name='C')
def test_installed_solvers(self):
"""Test the list of installed solvers.
"""
from cvxpy.reductions.solvers.defines import (SOLVER_MAP_CONIC, SOLVER_MAP_QP,
INSTALLED_SOLVERS)
prob = cp.Problem(cp.Minimize(cp.norm(self.x, 1) + 1.0), [self.x == 0])
for solver in SOLVER_MAP_CONIC.keys():
if solver in INSTALLED_SOLVERS:
prob.solve(solver=solver)
self.assertAlmostEqual(prob.value, 1.0)
self.assertItemsAlmostEqual(self.x.value, [0, 0])
else:
with self.assertRaises(Exception) as cm:
prob.solve(solver=solver)
self.assertEqual(str(cm.exception), "The solver %s is not installed." % solver)
for solver in SOLVER_MAP_QP.keys():
if solver in INSTALLED_SOLVERS:
prob.solve(solver=solver)
self.assertItemsAlmostEqual(self.x.value, [0, 0])
else:
with self.assertRaises(Exception) as cm:
prob.solve(solver=solver)
self.assertEqual(str(cm.exception), "The solver %s is not installed." % solver)
| 35.466995 | 95 | 0.598267 |
06fec392ffe6ed6240b2e3e8b401a61ab7d28ff8 | 1,586 | py | Python | apps/tweets/tests/test_web_api.py | thinkAmi/dj_ringo_tabetter | 9169f8e26cf4a85ed4efb7fcda1f0dd763a8cebb | [
"MIT"
] | null | null | null | apps/tweets/tests/test_web_api.py | thinkAmi/dj_ringo_tabetter | 9169f8e26cf4a85ed4efb7fcda1f0dd763a8cebb | [
"MIT"
] | 9 | 2020-02-11T21:44:18.000Z | 2022-02-10T09:19:42.000Z | apps/tweets/tests/test_web_api.py | thinkAmi/dj_ringo_tabetter | 9169f8e26cf4a85ed4efb7fcda1f0dd763a8cebb | [
"MIT"
] | null | null | null | import os
from typing import List
from unittest.mock import Mock
import pytest
def _is_lack_of_environment_variables(environment_variable_names: List[str]) -> bool:
""" 環境変数が不足しているかをチェック
:param environment_variable_names: 対象の環境名変数リスト
:return: 不足している場合、True
"""
for name in environment_variable_names:
if not os.environ.get(name):
return True
return False
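# Usage sketch (illustrative): _is_lack_of_environment_variables(['SLACK_TOKEN'])
# returns True when SLACK_TOKEN is unset or empty in the current environment.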
class TestSlack:
""" Slack APIを使ったテスト """
def test_log(self, slack, capsys):
from apps.tweets.management.commands.gather_tweets import Command
if _is_lack_of_environment_variables(['SLACK_TOKEN', 'SLACK_CHANNEL']):
pytest.skip('Required environment variables are not set')
log_text = "test post"
sut = Command()
sut.log(log_text)  # at this point "test post" is posted to Slack
actual = capsys.readouterr()
assert actual.out == f"{log_text}\n", "stdout output should end with a newline"
class TestTwitter:
""" Twitter APIを使ったテスト """
def test_gather_tweets(self, twitter):
from apps.tweets.management.commands import gather_tweets
if _is_lack_of_environment_variables(['USER_ID', 'TWITTER_CONSUMER_KEY', 'TWITTER_CONSUMER_SECRET']):
pytest.skip('必要な環境変数が設定されていません')
gather_tweets.TWEET_COUNT = 3  # 200 tweets is too many for a test, so override the count
sut = gather_tweets.Command()
sut.last_search = Mock()
sut.last_search.prev_since_id = twitter  # search from the status_id given on the command line
actual = sut.gather_tweets()
assert len(actual) == 3
assert actual[0].id > actual[1].id > actual[2].id, "results should be sorted by id in descending order"
| 29.37037 | 109 | 0.679697 |
211dd4e2b4b0371b0b39cf075805c01ea9c9ad75 | 2,407 | py | Python | project/apps/registration/fields.py | dbinetti/barberscore | 13c3d8193834bd2bb79922e28d3f5ab1675bdffd | [
"BSD-2-Clause"
] | 13 | 2017-08-07T15:45:49.000Z | 2019-07-03T13:58:50.000Z | project/apps/registration/fields.py | barberscore/barberscore-api | 2aa9f8598c18c28ba1d4a294f76fd055619f803e | [
"BSD-2-Clause"
] | 309 | 2017-07-14T02:34:12.000Z | 2022-01-14T21:37:02.000Z | project/apps/registration/fields.py | dbinetti/barberscore-django | 16fbd9945becda0a765bbdf52ad459a63655128f | [
"BSD-2-Clause"
] | 5 | 2017-08-07T14:01:07.000Z | 2019-06-24T19:44:55.000Z |
# Standard Library
import os
# Third-Party
import pytz
import six
from rest_framework_json_api import serializers
# Django
from django.core.exceptions import ValidationError
from django.db.models import EmailField
from django.utils.deconstruct import deconstructible
from django.contrib.postgres.fields import ArrayField
from django.forms import MultipleChoiceField
@deconstructible
class UploadPath(object):
def __init__(self, name):
self.name = name
def __call__(self, instance, filename):
return os.path.join(
instance._meta.app_label,
instance._meta.model_name,
self.name,
str(instance.id),
)
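# For example (app and model names below are illustrative, not from this repo), an
# UploadPath('image') used on a model 'contest' in the 'registration' app would yield
# upload paths like 'registration/contest/image/<instance id>'.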
class LowerEmailField(EmailField):
def get_prep_value(self, value):
value = super().get_prep_value(value)
if value is not None:
value = value.lower()
return value
class TimezoneField(serializers.Field):
def to_representation(self, obj):
return six.text_type(obj)
def to_internal_value(self, data):
try:
return pytz.timezone(str(data))
except pytz.exceptions.UnknownTimeZoneError:
raise ValidationError('Unknown timezone')
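# Behavior sketch (values illustrative): to_internal_value('America/New_York') returns the
# corresponding pytz timezone object, while to_representation(pytz.UTC) returns the string 'UTC'.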
class DivisionsField(ArrayField):
def formfield(self, **kwargs):
defaults = {
'form_class': MultipleChoiceField,
'choices': self.base_field.choices,
}
defaults.update(kwargs)
# Skip our parent's formfield implementation completely as we don't
# care for it.
# pylint:disable=bad-super-call
return super(ArrayField, self).formfield(**defaults)
def to_python(self, value):
res = super().to_python(value)
if isinstance(res, list):
value = [self.base_field.to_python(val) for val in res]
return value
class DistrictField(ArrayField):
def formfield(self, **kwargs):
defaults = {
'choices': self.base_field.choices,
}
defaults.update(kwargs)
# Skip our parent's formfield implementation completely as we don't
# care for it.
# pylint:disable=bad-super-call
return super(ArrayField, self).formfield(**defaults)
def to_python(self, value):
res = super().to_python(value)
if isinstance(res, list):
value = [self.base_field.to_python(val) for val in res]
return value
| 27.666667 | 75 | 0.651849 |
a5bce036ba17bd4b3d238a73cd3f7dd39d32df4a | 5,577 | py | Python | ursina/build.py | cezidev/ursina | d51c666eae42e64b03ec6c71871e5197c9ebbbab | [
"MIT"
] | 1 | 2020-09-04T14:32:33.000Z | 2020-09-04T14:32:33.000Z | ursina/build.py | Noah-shn/ursina | d51c666eae42e64b03ec6c71871e5197c9ebbbab | [
"MIT"
] | null | null | null | ursina/build.py | Noah-shn/ursina | d51c666eae42e64b03ec6c71871e5197c9ebbbab | [
"MIT"
] | 1 | 2020-09-04T14:32:41.000Z | 2020-09-04T14:32:41.000Z | from modulefinder import ModuleFinder
import os
import sys
import shutil
from shutil import copy, copyfile
from distutils.dir_util import copy_tree
from pathlib import Path
import time
def copytree(src, dst, symlinks=False, ignore=None):
src = str(src)
dst = str(dst)
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
try:
shutil.copytree(s, d, symlinks, ignore)
except Exception as e:
print(e)
else:
if s.endswith('.psd'):
continue
shutil.copy2(s, d)
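# Command-line sketch (paths and flags are illustrative):
#   python build.py path/to/my_project --ignore raw_assets
# creates path/to/my_project/build with a bundled Python, the project copied to build/src,
# and a run.bat launcher, as implemented below.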
if len(sys.argv) > 1:
if sys.argv[1] == 'help' or sys.argv[1] == '--help':
print(
'''package ursina application for windows10.
provided with project folder path, creates a build folder where
it copies python and project's dependent packages. requires a main.py file.
copies game scripts and assets into the 'build/src' folder.
creates a .bat file to start the game.'''
)
sys.exit()
ignore = list()
for i, arg in enumerate(sys.argv):
if arg == '--ignore':
for j in range(i, len(sys.argv)):
ignore.append(sys.argv[j])
print('ignoring', sys.argv[j])
break
if len(sys.argv) > 1:
project_folder = Path(sys.argv[1])
else:
project_folder = Path.cwd()
build_folder = Path(project_folder / 'build')
if build_folder.exists():
msg = f'Build folder {build_folder} already exists. \nProceed to delete and overwrite?'
overwrite = input("%s (y/N) " % msg).lower() == 'y'
if not overwrite:
print('stopped building')
exit()
print('deleting existing build folder')
shutil.rmtree(str(build_folder))
print('building project:', project_folder)
start_time = time.time()
python_dest = Path(build_folder / 'python')
python_dlls_dest = Path(build_folder / 'python/DLLs')
python_lib_dest = Path(build_folder / 'python/Lib')
src_dest = Path(build_folder / 'src')
build_folder.mkdir()
python_dest.mkdir()
python_dlls_dest.mkdir()
python_lib_dest.mkdir()
src_dest.mkdir()
# def copy_python():
# copy files in python installation folder, but not the folders
print('copying python')
python_folder = Path(sys.executable).parent
[copy(str(f), str(python_dest / f.name)) for f in python_folder.iterdir() if f.is_file()]
# def copy_python_lib():
print('copying python Lib files')
[copy(str(f), str(python_lib_dest / f.name)) for f in Path(python_folder / 'Lib').iterdir() if f.is_file()]
# def copy_always_included():
print('copying always included')
always_include = (
'Lib/collections', 'Lib/ctypes', 'Lib/encodings',
'Lib/importlib', 'Lib/urllib', 'Lib/logging',
'Lib/site-packages/panda3d/etc', 'Lib/site-packages/panda3d/__init__.py',
'Lib/site-packages/panda3d/cg.dll', 'Lib/site-packages/panda3d/cgGL.dll',
'Lib/site-packages/panda3d/libp3direct.dll', 'Lib/site-packages/panda3d/libp3dtool.dll',
'Lib/site-packages/panda3d/libp3dtoolconfig.dll', 'Lib/site-packages/panda3d/libp3interrogatedb.dll',
'Lib/site-packages/panda3d/libp3openal_audio.dll','Lib/site-packages/panda3d/libp3windisplay.dll',
'Lib/site-packages/panda3d/libpanda.dll','Lib/site-packages/panda3d/libpandaegg.dll',
'Lib/site-packages/panda3d/libpandaexpress.dll','Lib/site-packages/panda3d/libpandagl.dll',
)
for path in always_include:
source = python_folder / path
dest = python_dest / path
print('copying:', source)
if source.is_file():
dest.parent.mkdir(parents=True, exist_ok=True)
copy(str(source), str(dest))
elif source.is_dir():
dest.mkdir(parents=True, exist_ok=True)
copytree(source, dest)
# def copy_ursina():
print('copying ursina')
import importlib
spec = importlib.util.find_spec('ursina')
ursina_path = Path(spec.origin).parent
dest = build_folder / 'python/Lib/site-packages/ursina'
dest.mkdir(parents=True, exist_ok=True)
copytree(ursina_path, dest)
print('copying found modules')
finder = ModuleFinder()
finder.run_script(str(project_folder) + '/main.py')
for name, mod in finder.modules.items():
filename = mod.__file__
if filename is None:
continue
if '__' in name:
print('ignore:', filename)
continue
if 'Python' in filename and 'DLLs' in filename:
print('copying:', filename)
copy(filename, str(build_folder / 'python/DLLs'))
elif 'lib\\site-packages\\' in filename:
print('copying:', filename)
forward_slashed = filename.split('lib\\site-packages\\')[1].replace('\\', '/')
dir = build_folder / 'python/lib/site-packages' / forward_slashed
dir.parent.mkdir(parents=True, exist_ok=True)
copy(filename, dir)
print('copying assets')
for f in project_folder.iterdir():
name = f.name
dest = Path(src_dest / f.name)
if name in ['.git', '__pycache__', 'build', '.gitignore'] + ignore:
print('ignore:', f)
continue
elif f.is_dir():
print('copying assetfolder:', f, 'to', dest)
dest.mkdir(parents=True, exist_ok=True)
copytree(project_folder / f, dest, ignore=shutil.ignore_patterns('*psd'))
elif f.is_file():
print('copying asset:', f, 'to', src_dest / f.name)
copy(str(f), str(dest))
print('creating .bat file')
with Path(build_folder / 'run.bat').open('w') as f:
f.write('''start "" "%CD%\python\python.exe" %CD%\src\main.py''')
print('build complete! time elapsed:', time.time() - start_time)
| 31.868571 | 107 | 0.65806 |
007f9396a3f83735759d37ebd51dce309272b13a | 61 | py | Python | models/__init__.py | juanmc2005/MetricAMI | 8cb9fbe8dcf5303f1b44007f03492e065e867caf | [
"MIT"
] | 5 | 2020-05-25T09:27:08.000Z | 2021-10-01T09:11:46.000Z | models/__init__.py | juanmc2005/MetricAMI | 8cb9fbe8dcf5303f1b44007f03492e065e867caf | [
"MIT"
] | null | null | null | models/__init__.py | juanmc2005/MetricAMI | 8cb9fbe8dcf5303f1b44007f03492e065e867caf | [
"MIT"
] | 1 | 2021-10-15T04:00:25.000Z | 2021-10-15T04:00:25.000Z | from .base import MetricNet
from .ami import AMILSTM, AMIBert | 30.5 | 33 | 0.819672 |
030ce6cbdf6bd27aec7236db8da2db885f1d63c3 | 1,001 | py | Python | src/doc/help2man_preformat.py | rmv/oiio | f9b45b96ec032fc94023f67433ca9bb97ba1ec5d | [
"BSD-3-Clause-Clear",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/doc/help2man_preformat.py | rmv/oiio | f9b45b96ec032fc94023f67433ca9bb97ba1ec5d | [
"BSD-3-Clause-Clear",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/doc/help2man_preformat.py | rmv/oiio | f9b45b96ec032fc94023f67433ca9bb97ba1ec5d | [
"BSD-3-Clause-Clear",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python
# Format the output from various oiio command line "$tool --help" invocations,
# and munge such that txt2man generates a simple man page with not-too-horrible
# formatting.
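# A typical pipeline might be (tool name illustrative):
#   oiiotool --help | python help2man_preformat.py | txt2man > oiiotool.1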
from __future__ import print_function
from __future__ import absolute_import
import sys
lines = [l.rstrip().replace('\t', ' '*8) for l in sys.stdin.readlines()]
print('TITLE')
print(lines[0])
print()
print('SYNOPSIS')
for i,line in enumerate(lines[2:]):
if line.lstrip().startswith('-') or line.lstrip().startswith('Options'):
optStart = i+2
break
print(line)
print('''DESCRIPTION
This program is part of the OpenImageIO (http://www.openimageio.org) tool suite.
Detailed documentation is available in PDF format with the OpenImageIO
distribution.
''')
print('OPTIONS')
for line in lines[optStart:]:
if not line.startswith(' '):
print()
print(line)
elif not line.lstrip().startswith('-'):
print(line.lstrip())
else:
print(line)
print()
| 25.025 | 80 | 0.685315 |
8bd1767154cf34be5d653c7bb2323b720bccb790 | 19,603 | py | Python | grit/tool/build.py | lhopps/grit-i18n | 4e6ce0db3bd8d674851a74a7415be17ae95247e2 | [
"BSD-2-Clause"
] | null | null | null | grit/tool/build.py | lhopps/grit-i18n | 4e6ce0db3bd8d674851a74a7415be17ae95247e2 | [
"BSD-2-Clause"
] | null | null | null | grit/tool/build.py | lhopps/grit-i18n | 4e6ce0db3bd8d674851a74a7415be17ae95247e2 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''The 'grit build' tool along with integration for this tool with the
SCons build system.
'''
import filecmp
import getopt
import os
import shutil
import sys
from grit import grd_reader
from grit import util
from grit.tool import interface
from grit import shortcuts
# It would be cleaner to have each module register itself, but that would
# require importing all of them on every run of GRIT.
'''Map from <output> node types to modules under grit.format.'''
_format_modules = {
'android': 'android_xml',
'c_format': 'c_format',
'chrome_messages_json': 'chrome_messages_json',
'data_package': 'data_pack',
'js_map_format': 'js_map_format',
'rc_all': 'rc',
'rc_translateable': 'rc',
'rc_nontranslateable': 'rc',
'rc_header': 'rc_header',
'resource_map_header': 'resource_map',
'resource_map_source': 'resource_map',
'resource_file_map_source': 'resource_map',
}
_format_modules.update(
(type, 'policy_templates.template_formatter') for type in
[ 'adm', 'admx', 'adml', 'reg', 'doc', 'json',
'plist', 'plist_strings', 'ios_plist', 'android_policy' ])
def GetFormatter(type):
modulename = 'grit.format.' + _format_modules[type]
__import__(modulename)
module = sys.modules[modulename]
try:
return module.Format
except AttributeError:
return module.GetFormatter(type)
class RcBuilder(interface.Tool):
'''A tool that builds RC files and resource header files for compilation.
Usage: grit build [-o OUTPUTDIR] [-D NAME[=VAL]]*
All output options for this tool are specified in the input file (see
'grit help' for details on how to specify the input file - it is a global
option).
Options:
-a FILE Assert that the given file is an output. There can be
multiple "-a" flags listed for multiple outputs. If a "-a"
or "--assert-file-list" argument is present, then the list
of asserted files must match the output files or the tool
will fail. The use-case is for the build system to maintain
separate lists of output files and to catch errors if the
build system's list and the grit list are out-of-sync.
--assert-file-list Provide a file listing multiple asserted output files.
There is one file name per line. This acts like specifying
each file with "-a" on the command line, but without the
possibility of running into OS line-length limits for very
long lists.
-o OUTPUTDIR Specify what directory output paths are relative to.
Defaults to the current directory.
-D NAME[=VAL] Specify a C-preprocessor-like define NAME with optional
value VAL (defaults to 1) which will be used to control
conditional inclusion of resources.
-E NAME=VALUE Set environment variable NAME to VALUE (within grit).
-f FIRSTIDSFILE Path to a python file that specifies the first id of
value to use for resources. A non-empty value here will
override the value specified in the <grit> node's
first_ids_file.
-w WHITELISTFILE Path to a file containing the string names of the
resources to include. Anything not listed is dropped.
-t PLATFORM Specifies the platform the build is targeting; defaults
to the value of sys.platform. The value provided via this
flag should match what sys.platform would report for your
target platform; see grit.node.base.EvaluateCondition.
-h HEADERFORMAT Custom format string to use for generating rc header files.
The string should have two placeholders: {textual_id}
and {numeric_id}. E.g. "#define {textual_id} {numeric_id}"
Otherwise it will use the default "#define SYMBOL 1234"
--output-all-resource-defines
--no-output-all-resource-defines If specified, overrides the value of the
output_all_resource_defines attribute of the root <grit>
element of the input .grd file.
--write-only-new flag
If flag is non-0, write output files to a temporary file
first, and copy it to the real output only if the new file
is different from the old file. This allows some build
systems to realize that dependent build steps might be
unnecessary, at the cost of comparing the output data at
grit time.
--depend-on-stamp
If specified along with --depfile and --depdir, the depfile
generated will depend on a stampfile instead of the first
output in the input .grd file.
Conditional inclusion of resources only affects the output of files which
control which resources get linked into a binary, e.g. it affects .rc files
meant for compilation but it does not affect resource header files (that define
IDs). This helps ensure that values of IDs stay the same, that all messages
are exported to translation interchange files (e.g. XMB files), etc.
'''
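# Illustrative invocation (file names and defines below are hypothetical):
#   grit -i resources.grd build -o out/gen -D chromeos=1 -w whitelist.txt
# which evaluates the <output> nodes of resources.grd and writes them under out/gen.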
def ShortDescription(self):
return 'A tool that builds RC files for compilation.'
def Run(self, opts, args):
self.output_directory = '.'
first_ids_file = None
whitelist_filenames = []
assert_output_files = []
target_platform = None
depfile = None
depdir = None
rc_header_format = None
output_all_resource_defines = None
write_only_new = False
depend_on_stamp = False
(own_opts, args) = getopt.getopt(args, 'a:o:D:E:f:w:t:h:',
('depdir=','depfile=','assert-file-list=',
'output-all-resource-defines',
'no-output-all-resource-defines',
'depend-on-stamp',
'write-only-new='))
for (key, val) in own_opts:
if key == '-a':
assert_output_files.append(val)
elif key == '--assert-file-list':
with open(val) as f:
assert_output_files += f.read().splitlines()
elif key == '-o':
self.output_directory = val
elif key == '-D':
name, val = util.ParseDefine(val)
self.defines[name] = val
elif key == '-E':
(env_name, env_value) = val.split('=', 1)
os.environ[env_name] = env_value
elif key == '-f':
# TODO(joi@chromium.org): Remove this override once change
# lands in WebKit.grd to specify the first_ids_file in the
# .grd itself.
first_ids_file = val
elif key == '-w':
whitelist_filenames.append(val)
elif key == '--output-all-resource-defines':
output_all_resource_defines = True
elif key == '--no-output-all-resource-defines':
output_all_resource_defines = False
elif key == '-t':
target_platform = val
elif key == '-h':
rc_header_format = val
elif key == '--depdir':
depdir = val
elif key == '--depfile':
depfile = val
elif key == '--write-only-new':
write_only_new = val != '0'
elif key == '--depend-on-stamp':
depend_on_stamp = True
if len(args):
print 'This tool takes no tool-specific arguments.'
return 2
self.SetOptions(opts)
if self.scons_targets:
self.VerboseOut('Using SCons targets to identify files to output.\n')
else:
self.VerboseOut('Output directory: %s (absolute path: %s)\n' %
(self.output_directory,
os.path.abspath(self.output_directory)))
if whitelist_filenames:
self.whitelist_names = set()
for whitelist_filename in whitelist_filenames:
self.VerboseOut('Using whitelist: %s\n' % whitelist_filename);
whitelist_contents = util.ReadFile(whitelist_filename, util.RAW_TEXT)
self.whitelist_names.update(whitelist_contents.strip().split('\n'))
self.write_only_new = write_only_new
self.res = grd_reader.Parse(opts.input,
debug=opts.extra_verbose,
first_ids_file=first_ids_file,
defines=self.defines,
target_platform=target_platform)
# If the output_all_resource_defines option is specified, override the value
# found in the grd file.
if output_all_resource_defines is not None:
self.res.SetShouldOutputAllResourceDefines(output_all_resource_defines)
# Set an output context so that conditionals can use defines during the
# gathering stage; we use a dummy language here since we are not outputting
# a specific language.
self.res.SetOutputLanguage('en')
if rc_header_format:
self.res.AssignRcHeaderFormat(rc_header_format)
self.res.RunGatherers()
self.Process()
if assert_output_files:
if not self.CheckAssertedOutputFiles(assert_output_files):
return 2
if depfile and depdir:
self.GenerateDepfile(depfile, depdir, first_ids_file, depend_on_stamp)
return 0
def __init__(self, defines=None):
# Default file-creation function is built-in open(). Only done to allow
# overriding by unit test.
self.fo_create = open
# key/value pairs of C-preprocessor like defines that are used for
# conditional output of resources
self.defines = defines or {}
# self.res is a fully-populated resource tree if Run()
# has been called, otherwise None.
self.res = None
# Set to a list of filenames for the output nodes that are relative
# to the current working directory. They are in the same order as the
# output nodes in the file.
self.scons_targets = None
# The set of names that are whitelisted to actually be included in the
# output.
self.whitelist_names = None
# Whether to compare outputs to their old contents before writing.
self.write_only_new = False
@staticmethod
def AddWhitelistTags(start_node, whitelist_names):
# Walk the tree of nodes added attributes for the nodes that shouldn't
# be written into the target files (skip markers).
from grit.node import include
from grit.node import message
from grit.node import structure
for node in start_node:
# Same trick data_pack.py uses to see what nodes actually result in
# real items.
if (isinstance(node, include.IncludeNode) or
isinstance(node, message.MessageNode) or
isinstance(node, structure.StructureNode)):
text_ids = node.GetTextualIds()
# Mark the item to be skipped if it wasn't in the whitelist.
if text_ids and text_ids[0] not in whitelist_names:
node.SetWhitelistMarkedAsSkip(True)
@staticmethod
def ProcessNode(node, output_node, outfile):
'''Processes a node in-order, calling its formatter before and after
recursing to its children.
Args:
node: grit.node.base.Node subclass
output_node: grit.node.io.OutputNode
outfile: open filehandle
'''
base_dir = util.dirname(output_node.GetOutputFilename())
formatter = GetFormatter(output_node.GetType())
formatted = formatter(node, output_node.GetLanguage(), output_dir=base_dir)
outfile.writelines(formatted)
def Process(self):
# Update filenames with those provided by SCons if we're being invoked
# from SCons. The list of SCons targets also includes all <structure>
# node outputs, but it starts with our output files, in the order they
# occur in the .grd
if self.scons_targets:
assert len(self.scons_targets) >= len(self.res.GetOutputFiles())
outfiles = self.res.GetOutputFiles()
for ix in range(len(outfiles)):
outfiles[ix].output_filename = os.path.abspath(
self.scons_targets[ix])
else:
for output in self.res.GetOutputFiles():
output.output_filename = os.path.abspath(os.path.join(
self.output_directory, output.GetFilename()))
# If there are whitelisted names, tag the tree once up front, this way
# while looping through the actual output, it is just an attribute check.
if self.whitelist_names:
self.AddWhitelistTags(self.res, self.whitelist_names)
for output in self.res.GetOutputFiles():
self.VerboseOut('Creating %s...' % output.GetFilename())
# Microsoft's RC compiler can only deal with single-byte or double-byte
# files (no UTF-8), so we make all RC files UTF-16 to support all
# character sets.
if output.GetType() in ('rc_header', 'resource_map_header',
'resource_map_source', 'resource_file_map_source'):
encoding = 'cp1252'
elif output.GetType() in ('android', 'c_format', 'js_map_format', 'plist',
'plist_strings', 'doc', 'json', 'android_policy'):
encoding = 'utf_8'
elif output.GetType() in ('chrome_messages_json'):
# Chrome Web Store currently expects BOM for UTF-8 files :-(
encoding = 'utf-8-sig'
else:
# TODO(gfeher) modify here to set utf-8 encoding for admx/adml
encoding = 'utf_16'
# Set the context, for conditional inclusion of resources
self.res.SetOutputLanguage(output.GetLanguage())
self.res.SetOutputContext(output.GetContext())
self.res.SetFallbackToDefaultLayout(output.GetFallbackToDefaultLayout())
self.res.SetDefines(self.defines)
# Make the output directory if it doesn't exist.
self.MakeDirectoriesTo(output.GetOutputFilename())
# Write the results to a temporary file and only overwrite the original
# if the file changed. This avoids unnecessary rebuilds.
outfile = self.fo_create(output.GetOutputFilename() + '.tmp', 'wb')
if output.GetType() != 'data_package':
outfile = util.WrapOutputStream(outfile, encoding)
# Iterate in-order through entire resource tree, calling formatters on
# the entry into a node and on exit out of it.
with outfile:
self.ProcessNode(self.res, output, outfile)
# Now copy from the temp file back to the real output, but on Windows,
# only if the real output doesn't exist or the contents of the file
# changed. This prevents identical headers from being written and .cc
# files from recompiling (which is painful on Windows).
if not os.path.exists(output.GetOutputFilename()):
os.rename(output.GetOutputFilename() + '.tmp',
output.GetOutputFilename())
else:
# CHROMIUM SPECIFIC CHANGE.
# This clashes with gyp + vstudio, which expect the output timestamp
# to change on a rebuild, even if nothing has changed, so only do
# it when opted in.
if not self.write_only_new:
write_file = True
else:
files_match = filecmp.cmp(output.GetOutputFilename(),
output.GetOutputFilename() + '.tmp')
write_file = not files_match
if write_file:
shutil.copy2(output.GetOutputFilename() + '.tmp',
output.GetOutputFilename())
os.remove(output.GetOutputFilename() + '.tmp')
self.VerboseOut(' done.\n')
# Print warnings if there are any duplicate shortcuts.
warnings = shortcuts.GenerateDuplicateShortcutsWarnings(
self.res.UberClique(), self.res.GetTcProject())
if warnings:
print '\n'.join(warnings)
# Print out any fallback warnings, and missing translation errors, and
# exit with an error code if there are missing translations in a non-pseudo
# and non-official build.
warnings = (self.res.UberClique().MissingTranslationsReport().
encode('ascii', 'replace'))
if warnings:
self.VerboseOut(warnings)
if self.res.UberClique().HasMissingTranslations():
print self.res.UberClique().missing_translations_
sys.exit(-1)
def CheckAssertedOutputFiles(self, assert_output_files):
'''Checks that the asserted output files are specified in the given list.
Returns true if the asserted files are present. If they are not, returns
False and prints the failure.
'''
# Compare the absolute path names, sorted.
asserted = sorted([os.path.abspath(i) for i in assert_output_files])
actual = sorted([
os.path.abspath(os.path.join(self.output_directory, i.GetFilename()))
for i in self.res.GetOutputFiles()])
if asserted != actual:
missing = list(set(actual) - set(asserted))
extra = list(set(asserted) - set(actual))
error = '''Asserted file list does not match.
Expected output files:
%s
Actual output files:
%s
Missing output files:
%s
Extra output files:
%s
'''
print error % ('\n'.join(asserted), '\n'.join(actual), '\n'.join(missing),
'\n'.join(extra))
return False
return True
def GenerateDepfile(self, depfile, depdir, first_ids_file, depend_on_stamp):
'''Generate a depfile that contains the implicit dependencies of the input
grd. The depfile will be in the same format as a makefile, and will contain
references to files relative to |depdir|. It will be put in |depfile|.
For example, supposing we have three files in a directory src/
src/
blah.grd <- depends on input{1,2}.xtb
input1.xtb
input2.xtb
and we run
grit -i blah.grd -o ../out/gen --depdir ../out --depfile ../out/gen/blah.grd.d
from the directory src/ we will generate a depfile ../out/gen/blah.grd.d
that has the contents
gen/blah.h: ../src/input1.xtb ../src/input2.xtb
Where "gen/blah.h" is the first output (Ninja expects the .d file to list
the first output in cases where there is more than one). If the flag
--depend-on-stamp is specified, "gen/blah.grd.d.stamp" will be used that is
'touched' whenever a new depfile is generated.
Note that all paths in the depfile are relative to ../out, the depdir.
'''
depfile = os.path.abspath(depfile)
depdir = os.path.abspath(depdir)
infiles = self.res.GetInputFiles()
# We want to trigger a rebuild if the first ids change.
if first_ids_file is not None:
infiles.append(first_ids_file)
if (depend_on_stamp):
output_file = depfile + ".stamp"
# Touch the stamp file before generating the depfile.
with open(output_file, 'a'):
os.utime(output_file, None)
else:
# Get the first output file relative to the depdir.
outputs = self.res.GetOutputFiles()
output_file = os.path.join(self.output_directory,
outputs[0].GetFilename())
output_file = os.path.relpath(output_file, depdir)
# The path prefix to prepend to dependencies in the depfile.
prefix = os.path.relpath(os.getcwd(), depdir)
deps_text = ' '.join([os.path.join(prefix, i) for i in infiles])
depfile_contents = output_file + ': ' + deps_text
self.MakeDirectoriesTo(depfile)
outfile = self.fo_create(depfile, 'wb')
outfile.writelines(depfile_contents)
@staticmethod
def MakeDirectoriesTo(file):
'''Creates directories necessary to contain |file|.'''
dir = os.path.split(file)[0]
if not os.path.exists(dir):
os.makedirs(dir)
| 39.284569 | 83 | 0.656328 |
b910140ff812bb62978f041cffbbb1f878a4c815 | 86 | py | Python | notion_export_enhancer/__main__.py | Cobertos/notion-export-enhancer | 07a34d4b3daeb1ec69cd4253c089ba4d9dc5bbc3 | [
"MIT"
] | 17 | 2021-01-28T11:03:47.000Z | 2022-02-10T22:51:44.000Z | notion_export_enhancer/__main__.py | wongsingfo/notion_export_enhancer | bb036640e95b37ba74c9184a0448f19e644dc90c | [
"MIT"
] | 3 | 2021-03-21T02:35:24.000Z | 2021-10-05T07:10:22.000Z | notion_export_enhancer/__main__.py | wongsingfo/notion_export_enhancer | bb036640e95b37ba74c9184a0448f19e644dc90c | [
"MIT"
] | 4 | 2021-03-21T03:39:36.000Z | 2022-01-19T07:05:14.000Z | import sys
from .enhancer import cli
if __name__ == "__main__":
cli(sys.argv[1:]) | 17.2 | 26 | 0.686047 |
fc45e6f3d8acb520eafaaa2fb3fbce6c424370c7 | 5,995 | py | Python | Machine_Learning/Feature_Tutorials/01-freezing_a_keras_model/files/train_save.py | mkolod/Vitis-Tutorials | 33d6cf9686398ef1179778dc0da163291c68b465 | [
"Apache-2.0"
] | 1 | 2022-03-15T22:07:18.000Z | 2022-03-15T22:07:18.000Z | Machine_Learning/Feature_Tutorials/01-freezing_a_keras_model/files/train_save.py | mkolod/Vitis-Tutorials | 33d6cf9686398ef1179778dc0da163291c68b465 | [
"Apache-2.0"
] | null | null | null | Machine_Learning/Feature_Tutorials/01-freezing_a_keras_model/files/train_save.py | mkolod/Vitis-Tutorials | 33d6cf9686398ef1179778dc0da163291c68b465 | [
"Apache-2.0"
] | null | null | null | '''
Copyright 2020 Xilinx Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
'''
CIFAR10 example using Keras
Runs for 5 epochs only
Demonstrates the 3 methods for saving Keras models
Method 1 - as an HDF5 checkpoint during training
Method 2 - as an HDF5 model (identical results to method #1)
Method 3 - as separate weights (HDF5) and architecture (JSON) files
'''
'''
Author: Mark Harvey
'''
import os
import sys
import shutil
import numpy as np
import keras
from keras import datasets, utils, layers, models, optimizers
from keras.callbacks import ModelCheckpoint
##############################################
# Set up directories
##############################################
# Returns the directory the current script (or interpreter) is running in
def get_script_directory():
path = os.path.realpath(sys.argv[0])
if os.path.isdir(path):
return path
else:
return os.path.dirname(path)
print('\n------------------------------------')
print('Keras version :',keras.__version__)
print('Python version :',(sys.version))
print('------------------------------------')
SCRIPT_DIR = get_script_directory()
print('This script is located in: ', SCRIPT_DIR)
METHOD1_DIR = os.path.join(SCRIPT_DIR, 'method1')
METHOD2_DIR = os.path.join(SCRIPT_DIR, 'method2')
METHOD3_DIR = os.path.join(SCRIPT_DIR, 'method3')
# create a directory for saving if it doesn't already exist
# delete it and recreate if it already exists
if (os.path.exists(METHOD1_DIR)):
shutil.rmtree(METHOD1_DIR)
os.makedirs(METHOD1_DIR)
print("Directory " , METHOD1_DIR , "created ")
if (os.path.exists(METHOD2_DIR)):
shutil.rmtree(METHOD2_DIR)
os.makedirs(METHOD2_DIR)
print("Directory " , METHOD2_DIR , "created ")
if (os.path.exists(METHOD3_DIR)):
shutil.rmtree(METHOD3_DIR)
os.makedirs(METHOD3_DIR)
print("Directory " , METHOD3_DIR , "created ")
#####################################################
# Hyperparameters
#####################################################
BATCHSIZE = 25
EPOCHS = 5
LEARN_RATE = 0.0001
DECAY_RATE = 1e-6
##############################################
# Preparation of input dataset
##############################################
# The CIFAR10 dataset has 60k images. The training set is 50k, the test set is 10k.
# Each image is 32x32 pixels RGB
(X_train, Y_train), (X_test, Y_test) = datasets.cifar10.load_data()
# Scale image data from range 0:255 to range 0:1
X_train = X_train / 255.0
X_test = X_test / 255.0
# one-hot encode the labels
Y_train = utils.to_categorical(Y_train)
Y_test = utils.to_categorical(Y_test)
##############################################
# Simple custom CNN Keras functional model
##############################################
inputs = layers.Input(shape=(32, 32, 3))
net = layers.Conv2D(32, kernel_size=(3, 3), padding='same')(inputs)
net = layers.Activation('relu')(net)
net = layers.BatchNormalization()(net)
net = layers.MaxPooling2D(pool_size=(2,2))(net)
net = layers.Conv2D(64, kernel_size=(3, 3), padding='same')(net)
net = layers.Activation('relu')(net)
net = layers.BatchNormalization()(net)
net = layers.MaxPooling2D(pool_size=(2,2))(net)
net = layers.Flatten()(net)
net = layers.Dropout(0.4)(net)
net = layers.Dense(512)(net)
net = layers.Activation('relu')(net)
net = layers.Dropout(0.4)(net)
net = layers.Dense(10)(net)
prediction = layers.Activation('softmax')(net)
model = models.Model(inputs=inputs, outputs=prediction)
##############################################
# Compile model
##############################################
model.compile(loss='categorical_crossentropy',
optimizer=optimizers.Adam(lr=LEARN_RATE, decay=DECAY_RATE),
metrics=['accuracy']
)
##############################################
# Method #1 - save a checkpoint during training
##############################################
# set up the checkpoint callback
chkpt_callback = ModelCheckpoint(os.path.join(METHOD1_DIR,'keras_chkpt.h5'),
monitor='val_accuracy',
verbose=1,
save_best_only=True,
mode='max')
callback_list = [chkpt_callback]
# Train model with training set
model.fit(X_train,
Y_train,
batch_size=BATCHSIZE,
shuffle=True,
epochs=EPOCHS,
validation_data=(X_test, Y_test),
callbacks = callback_list
)
##############################################
# Evaluate model accuracy with test set
##############################################
scores = model.evaluate(X_test,
Y_test,
batch_size=BATCHSIZE
)
print('Loss: %.3f' % scores[0])
print('Accuracy: %.3f' % scores[1])
##############################################
# Method #2 - as single HDF5 file
##############################################
# save weights, model architecture & optimizer to an HDF5 format file
model.save(os.path.join(METHOD2_DIR,'k_complete_model.h5'))
##############################################
# Method #3 - as JSON and HDF5 files
##############################################
# save just the weights (no architecture) to an HDF5 format file
model.save_weights(os.path.join(METHOD3_DIR,'k_model_weights.h5'))
# save just the architecture (no weights) to a JSON file
with open(os.path.join(METHOD3_DIR,'k_model_architecture.json'), 'w') as f:
f.write(model.to_json())
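# Reload sketch for method #3 (assumes the two files written above; not executed here):
#   from keras.models import model_from_json
#   with open(os.path.join(METHOD3_DIR, 'k_model_architecture.json')) as jf:
#       restored = model_from_json(jf.read())
#   restored.load_weights(os.path.join(METHOD3_DIR, 'k_model_weights.h5'))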
print ('FINISHED!')
| 28.961353 | 76 | 0.585988 |
697e256ad1634e00ee451ec75b455c7a76204e0f | 3,146 | py | Python | challengenews/tests/test_serializers.py | scpaes/jungledevs-challenge | 188bc7a4aacbb278a9486c57685db53be0477d51 | [
"MIT"
] | null | null | null | challengenews/tests/test_serializers.py | scpaes/jungledevs-challenge | 188bc7a4aacbb278a9486c57685db53be0477d51 | [
"MIT"
] | 6 | 2021-08-10T02:19:35.000Z | 2021-08-10T02:24:05.000Z | challengenews/tests/test_serializers.py | scpaes/jungledevs-challenge | 188bc7a4aacbb278a9486c57685db53be0477d51 | [
"MIT"
] | null | null | null | from django.test import TestCase
from rest_framework import serializers
from django.contrib.auth.models import User
from challengenews.serializers import *
from challengenews.models import *
class UserSerializerTestCase(TestCase):
"""
Unit test.
Test class for the user model serializer.
"""
def setUp(self) -> None:
self.user = User(
username='BB08',
password='12345678'
)
self.serializer = UserSerializer(instance=self.user)
def test_user_serializer_set_fields(self):
"""
User serializer fields set test.
"""
data = self.serializer.data
self.assertEqual(set(data.keys()), set(['username', 'password']))
def test_user_serializer_fields(self):
"""
User serializer fields test.
"""
data = self.serializer.data
self.assertEqual(self.user.username, data['username'])
self.assertEqual(self.user.password, data['password'])
class ArticlesSerializerTestCase(TestCase):
"""
Unit test.
Test class for the articles model serializer.
"""
def setUp(self) -> None:
author = Authors(
name='Autor'
)
self.article = Articles(
author=author,
category='Category',
title='Title',
summary='Summary',
first_paragraph='First Paragraph'
)
self.serializer = ArticleSerializer(instance=self.article)
def test_articles_serializer_set_fields(self):
"""
Article serializer fields set test.
"""
data = self.serializer.data
self.assertEqual(set(data.keys()), set(
['id', 'author', 'category', 'title', 'summary', 'first_paragraph', 'slug', 'body']))
def test_articles_serializer_fields(self):
"""
Article serializer fields test.
"""
data = self.serializer.data
self.assertEqual(self.article.author.id, data['author']['id'])
self.assertEqual(self.article.category, data['category'])
self.assertEqual(self.article.title, data['title'])
self.assertEqual(self.article.summary, data['summary'])
self.assertEqual(self.article.first_paragraph,
data['first_paragraph'])
class AuthorSerializerTestCase(TestCase):
"""
Unit test.
Test class for the authors model serializer.
"""
def setUp(self) -> None:
self.author = Authors(
name='Author'
)
self.serializer = AuthorSerializer(instance=self.author)
def test_author_serializer_set_fields(self):
"""
Author serializer fields set test.
"""
data = self.serializer.data
self.assertEqual(
set(data.keys()), set(['id', 'name', 'picture'])
)
def test_author_serializer_fields(self):
"""
Author serializer fields test.
"""
data = self.serializer.data
self.assertEqual(self.author.name, data['name'])
self.assertEqual(self.author.id, data['id'])
| 28.342342 | 97 | 0.602352 |
53c2930a2fd5f0c62418bf6ca75818a0a43809b8 | 650 | py | Python | listings/listings/posts/migrations/0003_auto_20201111_1505.py | AAM77/listings_backend | 7ed0950e54e12b737b46b56b82653c2842fdf6cf | [
"MIT"
] | null | null | null | listings/listings/posts/migrations/0003_auto_20201111_1505.py | AAM77/listings_backend | 7ed0950e54e12b737b46b56b82653c2842fdf6cf | [
"MIT"
] | null | null | null | listings/listings/posts/migrations/0003_auto_20201111_1505.py | AAM77/listings_backend | 7ed0950e54e12b737b46b56b82653c2842fdf6cf | [
"MIT"
] | null | null | null | # Generated by Django 3.0.11 on 2020-11-11 20:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('posts', '0002_auto_20201110_1619'),
]
operations = [
migrations.AlterField(
model_name='language',
name='listing',
field=models.ManyToManyField(blank=True, null=True, related_name='languages', to='posts.Post'),
),
migrations.AlterField(
model_name='tool',
name='listing',
field=models.ManyToManyField(blank=True, null=True, related_name='tools', to='posts.Post'),
),
]
| 27.083333 | 107 | 0.603077 |
b750d6db0a082a62112ec172013e9c7e24a7b4bb | 2,996 | py | Python | examples/od8_const_vel_two_agent.py | grzPat/bark | 807092815c81eeb23defff473449a535a9c42f8b | [
"MIT"
] | null | null | null | examples/od8_const_vel_two_agent.py | grzPat/bark | 807092815c81eeb23defff473449a535a9c42f8b | [
"MIT"
] | null | null | null | examples/od8_const_vel_two_agent.py | grzPat/bark | 807092815c81eeb23defff473449a535a9c42f8b | [
"MIT"
] | null | null | null | # Copyright (c) 2019 fortiss GmbH
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
import numpy as np
import time
import os
from bark.world.agent import *
from bark.models.behavior import *
from bark.world import *
from bark.world.map import *
from bark.models.dynamic import *
from bark.models.execution import *
from bark.geometry import *
from bark.geometry.standard_shapes import *
from modules.runtime.commons.parameters import ParameterServer
from modules.runtime.viewer.pygame_viewer import PygameViewer
# from modules.runtime.viewer.matplotlib_viewer import MPViewer
from modules.runtime.commons.xodr_parser import XodrParser
# Parameters Definitions
param_server = ParameterServer()
# World Definition
world = World(param_server)
# Model Definitions
behavior_model = BehaviorConstantVelocity(param_server)
execution_model = ExecutionModelInterpolate(param_server)
dynamic_model = SingleTrackModel()
behavior_model2 = BehaviorConstantVelocity(param_server)
execution_model2 = ExecutionModelInterpolate(param_server)
dynamic_model2 = SingleTrackModel()
# Map Definition
xodr_parser = XodrParser("modules/runtime/tests/data/Crossing8Course.xodr")
map_interface = MapInterface()
map_interface.set_open_drive_map(xodr_parser.map)
map_interface.set_roadgraph(xodr_parser.roadgraph)
world.set_map(map_interface)
# Agent Definition
agent_2d_shape = CarLimousine()
init_state = np.array([0, -11, -8, 3.14*3.0/4.0, 50/3.6])
agent_params = param_server.addChild("agent1")
agent1 = Agent(init_state,
behavior_model,
dynamic_model,
execution_model,
agent_2d_shape,
agent_params,
2,
map_interface)
world.add_agent(agent1)
agent_2d_shape2 = CarLimousine()
init_state2 = np.array([0, -11, -8, 3.14*3.0/4.0, 5.2])
agent_params2 = param_server.addChild("agent2")
agent2 = Agent(init_state2,
behavior_model2,
dynamic_model2,
execution_model2,
agent_2d_shape2,
agent_params2,
2,
map_interface)
world.add_agent(agent2)
# viewer
viewer = PygameViewer(params=param_server, x_range=[-200, 200], y_range=[-200, 200])
# World Simulation
sim_step_time = param_server["simulation"]["step_time",
"Step-time used in simulation",
0.05]
sim_real_time_factor = param_server["simulation"]["real_time_factor",
"execution in real-time or faster",
1]
for _ in range(0, 30):
world.step(sim_step_time)
viewer.drawWorld(world)
viewer.show(block=False)
time.sleep(sim_step_time/sim_real_time_factor)
param_server.save(os.path.join(os.path.dirname(os.path.abspath(__file__)),
"params",
"od8_const_vel_one_agent_written.json")) | 32.923077 | 85 | 0.685915 |
be3c337dd605cb0ea8b605c922fc5c1286356939 | 551 | py | Python | mail/index.py | VanOvermeire/twittersearch | 7326a1e623c4bb5cb0931805fd1e24544b17ade3 | [
"MIT"
] | null | null | null | mail/index.py | VanOvermeire/twittersearch | 7326a1e623c4bb5cb0931805fd1e24544b17ade3 | [
"MIT"
] | null | null | null | mail/index.py | VanOvermeire/twittersearch | 7326a1e623c4bb5cb0931805fd1e24544b17ade3 | [
"MIT"
] | null | null | null | import boto3
from twittersearchhelpers import twitter_ses, twitter_s3
# doing this outside the lambda is better for performance
s3_client = boto3.client('s3')
ses_client = boto3.client('ses')
def my_handler(event, context):
bucket, key = twitter_s3.extract_bucket_and_key_from_event(event)
url = twitter_s3.generate_url(s3_client, bucket, key)
email, tag = twitter_s3.extract_email_and_tag_from_audio_key(key)
twitter_ses.send_email(ses_client, email, tag, url)
return {
'message': 'mail dispatched to ' + email
}
| 29 | 69 | 0.749546 |
142d4367fd8537a0a8830b288066404606fcf74c | 148 | py | Python | customcalendar/models/__init__.py | IgalMilman/DnDHelper | 334822a489e7dc2b5ae17230e5c068b89c6c5d10 | [
"MIT"
] | null | null | null | customcalendar/models/__init__.py | IgalMilman/DnDHelper | 334822a489e7dc2b5ae17230e5c068b89c6c5d10 | [
"MIT"
] | null | null | null | customcalendar/models/__init__.py | IgalMilman/DnDHelper | 334822a489e7dc2b5ae17230e5c068b89c6c5d10 | [
"MIT"
] | null | null | null | from customcalendar.models import calendarsettings
from customcalendar.models import calendarevent
from customcalendar.models import permissionevent | 49.333333 | 50 | 0.905405 |
5a5fe2adedf7c38ec307479484b3e1f4391b153d | 4,612 | py | Python | mnemonics-training/2_eval/models/resnet_cifar.py | mhd-medfa/class-incremental-learning | c7c0a217d07b285f215672b3021beee52d4ef74f | [
"MIT"
] | 241 | 2020-10-12T08:53:17.000Z | 2022-03-30T02:39:38.000Z | mnemonics-training/2_eval/models/resnet_cifar.py | mhd-medfa/class-incremental-learning | c7c0a217d07b285f215672b3021beee52d4ef74f | [
"MIT"
] | 29 | 2020-12-21T02:46:57.000Z | 2022-03-24T07:51:58.000Z | mnemonics-training/2_eval/models/resnet_cifar.py | mhd-medfa/class-incremental-learning | c7c0a217d07b285f215672b3021beee52d4ef74f | [
"MIT"
] | 40 | 2020-10-14T07:54:31.000Z | 2022-03-29T16:54:38.000Z | import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
from utils.incremental.conv2d_mtl import Conv2d
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=10):
self.inplanes = 16
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1,
bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(block, 16, layers[0])
self.layer2 = self._make_layer(block, 32, layers[1], stride=2)
self.layer3 = self._make_layer(block, 64, layers[2], stride=2)
self.avgpool = nn.AvgPool2d(8, stride=1)
self.fc = nn.Linear(64 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet20(pretrained=False, **kwargs):
n = 3
model = ResNet(BasicBlock, [n, n, n], **kwargs)
return model
def resnet32(pretrained=False, **kwargs):
n = 5
model = ResNet(BasicBlock, [n, n, n], **kwargs)
return model
def resnet56(pretrained=False, **kwargs):
n = 9
model = ResNet(Bottleneck, [n, n, n], **kwargs)
return model
| 30.543046 | 90 | 0.584562 |
6a7044dec305dfb0134040000c653d423aba6585 | 33,610 | py | Python | mtgjson4/compile_mtg.py | rohitmusti/mtgjson | ed2273dc1de736267aac7f81e26af857acad24be | [
"MIT"
] | null | null | null | mtgjson4/compile_mtg.py | rohitmusti/mtgjson | ed2273dc1de736267aac7f81e26af857acad24be | [
"MIT"
] | null | null | null | mtgjson4/compile_mtg.py | rohitmusti/mtgjson | ed2273dc1de736267aac7f81e26af857acad24be | [
"MIT"
] | null | null | null | """Compile incoming data into the target output format."""
import contextvars
import copy
import json
import logging
import multiprocessing
import os
import pathlib
import re
from typing import Any, Dict, List, Optional, Set, Tuple
from mkmsdk.api_map import _API_MAP
from mkmsdk.mkm import Mkm
import mtgjson4
from mtgjson4 import mtgjson_card
from mtgjson4.mtgjson_card import MTGJSONCard
from mtgjson4.provider import gatherer, mtgstocks, scryfall, tcgplayer, wizards
from mtgjson4.util import is_number
LOGGER = logging.getLogger(__name__)
SESSION: contextvars.ContextVar = contextvars.ContextVar("SESSION")
MKM_SET_CARDS: contextvars.ContextVar = contextvars.ContextVar("MKM_SET_CARDS")
MKM_API: contextvars.ContextVar = contextvars.ContextVar("MKM_API")
def build_output_file(
sf_cards: List[Dict[str, Any]], set_code: str, skip_keys: bool
) -> Dict[str, Any]:
"""
Compile the entire XYZ.json file and pass it off to be written out
:param skip_keys: Skip building TCGPlayer & MKM components
:param sf_cards: Scryfall cards
:param set_code: Set code
:return: Completed JSON file
"""
if not skip_keys and os.environ["MKM_APP_TOKEN"] and os.environ["MKM_APP_SECRET"]:
MKM_API.set(Mkm(_API_MAP["2.0"]["api"], _API_MAP["2.0"]["api_root"]))
output_file: Dict[str, Any] = {}
# Get the set config from Scryfall
set_config = scryfall.download(scryfall.SCRYFALL_API_SETS + set_code)
if set_config["object"] == "error":
LOGGER.error("Set Config for {} was not found, skipping...".format(set_code))
return {"cards": [], "tokens": []}
output_file["name"] = set_config["name"]
output_file["code"] = set_config["code"].upper()
output_file["releaseDate"] = set_config["released_at"]
output_file["type"] = set_config["set_type"]
output_file["keyruneCode"] = (
pathlib.Path(set_config["icon_svg_uri"]).name.split(".")[0].upper()
)
# Try adding MKM Set Name
# Then store the card data for future pulling
if MKM_API.get(None):
mkm_resp = MKM_API.get().market_place.expansions(game=1)
if mkm_resp.status_code != 200:
LOGGER.error("Unable to download MKM correctly: {}".format(mkm_resp))
else:
for set_content in mkm_resp.json()["expansion"]:
if (
set_content["enName"].lower() == output_file["name"].lower()
or set_content["abbreviation"].lower()
== output_file["code"].lower()
):
output_file["mcmId"] = set_content["idExpansion"]
output_file["mcmName"] = set_content["enName"]
break
initialize_mkm_set_cards(output_file.get("mcmId", None))
# Add translations to the files
try:
output_file["translations"] = wizards.get_translations(output_file["code"])
except KeyError:
LOGGER.warning("Unable to find set translations for {}".format(set_code))
# Add optionals if they exist
if "mtgo_code" in set_config.keys():
output_file["mtgoCode"] = set_config["mtgo_code"].upper()
if "parent_set_code" in set_config.keys():
output_file["parentCode"] = set_config["parent_set_code"].upper()
if "block" in set_config.keys():
output_file["block"] = set_config["block"]
if "digital" in set_config.keys():
output_file["isOnlineOnly"] = set_config["digital"]
if "foil_only" in set_config.keys():
output_file["isFoilOnly"] = set_config["foil_only"]
if set_code.upper() in mtgjson4.NON_ENGLISH_SETS:
output_file["isForeignOnly"] = True
# Add booster info based on boosters resource (manually maintained for the time being)
with mtgjson4.RESOURCE_PATH.joinpath("boosters.json").open(
"r", encoding="utf-8"
) as f:
json_dict: Dict[str, List[Any]] = json.load(f)
if output_file["code"] in json_dict.keys():
output_file["boosterV3"] = json_dict[output_file["code"].upper()]
# Add V3 code for some backwards compatibility
with mtgjson4.RESOURCE_PATH.joinpath("gatherer_set_codes.json").open(
"r", encoding="utf-8"
) as f:
json_dict = json.load(f)
if output_file["code"] in json_dict.keys():
output_file["codeV3"] = json_dict[output_file["code"]]
# Declare the version of the build in the output file
output_file["meta"] = {
"version": mtgjson4.__VERSION__,
"date": mtgjson4.__VERSION_DATE__,
"pricesDate": mtgjson4.__PRICE_UPDATE_DATE__,
}
LOGGER.info("Starting cards for {}".format(set_code))
card_holder: List[MTGJSONCard] = convert_to_mtgjson(sf_cards)
card_holder = add_start_flag_and_count_modified(
set_code, set_config["search_uri"], card_holder
)
# Address duplicates in un-sets
card_holder = uniquify_duplicates_in_set(card_holder)
# Move bogus tokens out
card_holder, added_tokens = transpose_tokens(card_holder)
if not skip_keys:
# Add MTGStocks data in
card_holder = add_stocks_data(card_holder)
# Add TCGPlayer information
if "tcgplayer_id" in set_config.keys():
output_file["tcgplayerGroupId"] = set_config["tcgplayer_id"]
if not skip_keys:
add_purchase_fields(output_file["tcgplayerGroupId"], card_holder)
# Set Sizes
output_file["baseSetSize"] = scryfall.get_base_set_size(set_code.upper())
output_file["totalSetSize"] = len(sf_cards)
output_file["cards"] = card_holder
LOGGER.info("Finished cards for {}".format(set_code))
# Handle tokens
LOGGER.info("Starting tokens for {}".format(set_code))
sf_tokens: List[Dict[str, Any]] = scryfall.get_set("t" + set_code)
output_file["tokens"] = build_mtgjson_tokens(sf_tokens + added_tokens)
LOGGER.info("Finished tokens for {}".format(set_code))
# Cleanups and UUIDs
mtgjson_card.DUEL_DECK_LAND_MARKED.set(False)
mtgjson_card.DUEL_DECK_SIDE_COMP.set("a")
for card in sorted(output_file["cards"]):
card.final_card_cleanup()
for token in output_file["tokens"]:
token.final_card_cleanup(is_card=False)
# Add Variations to each entry, as well as mark alternatives
add_variations_and_alternative_fields(output_file["cards"], output_file)
return output_file
def initialize_mkm_set_cards(mcm_id: Optional[str]) -> None:
"""
Initialize the MKM global with the cards found in the set
:param mcm_id: Set's ID, if possible
"""
if mcm_id is None or MKM_API.get(None) is None:
MKM_SET_CARDS.set({})
return
mkm_resp = MKM_API.get().market_place.expansion_singles(1, expansion=mcm_id)
# {SetNum: Object, ... }
dict_by_set_num = {}
for set_content in mkm_resp.json()["single"]:
if not set_content["number"]:
set_content["number"] = ""
# Remove leading zeroes
while set_content["number"].startswith("0"):
set_content["number"] = set_content["number"][1:]
# Split cards get two entries
for name in set_content["enName"].split("//"):
name_no_special_chars = name.strip().lower()
dict_by_set_num[name_no_special_chars] = set_content
MKM_SET_CARDS.set(dict_by_set_num)
def add_stocks_data(cards: List[MTGJSONCard]) -> List[MTGJSONCard]:
"""
Add the MTGStocks content to the card
:param cards:
:return:
"""
for card in cards:
if card.get("tcgplayerProductId"):
stocks_data = mtgstocks.get_card_data(card.get("tcgplayerProductId"))
if stocks_data:
card.set_all(
{
"mtgstocksId": stocks_data["id"],
"prices": {
"paper": stocks_data["paper"],
"paperFoil": stocks_data["foil"],
},
# Future additions may include: "mtgo", "mtgo_foil", and "mtga"
}
)
else:
LOGGER.warning("No TCGPlayer ID Found for {}".format(card.get("name")))
return cards
def transpose_tokens(
cards: List[MTGJSONCard]
) -> Tuple[List[MTGJSONCard], List[Dict[str, Any]]]:
"""
Sometimes, tokens slip through and need to be transplanted
back into their appropriate array. This method will allow
us to pluck the tokens out and return them home.
:param cards: Cards+Tokens to iterate
:return: Cards, Tokens as two separate lists
"""
# Order matters with these, as if you do cards first
# it will shadow the tokens lookup
# Single faced tokens are easy
tokens = [
scryfall.download(scryfall.SCRYFALL_API_CARD + card.get("scryfallId"))
for card in cards
if card.get("layout") in ["token", "emblem"]
]
# Do not duplicate double faced tokens
done_tokens: Set[str] = set()
for card in cards:
if (
card.get("layout") == "double_faced_token"
and card.get("scryfallId") not in done_tokens
):
tokens.append(
scryfall.download(scryfall.SCRYFALL_API_CARD + card.get("scryfallId"))
)
done_tokens.add(card.get("scryfallId"))
# Remaining cards, without any kind of token
cards = [
card
for card in cards
if card.get("layout") not in ["token", "double_faced_token", "emblem"]
]
return cards, tokens
def add_purchase_fields(group_id: int, cards: List[MTGJSONCard]) -> None:
"""
For each card in the set, we will find its tcgplayer ID
and add it to the card if found
:param group_id: group to search for the cards
:param cards: Cards list to add information to
"""
tcg_card_objs = tcgplayer.get_group_id_cards(group_id)
for card in cards:
merge_dict = {}
if tcg_card_objs:
tcgplayer_value = card.add_tcgplayer_fields(tcg_card_objs)
if tcgplayer_value:
merge_dict["tcgplayer"] = tcgplayer_value
if os.environ["MKM_APP_TOKEN"] and os.environ["MKM_APP_SECRET"]:
cardmarket_value = card.get_card_market_link()
if cardmarket_value:
merge_dict["cardmarket"] = cardmarket_value
stocks_value = card.get_mtgstocks_link()
if stocks_value:
merge_dict["mtgstocks"] = stocks_value
card.set("purchaseUrls", merge_dict)
def uniquify_duplicates_in_set(cards: List[MTGJSONCard]) -> List[MTGJSONCard]:
"""
For cards with multiple printings in a set, we need to identify
them against each other.
For silver border sets, we will add (b), (c), ... to the end
of the card name to do so.
:param cards: Cards to check and update for repeats
:return: updated cards list
"""
override_border_color: bool = bool(cards) and cards[0].set_code in ["HHO", "UNH"]
if (
cards
and cards[0].get("borderColor", None) == "silver"
and not override_border_color
):
unique_list = []
duplicate_cards: Dict[str, int] = {}
for card in cards:
# Only if a card is duplicated in a set will it get the (a), (b) appended
total_same_name_cards = sum(
1 for item in cards if item.get("name") == card.get("name")
)
# Ignore basic lands
if (card.get("name") not in mtgjson4.BASIC_LANDS) and (
card.get("name") in duplicate_cards or total_same_name_cards > 1
):
if card.get("name") in duplicate_cards:
duplicate_cards[card.get("name")] += 1
else:
duplicate_cards[card.get("name")] = ord("a")
# Update the name of the card, and remove its names field (as it's not correct here)
new_card = copy.deepcopy(card)
# Only add (b), (c), ... so we have one unique without an altered name
if chr(duplicate_cards[new_card.get("name")]) != "a":
new_card.append(
"name",
" ({0})".format(chr(duplicate_cards[new_card.get("name")])),
)
new_card.remove("names")
unique_list.append(new_card)
else:
# Not a duplicate, just put the normal card into the list
unique_list.append(card)
return unique_list
return cards
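
# Illustrative sketch (added) of the lettering scheme used above, operating on
# plain name strings instead of MTGJSONCard objects: the first copy keeps its
# name, later copies get " (b)", " (c)", ... appended.
def _letter_duplicates_example(names):
    counters = {}
    lettered = []
    for name in names:
        if names.count(name) > 1 or name in counters:
            counters[name] = counters.get(name, ord("a") - 1) + 1
            suffix = "" if counters[name] == ord("a") else " ({})".format(chr(counters[name]))
            lettered.append(name + suffix)
        else:
            lettered.append(name)
    return lettered

# _letter_duplicates_example(["Squirrel", "Squirrel", "Squirrel"])
# -> ["Squirrel", "Squirrel (b)", "Squirrel (c)"]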
def add_variations_and_alternative_fields(
cards: List[MTGJSONCard], file_info: Any
) -> None:
"""
    For non-silver bordered sets, a "variations" field will be created
    that holds the UUIDs of repeat cards.
This will also mark alternative printings within a single set.
:param cards: Cards to check and update for repeats
:param file_info: <<CONST>> object for the file
    :return: nothing
"""
# Non-silver border sets use "variations"
override_border_color: bool = bool(cards) and cards[0].set_code in ["HHO", "UNH"]
if cards and (cards[0].get("borderColor") != "silver" or override_border_color):
for card in cards:
repeats_in_set = [
item
for item in cards
if item.get("name") == card.get("name")
and item.get("uuid") != card.get("uuid")
]
# Add variations field
variations = [r.get("uuid") for r in repeats_in_set]
if variations:
card.set("variations", variations)
# Add alternative tag
# Ignore singleton printings in set, as well as basics
if not repeats_in_set or card.get("name") in mtgjson4.BASIC_LANDS:
continue
# Some hardcoded checking due to inconsistencies upstream
if file_info["code"].upper() in ["UNH", "10E"]:
# Check for duplicates, mark the foils
if (
len(repeats_in_set) >= 1
and card.get("hasFoil")
and not card.get("hasNonFoil")
):
card.set("isAlternative", True)
elif file_info["code"].upper() in ["CN2", "BBD"]:
# Check for set number > set size
if (
int(card.get("number").replace(chr(9733), ""))
> file_info["baseSetSize"]
):
card.set("isAlternative", True)
else:
# Check for a star in the number
if chr(9733) in card.get("number"):
card.set("isAlternative", True)
def add_start_flag_and_count_modified(
set_code: str, search_url: str, mtgjson_cards: List[MTGJSONCard]
) -> List[MTGJSONCard]:
"""
    Since SF doesn't provide individual card notices, we add the starter flag in post-processing
This method will also tell us how many starter cards are in the set
:param set_code: Set to address
:param search_url: URL to fix up to get non-booster cards
:param mtgjson_cards: Modify the argument and return it
    :return: the modified list of cards
"""
starter_card_url = search_url.replace("&unique=", "++not:booster&unique=")
starter_cards = scryfall.download(starter_card_url)
if starter_cards["object"] == "error":
LOGGER.info("All cards in {} are available in boosters".format(set_code))
return mtgjson_cards
for sf_card in starter_cards["data"]:
# Each card has a unique UUID, even if they're the same card printed twice
try:
card = next(
item
for item in mtgjson_cards
if item.get("scryfallId") == sf_card["id"]
)
if card:
card.set("isStarter", True)
except StopIteration:
LOGGER.warning(
"Passed on {0} with SF_ID {1}".format(
                    sf_card["name"], sf_card["id"]
)
)
return mtgjson_cards
def build_mtgjson_tokens(
sf_tokens: List[Dict[str, Any]], sf_card_face: int = 0
) -> List[MTGJSONCard]:
"""
Convert Scryfall tokens to MTGJSON tokens
:param sf_tokens: All tokens in a set
:param sf_card_face: Faces of the token index
:return: List of MTGJSON tokens
"""
token_cards: List[MTGJSONCard] = []
for sf_token in sf_tokens:
token_card = MTGJSONCard(sf_token["set"])
if "card_faces" in sf_token:
token_card.set("names", sf_token["name"].split(" // "))
face_data = sf_token["card_faces"][sf_card_face]
# Prevent duplicate UUIDs for split card halves
# Remove the last character and replace with the id of the card face
token_card.set("scryfallId", sf_token["id"])
token_card.set("scryfallOracleId", sf_token["oracle_id"])
token_card.set("scryfallIllustrationId", sf_token.get("illustration_id"))
# Recursively parse the other cards within this card too
# Only call recursive if it is the first time we see this card object
if sf_card_face == 0:
for i in range(1, len(sf_token["card_faces"])):
LOGGER.info(
"Parsing additional card {0} face {1}".format(
sf_token.get("name"), i
)
)
token_cards += build_mtgjson_tokens([sf_token], i)
if "id" not in sf_token.keys():
LOGGER.info(
"Scryfall_ID not found in {}. Discarding {}".format(
sf_token.get("name"), sf_token
)
)
continue
token_card.set_all(
{
"name": face_data.get("name"),
"type": face_data.get("type_line"),
"text": face_data.get("oracle_text"),
"power": face_data.get("power"),
"colors": face_data.get("colors"),
"colorIdentity": sf_token.get("color_identity"),
"toughness": face_data.get("toughness"),
"loyalty": face_data.get("loyalty"),
"watermark": sf_token.get("watermark"),
"scryfallId": sf_token["id"],
"scryfallOracleId": sf_token.get("oracle_id"),
"scryfallIllustrationId": sf_token.get("illustration_id"),
"layout": "double_faced_token",
"side": chr(97 + sf_card_face),
"borderColor": face_data.get("border_color"),
"artist": face_data.get("artist"),
"isOnlineOnly": sf_token.get("digital"),
"number": sf_token.get("collector_number"),
}
)
else:
token_card.set_all(
{
"name": sf_token.get("name"),
"type": sf_token.get("type_line"),
"text": sf_token.get("oracle_text"),
"power": sf_token.get("power"),
"colors": sf_token.get("colors"),
"colorIdentity": sf_token.get("color_identity"),
"toughness": sf_token.get("toughness"),
"loyalty": sf_token.get("loyalty"),
"watermark": sf_token.get("watermark"),
"scryfallId": sf_token["id"],
"scryfallOracleId": sf_token.get("oracle_id"),
"scryfallIllustrationId": sf_token.get("illustration_id"),
"borderColor": sf_token.get("border_color"),
"artist": sf_token.get("artist"),
"isOnlineOnly": sf_token.get("digital"),
"number": sf_token.get("collector_number"),
}
)
if sf_token.get("layout") == "token":
token_card.set("layout", "normal")
else:
token_card.set("layout", sf_token.get("layout"))
reverse_related: List[str] = []
if "all_parts" in sf_token:
for a_part in sf_token["all_parts"]:
if a_part.get("name") != token_card.get("name"):
reverse_related.append(a_part.get("name"))
token_card.set("reverseRelated", reverse_related)
LOGGER.info(
"Parsed {0} from {1}".format(token_card.get("name"), sf_token.get("set"))
)
token_cards.append(token_card)
return token_cards
def convert_to_mtgjson(sf_cards: List[Dict[str, Any]]) -> List[MTGJSONCard]:
"""
Parallel method to build each card in the set
:param sf_cards: cards to build
:return: list of cards built
"""
    # Clear sessions before the fork() to prevent awkward issues with urllib3
SESSION.set(None)
with multiprocessing.Pool(multiprocessing.cpu_count()) as pool:
results: List[Any] = pool.map(build_mtgjson_card, sf_cards)
all_cards: List[MTGJSONCard] = []
for cards in results:
for card in cards:
all_cards.append(card)
return all_cards
def get_card_colors(mana_cost: str) -> List[str]:
"""
For some cards, we may have to manually determine the card's color.
:param mana_cost: Mana cost string
:return: Colors based on mana cost
"""
color_options: List[str] = ["W", "U", "B", "R", "G"]
ret_val = []
for color in color_options:
if color in mana_cost:
ret_val.append(color)
return ret_val
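
# Example (added): because the check above walks the WUBRG list, colors come
# back in WUBRG order regardless of how the mana cost itself is ordered.
def _example_get_card_colors():
    assert get_card_colors("{2}{U}{W}") == ["W", "U"]
    assert get_card_colors("{G}{G}") == ["G"]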
def get_cmc(mana_cost: str) -> float:
"""
For some cards, we may have to manually update the converted mana cost.
We do this by reading the inner components of each pair of {} and
deciphering what the contents mean. If number, we're good. Otherwise +1.
:param mana_cost: Mana cost string
:return: One sided cmc
"""
total: float = 0
symbol: List[str] = re.findall(r"{([^{]*)}", mana_cost)
for element in symbol:
# Address 2/W, G/W, etc as "higher" cost always first
if "/" in element:
element = element.split("/")[0]
if is_number(element):
total += float(element)
elif element in ["X", "Y", "Z"]: # Placeholder mana
continue
elif element[0] == "H": # Half mana
total += 0.5
else:
total += 1
return total
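
# Worked examples (added): digits add their face value, hybrid symbols use the
# part before the slash, X/Y/Z placeholders add nothing, half-mana ("H...")
# symbols add 0.5, and any other symbol adds 1.
def _example_get_cmc():
    assert get_cmc("{2}{G}{G}") == 4.0   # 2 + 1 + 1
    assert get_cmc("{X}{R}{R}") == 2.0   # X is ignored
    assert get_cmc("{2/W}{2/W}") == 4.0  # hybrid takes the "2" side
    assert get_cmc("{HW}{HW}") == 1.0    # two half-mana symbols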
def build_mtgjson_card(
sf_card: Dict[str, Any], sf_card_face: int = 0
) -> List[MTGJSONCard]:
"""
Build a mtgjson card (and all sub pieces of that card)
:param sf_card: Card to build
:param sf_card_face: Which part of the card (defaults to 0)
:return: List of card(s) build (usually 1)
"""
mtgjson_cards: List[MTGJSONCard] = []
single_card = MTGJSONCard(sf_card["set"])
# Let us know what card we're trying to parse -- good for debugging :)
LOGGER.info("Parsing {0} from {1}".format(sf_card.get("name"), sf_card.get("set")))
# If flip-type, go to card_faces for alt attributes
face_data: Dict[str, Any] = sf_card
if "card_faces" in sf_card:
single_card.set_all(
{
"names": sf_card["name"].split(" // "),
"scryfallId": sf_card["id"],
"scryfallOracleId": sf_card["oracle_id"],
"scryfallIllustrationId": sf_card.get("illustration_id"),
}
)
face_data = sf_card["card_faces"][sf_card_face]
# Split cards and rotational cards have this field, flip cards do not.
# Remove rotational cards via the additional check
if "mana_cost" in sf_card and "//" in sf_card["mana_cost"]:
single_card.set(
"colors",
get_card_colors(sf_card["mana_cost"].split(" // ")[sf_card_face]),
)
single_card.set(
"faceConvertedManaCost",
get_cmc(sf_card["mana_cost"].split("//")[sf_card_face].strip()),
)
elif sf_card["layout"] in ["split", "transform", "aftermath"]:
            # Handle non-normal cards, as they'll have a face split
single_card.set(
"faceConvertedManaCost",
get_cmc(face_data.get("mana_cost", "0").strip()),
)
# Watermark is only attributed on the front side, so we'll account for it
single_card.set(
"watermark",
sf_card["card_faces"][0].get("watermark", None),
single_card.clean_up_watermark,
)
if sf_card["card_faces"][-1]["oracle_text"].startswith("Aftermath"):
single_card.set("layout", "aftermath")
single_card.set("artist", sf_card["card_faces"][sf_card_face].get("artist", ""))
# Recursively parse the other cards within this card too
# Only call recursive if it is the first time we see this card object
if sf_card_face == 0:
for i in range(1, len(sf_card["card_faces"])):
LOGGER.info(
"Parsing additional card {0} face {1}".format(
sf_card.get("name"), i
)
)
mtgjson_cards += build_mtgjson_card(sf_card, i)
else:
single_card.set_all(
{
"scryfallId": sf_card.get("id"),
"scryfallOracleId": sf_card["oracle_id"],
"scryfallIllustrationId": sf_card.get("illustration_id"),
}
)
    # Characteristics that are not shared by both sides of flip-type cards
if face_data.get("mana_cost"):
single_card.set("manaCost", face_data.get("mana_cost"))
if "colors" not in single_card.keys():
if "colors" in face_data:
single_card.set("colors", face_data.get("colors"))
else:
single_card.set("colors", sf_card.get("colors"))
single_card.set_all(
{
"borderColor": sf_card.get("border_color"),
"colorIdentity": sf_card.get("color_identity"),
"convertedManaCost": sf_card.get("cmc"),
"frameEffect": sf_card.get("frame_effect"),
"frameVersion": sf_card.get("frame"),
"hand": sf_card.get("hand_modifier"),
"hasFoil": sf_card.get("foil"),
"hasNonFoil": sf_card.get("nonfoil"),
"isFullArt": sf_card.get("full_art"),
"isOnlineOnly": sf_card.get("digital"),
"isOversized": sf_card.get("oversized"),
"isPromo": sf_card.get("promo"),
"isReprint": sf_card.get("reprint"),
"isReserved": sf_card.get("reserved"),
"isStorySpotlight": sf_card.get("story_spotlight"),
"isTextless": sf_card.get("textless"),
"life": sf_card.get("life_modifier"),
"loyalty": face_data.get("loyalty"),
"name": face_data.get("name"),
"number": sf_card.get("collector_number"),
"power": face_data.get("power"),
"tcgplayerProductId": sf_card.get("tcgplayer_id"),
"text": face_data.get("oracle_text"),
"toughness": face_data.get("toughness"),
"type": face_data.get("type_line"),
}
)
# Set MKM IDs if it exists
if MKM_API.get(None):
mkm_card_found = False
for key, mkm_obj in MKM_SET_CARDS.get().items():
if single_card.get("name").lower() not in key:
continue
if "number" not in mkm_obj.keys() or (
mkm_obj.get("number") in single_card.get("number")
):
single_card.set_all(
{
"mcmId": mkm_obj["idProduct"],
"mcmMetaId": mkm_obj["idMetaproduct"],
}
)
single_card.set_mkm_url(mkm_obj["website"])
mkm_card_found = True
break
if not mkm_card_found:
LOGGER.warning(
"Unable to find MKM information for #{} {}".format(
single_card.get("number"), single_card.get("name")
)
)
if "artist" not in single_card.keys():
single_card.set("artist", sf_card.get("artist"))
if "layout" not in single_card.keys():
single_card.set("layout", sf_card.get("layout"))
if "watermark" not in single_card.keys():
single_card.set(
"watermark",
face_data.get("watermark", None),
single_card.clean_up_watermark,
)
# "isPaper", "isMtgo", "isArena"
for game_mode in sf_card.get("games", []):
single_card.set("is{}".format(game_mode.capitalize()), True)
if "flavor_text" in face_data:
single_card.set("flavorText", face_data.get("flavor_text"))
else:
single_card.set("flavorText", sf_card.get("flavor_text"))
if "color_indicator" in face_data:
single_card.set("colorIndicator", face_data.get("color_indicator"))
elif "color_indicator" in sf_card:
single_card.set("colorIndicator", sf_card.get("color_indicator"))
try:
single_card.set("multiverseId", sf_card["multiverse_ids"][sf_card_face])
except IndexError:
try:
single_card.set("multiverseId", sf_card["multiverse_ids"][0])
except IndexError:
single_card.set("multiverseId", None)
# Add a "side" entry for split cards
# Will only work for two faced cards (not meld, as they don't need this)
if "names" in single_card.keys() and single_card.names_count(2):
# chr(97) = 'a', chr(98) = 'b', ...
single_card.set(
"side", chr(single_card.get("names").index(single_card.get("name")) + 97)
)
# Characteristics that we have to format ourselves from provided data
single_card.set(
"isTimeshifted",
(sf_card.get("frame") == "future") or (sf_card.get("set") == "tsb"),
)
single_card.set("rarity", sf_card.get("rarity"))
# Characteristics that we need custom functions to parse
print_search_url: str = sf_card["prints_search_uri"].replace("%22", "")
single_card.set("legalities", scryfall.parse_legalities(sf_card["legalities"]))
single_card.set(
"rulings",
sorted(
scryfall.parse_rulings(sf_card["rulings_uri"]),
key=lambda ruling: ruling["date"],
),
)
single_card.set("printings", sorted(scryfall.parse_printings(print_search_url)))
card_types: Tuple[List[str], List[str], List[str]] = scryfall.parse_card_types(
single_card.get("type")
)
single_card.set("supertypes", card_types[0])
single_card.set("types", card_types[1])
single_card.set("subtypes", card_types[2])
# Handle meld and all parts tokens issues
    # Will re-address naming if it is already a split card
if "all_parts" in sf_card:
meld_holder = []
single_card.set("names", [])
for a_part in sf_card["all_parts"]:
if a_part["component"] != "token":
if "//" in a_part.get("name"):
single_card.set("names", a_part.get("name").split(" // "))
break
            # This is a meld-only fix, so we ignore tokens/combo pieces
if "meld" in a_part["component"]:
meld_holder.append(a_part["component"])
single_card.append("names", a_part.get("name"))
# If the only entry is the original card, empty the names array
if single_card.names_count(1) and single_card.get("name") in single_card.get(
"names"
):
single_card.remove("names")
# Meld cards should be CardA, Meld, CardB. This fixes that via swap
# meld_holder
if meld_holder and meld_holder[1] != "meld_result":
single_card.get("names")[1], single_card.get("names")[2] = (
single_card.get("names")[2],
single_card.get("names")[1],
)
# Since we built meld cards later, we will add the "side" attribute now
if single_card.names_count(3): # MELD
if single_card.get("name") == single_card.get("names")[0]:
single_card.set("side", "a")
elif single_card.get("name") == single_card.get("names")[2]:
single_card.set("side", "b")
else:
single_card.set("side", "c")
# Characteristics that we cannot get from Scryfall
# Characteristics we have to do further API calls for
single_card.set(
"foreignData",
scryfall.parse_foreign(
print_search_url,
single_card.get("name"),
single_card.get("number"),
sf_card["set"],
),
)
if single_card.get("multiverseId") is not None:
gatherer_cards = gatherer.get_cards(
single_card.get("multiverseId"), single_card.set_code
)
try:
gatherer_card = gatherer_cards[sf_card_face]
single_card.set("originalType", gatherer_card.original_types)
single_card.set("originalText", gatherer_card.original_text)
except IndexError:
LOGGER.warning(
"Unable to parse originals for {}".format(single_card.get("name"))
)
mtgjson_cards.append(single_card)
return mtgjson_cards
| 37.469342 | 100 | 0.586343 |
661f8e7957ae18bd50aeaf8a1cd68777acafce6d | 1,361 | py | Python | rmfriend/pagedata.py | oisinmulvihill/remarkable-friend | 00650302e9815f546bd9525f5af4140ad6d518b6 | [
"MIT"
] | 2 | 2018-07-14T20:05:55.000Z | 2018-10-07T20:42:00.000Z | rmfriend/pagedata.py | oisinmulvihill/remarkable-friend | 00650302e9815f546bd9525f5af4140ad6d518b6 | [
"MIT"
] | 1 | 2018-08-08T13:41:45.000Z | 2018-08-13T02:14:58.000Z | rmfriend/pagedata.py | oisinmulvihill/remarkable-friend | 00650302e9815f546bd9525f5af4140ad6d518b6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
"""
class PageData(object):
"""This file contains a list of templates used on each page of a notebook.
On the reMarkable device a notebook will have a '<UUID>.pagedata' file.
The first page i.e. element 0 of the pages member represents the template
for page 0 in the NotebookLines pages.
This behaviour comes from my observing the notebooks on my device.
"""
def __init__(self, pages=[]):
"""
"""
self.pages = pages
@classmethod
def new(cls):
print("new")
return cls()
@classmethod
def load(cls, pagedata):
"""Return a Pagedata instance for the given data.
:param pagedata: A string of page data lines.
E.g.::
Blank
P Lines medium
LS Grid margin large
P Grid large
If pagedata is not given or is empty the the Pagedata instance will
not contain any pages. Is this actually possible? I'm not sure.
"""
if pagedata and pagedata.strip():
pages = [page for page in pagedata.split('\n') if page.strip()]
else:
pages = []
return cls(pages)
def dump(self):
"""A list of page templates one per line ready to write to disk."""
return "\n".join(self.pages) if self.pages else ""
| 24.745455 | 78 | 0.584129 |
f5627a2450636cdce4bb0519095054bdf8993bdb | 11,671 | py | Python | src/lambda-function.py | jgreenemi/Parris | 94764011ba81eb74927a548f0c3aa01bfa4280a7 | [
"Apache-2.0"
] | 354 | 2018-01-01T13:17:24.000Z | 2021-12-15T06:00:42.000Z | src/lambda-function.py | jgreenemi/Parris | 94764011ba81eb74927a548f0c3aa01bfa4280a7 | [
"Apache-2.0"
] | 7 | 2018-01-02T02:25:27.000Z | 2018-01-02T03:36:53.000Z | src/lambda-function.py | jgreenemi/Parris | 94764011ba81eb74927a548f0c3aa01bfa4280a7 | [
"Apache-2.0"
] | 26 | 2018-01-01T18:32:05.000Z | 2019-09-01T06:45:26.000Z | # Create the CloudFormation stack with the trainer-script loaded.
import boto3
import json
import logging
import os
from pprint import pprint
def parse_training_config(training_config_path=''):
"""
Pull in a training config and return its contents.
:return:
"""
try:
# Default to the existing training config file in the Lambda function if a specific filename wasn't passed in.
if not training_config_path:
training_config_path = 'config/training-config.json'
config = json.load(open(training_config_path))
else:
client_s3 = boto3.client('s3')
s3_response = client_s3.get_object(
Bucket=training_config_path,
Key='training-config.json'
)
config = json.loads(s3_response['Body'].read().decode('utf-8'))
return config
except Exception as e:
msg = 'parse_config failure: {}'.format(e)
logging.error(msg)
return False
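
# Illustrative shape of the training config consumed below (added; the key names
# come from the lookups in this module, the values are placeholders only):
_EXAMPLE_TRAINING_CONFIG = {
    "training-job-name": "my-training-job",
    "cloudformation_template_filename": "cloudformation.json",
    "training-script-filename": "trainer-script.sh",
    "instance-type": "p2.xlarge",
    "instance-iam-role-name": "parris-instance-role",
    "security-group-id": "sg-0123456789abcdef0",
    "subnet-id": "subnet-0123456789abcdef0",
    "ec2-keypair-name": "my-keypair",
    "termination-method": "at-fixed-time",
    "time-limit": 60,
    "stack-replacement": True,
}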
def template_loader(template_path='', cloudformation_template_filename=''):
"""
Given the filepath for the CloudFormation template, load and return it as a dict.
:return:
"""
try:
if not template_path:
template_path = 'config/cloudformation.json'
cfn_template_contents = json.load(open(template_path))
else:
client_s3 = boto3.client('s3')
s3_response = client_s3.get_object(
Bucket=template_path,
Key=cloudformation_template_filename
)
cfn_template_contents = s3_response['Body'].read().decode('utf-8')
return cfn_template_contents
except Exception as e:
msg = 'template_loader failure: {}'.format(e)
logging.error(msg)
return False
def userdata_loader(s3_training_bucket='', trainer_script_name='trainer-script.sh'):
"""
Given the filepath for the trainer-script, load and return its contents as a str.
:param s3_training_bucket:
:param trainer_script_name:
:return:
"""
try:
# If the user didn't pass in another location to pull in the trainer-script from, grab the one in this package.
if not s3_training_bucket:
userdata_filepath = 'src/{}'.format(trainer_script_name)
with open(userdata_filepath, 'r') as f:
userdata_script = f.read()
else:
            # If a value was passed in, assume it to be an S3 bucket name and retrieve the script from it.
client_s3 = boto3.client('s3')
s3_response = client_s3.get_object(
Bucket=s3_training_bucket,
Key=trainer_script_name
)
userdata_script = s3_response['Body'].read().decode('utf-8')
return userdata_script
except Exception as e:
err = 'userdata_loader failure: {}'.format(e)
logging.error(err)
return False
def stack_creator(testmode=False):
"""
The meat of this script, this launches the CFN stack based on the template and userdata script passed in.
If this was launched with the testmode parameter set, validate the CFN template instead. Note that this does NOT
check the use of the userdata script.
:param testmode:
:return:
"""
try:
logging.debug('stack_creator starting.')
msg = ''
# It's okay if you're not planning to use an S3 bucket - this will be a blank string and the functions will
# handle that properly.
s3_training_bucket = os.environ.get('s3_training_bucket', '')
# Get the training config, either from the S3 training bucket or from the Lambda package.
training_config = parse_training_config(
training_config_path=s3_training_bucket
)
cfn_template_contents = template_loader(
template_path=s3_training_bucket,
cloudformation_template_filename=training_config.get('cloudformation_template_filename', ''))
userdata_script = userdata_loader(
s3_training_bucket=s3_training_bucket,
trainer_script_name=training_config.get('training-script-filename', '')
)
# In the userdata script there may be a termination command. If there is one, do a string replacement with the
# time limit configured in the training-config. If the string replacement doesn't find anything to replace,
# the original string is returned unchanged rather than an error, so no try/except is necessary here.
termination_value = 'never'
if 'at-fixed-time' in training_config.get('termination-method', ''):
termination_value = training_config['time-limit']
# elif 'around-cost' in training_config.get('termination-method', ''):
# Pending https://github.com/jgreenemi/Parris/issues/1
# Disabling for now as this is not ready for use.
# termination_value = #...
userdata_script = userdata_script.replace('${TERMINATION_TIME_LIMIT}', str(termination_value))
logging.warning('Updated userdata with {} minute termination window.'.format(termination_value))
client_cfn = boto3.client('cloudformation')
if testmode:
create_stack_response = client_cfn.validate_template(
TemplateBody='{}'.format(cfn_template_contents)
)
msg = 'CloudFormation template passed validation!'
else:
try:
create_stack_response = client_cfn.create_stack(
StackName=training_config.get('training-job-name', 'parris-stack'),
TemplateBody='{}'.format(cfn_template_contents),
OnFailure='DELETE',
Parameters=[
{
'ParameterKey': 'TrainingJobName',
'ParameterValue': training_config.get('training-job-name', '')
},
{
'ParameterKey': 'UserDataScript',
'ParameterValue': str(userdata_script)
},
{
'ParameterKey': 'InstanceType',
'ParameterValue': training_config.get('instance-type', '')
},
{
'ParameterKey': 'InstanceIAMRoleName',
'ParameterValue': training_config.get('instance-iam-role-name', '')
},
{
'ParameterKey': 'SecurityGroupId',
'ParameterValue': training_config.get('security-group-id', '')
},
{
'ParameterKey': 'SubnetId',
'ParameterValue': training_config.get('subnet-id', '')
},
{
'ParameterKey': 'KeyPairName',
'ParameterValue': training_config.get('ec2-keypair-name', '')
}
]
)
msg = 'CloudFormation stack has started launching successfully!'
except Exception as create_err:
# If the training-config is set to allow stack replacements and the create function above failed,
# update the existing stack.
if training_config.get('stack-replacement', False) and 'AlreadyExistsException' in str(create_err):
update_stack_response = client_cfn.update_stack(
StackName=training_config.get('training-job-name', 'parris-stack'),
TemplateBody='{}'.format(cfn_template_contents),
Parameters=[
{
'ParameterKey': 'TrainingJobName',
'ParameterValue': training_config.get('training-job-name', '')
},
{
'ParameterKey': 'UserDataScript',
'ParameterValue': str(userdata_script)
},
{
'ParameterKey': 'InstanceType',
'ParameterValue': training_config.get('instance-type', '')
},
{
'ParameterKey': 'SecurityGroupId',
'ParameterValue': training_config.get('security-group-id', '')
},
{
'ParameterKey': 'SubnetId',
'ParameterValue': training_config.get('subnet-id', '')
},
{
'ParameterKey': 'KeyPairName',
'ParameterValue': training_config.get('ec2-keypair-name', '')
}
]
)
msg = 'CloudFormation stack has updated successfully!'
# If the training config is not set to allow stack updates, or if the returned error wasn't
# AlreadyExistsException, pass this up to the parent catch block.
else:
raise Exception(create_err)
logging.warning(msg)
return True, msg
except Exception as e:
err = 'stack_creator failure: {}'.format(e)
logging.error(err)
return False, err
def _test_stack_creator():
"""
Launch a stack with a test userdata script.
:return:
"""
stack_creator_passfail, stack_creator_msg = stack_creator(testmode=True)
if not stack_creator_passfail:
logging.error('_test_stack_creator() Failed: {}'.format(stack_creator_msg))
return
def lambda_handler(event, context):
"""
The function intended to be kicked off when loaded up by AWS Lambda. Takes the expected event and context arguments.
:param event:
    :type event: dict, list, str, int, float, or NoneType
    :param context:
    :type context: LambdaContext
:return:
"""
logging.debug('Got invocation from Lambda event: {}'.format(str(event)))
# If you'd like to have the handler return some values (for example, for integration into a custom training-job
# dashboard from which one can launch new training stacks), you can set those here. For now, it just returns an
# empty dict.
return_values = {}
stack_creator_passfail, stack_creator_msg = stack_creator()
if not stack_creator_passfail:
err_msg = 'stack_creator() Failed: {}'.format(stack_creator_msg)
logging.error(err_msg)
raise Exception(err_msg)
return return_values
def _test_userdata_loader():
"""
Test that the trainer-script.sh file is loaded properly. Particularly handy for verifying S3 files are reachable.
:return:
"""
try:
s3_training_bucket = 'com.jgreenemi.mlbucket'
training_config = parse_training_config(s3_training_bucket)
userdata_script = userdata_loader(
s3_training_bucket,
training_config.get('training-script-filename', '')
)
return True
except Exception as e:
return False
if __name__ == '__main__':
_test_userdata_loader()
_test_stack_creator()
logging.warning('Tests finished!')
| 38.774086 | 120 | 0.566532 |
b68951b55b5e5ecb4afe037fb94cb1df68998324 | 1,500 | py | Python | tally/api/tests.py | Mapkin/tally | 731940362c4c6003eed7f576620a89206fda312d | [
"MIT"
] | 1 | 2022-02-14T05:52:20.000Z | 2022-02-14T05:52:20.000Z | tally/api/tests.py | Mapkin/tally | 731940362c4c6003eed7f576620a89206fda312d | [
"MIT"
] | null | null | null | tally/api/tests.py | Mapkin/tally | 731940362c4c6003eed7f576620a89206fda312d | [
"MIT"
] | null | null | null | """
Unit tests for the API application.
"""
from django.contrib.auth.models import User
from django.test import TestCase
from tally.api.models import Client, Counter
import datetime
import time
class ClientTest(TestCase):
def test_create(self):
user = User.objects.create_user('test', 'test')
client = Client(user=user, app_name='Terrible App')
client.save()
self.assertEqual(len(client.api_key), 20)
self.assertEqual(client.user.id, user.id)
def test_uniqueness(self):
user1 = User.objects.create_user('user1', 'xxx')
client1 = Client(user=user1, app_name='Cromulent', api_key='derp')
client1.save()
user2 = User.objects.create_user('user2', 'xxx')
client2 = Client(user=user2, app_name='Fungible', api_key='derp')
client2.save()
class CounterTest(TestCase):
def test_increment_with_today(self):
today = datetime.date.today()
counter = Counter(name="test", last_modified=today)
print counter
counter.increment()
self.assertEqual(counter.count, 1)
def test_increment_with_yesterday(self):
yesterday = datetime.date.fromtimestamp(time.time()-60*60*24)
counter = Counter(name="test", count=1, last_modified=yesterday)
counter.increment()
self.assertEqual(counter.count, 1)
def test_reset(self):
counter = Counter(name="test", count=7)
counter.reset()
self.assertEqual(counter.count, 0)
| 29.411765 | 74 | 0.663333 |
3ad527eb840cdf962099f95c0c2685e4534db6eb | 11,564 | py | Python | utils.py | blockchainhelppro/Segwit-Development- | e4dfe990a69a2a1fcdd902b82121c2ecb1aa332d | [
"MIT"
] | null | null | null | utils.py | blockchainhelppro/Segwit-Development- | e4dfe990a69a2a1fcdd902b82121c2ecb1aa332d | [
"MIT"
] | null | null | null | utils.py | blockchainhelppro/Segwit-Development- | e4dfe990a69a2a1fcdd902b82121c2ecb1aa332d | [
"MIT"
] | null | null | null | """
Miscellaneous utility functions, wrappers around some subprocess procedures.
"""
import importlib
import inspect
import os
import shlex
import subprocess
import sys
import fcntl
from core import CoreCommandError
from core import logger
DEVNULL = open(os.devnull, "wb")
def _detach_init():
"""
Fork a child process and exit.
:return: nothing
"""
if os.fork():
# parent exits
os._exit(0)
os.setsid()
def _valid_module(path, file_name):
"""
Check if file is a valid python module.
:param str path: path to file
:param str file_name: file name to check
:return: True if a valid python module file, False otherwise
:rtype: bool
"""
file_path = os.path.join(path, file_name)
if not os.path.isfile(file_path):
return False
if file_name.startswith("_"):
return False
if not file_name.endswith(".py"):
return False
return True
def _is_class(module, member, clazz):
"""
Validates if a module member is a class and an instance of a CoreService.
:param module: module to validate for service
:param member: member to validate for service
:param clazz: clazz type to check for validation
:return: True if a valid service, False otherwise
:rtype: bool
"""
if not inspect.isclass(member):
return False
if not issubclass(member, clazz):
return False
if member.__module__ != module.__name__:
return False
return True
def _is_exe(file_path):
"""
Check if a given file path exists and is an executable file.
:param str file_path: file path to check
:return: True if the file is considered and executable file, False otherwise
:rtype: bool
"""
return os.path.isfile(file_path) and os.access(file_path, os.X_OK)
def close_onexec(fd):
"""
Close on execution of a shell process.
:param fd: file descriptor to close
:return: nothing
"""
fdflags = fcntl.fcntl(fd, fcntl.F_GETFD)
fcntl.fcntl(fd, fcntl.F_SETFD, fdflags | fcntl.FD_CLOEXEC)
def check_executables(executables):
"""
Check executables, verify they exist and are executable.
:param list[str] executables: executable to check
:return: nothing
:raises EnvironmentError: when an executable doesn't exist or is not executable
"""
for executable in executables:
if not _is_exe(executable):
raise EnvironmentError("executable not found: %s" % executable)
def make_tuple(obj):
"""
Create a tuple from an object, or return the object itself.
:param obj: object to convert to a tuple
:return: converted tuple or the object itself
:rtype: tuple
"""
if hasattr(obj, "__iter__"):
return tuple(obj)
else:
return obj,
def make_tuple_fromstr(s, value_type):
"""
Create a tuple from a string.
:param str|unicode s: string to convert to a tuple
:param value_type: type of values to be contained within tuple
:return: tuple from string
:rtype: tuple
"""
    # remove tuple braces and strip commas and spaces from all values in the tuple string
values = []
for x in s.strip("(), ").split(","):
x = x.strip("' ")
if x:
values.append(x)
return tuple(value_type(i) for i in values)
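
# Example (added): round-tripping the string form of a tuple back into values.
def _example_make_tuple_fromstr():
    assert make_tuple_fromstr("(1, 2, 3)", int) == (1, 2, 3)
    assert make_tuple_fromstr("('a', 'b')", str) == ("a", "b")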
def split_args(args):
"""
Convenience method for splitting potential string commands into a shell-like syntax list.
:param list/str args: command list or string
:return: shell-like syntax list
:rtype: list
"""
if isinstance(args, basestring):
args = shlex.split(args)
return args
def mute_detach(args, **kwargs):
"""
Run a muted detached process by forking it.
:param list[str]|str args: arguments for the command
:param dict kwargs: keyword arguments for the command
:return: process id of the command
:rtype: int
"""
args = split_args(args)
kwargs["preexec_fn"] = _detach_init
kwargs["stdout"] = DEVNULL
kwargs["stderr"] = subprocess.STDOUT
return subprocess.Popen(args, **kwargs).pid
def cmd(args, wait=True):
"""
Runs a command on and returns the exit status.
:param list[str]|str args: command arguments
:param bool wait: wait for command to end or not
:return: command status
:rtype: int
"""
args = split_args(args)
logger.debug("command: %s", args)
try:
p = subprocess.Popen(args)
if not wait:
return 0
return p.wait()
except OSError:
raise CoreCommandError(-1, args)
def cmd_output(args):
"""
Execute a command on the host and return a tuple containing the exit status and result string. stderr output
is folded into the stdout result string.
:param list[str]|str args: command arguments
:return: command status and stdout
:rtype: tuple[int, str]
:raises CoreCommandError: when the file to execute is not found
"""
args = split_args(args)
logger.debug("command: %s", args)
try:
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, _ = p.communicate()
status = p.wait()
return status, stdout.strip()
except OSError:
raise CoreCommandError(-1, args)
def check_cmd(args, **kwargs):
"""
    Execute a command on the host and return its combined stdout/stderr output,
    raising an error on a non-zero exit status.
:param list[str]|str args: command arguments
:param dict kwargs: keyword arguments to pass to subprocess.Popen
:return: combined stdout and stderr
:rtype: str
:raises CoreCommandError: when there is a non-zero exit status or the file to execute is not found
"""
kwargs["stdout"] = subprocess.PIPE
kwargs["stderr"] = subprocess.STDOUT
args = split_args(args)
logger.debug("command: %s", args)
try:
p = subprocess.Popen(args, **kwargs)
stdout, _ = p.communicate()
status = p.wait()
if status != 0:
raise CoreCommandError(status, args, stdout)
return stdout.strip()
except OSError:
raise CoreCommandError(-1, args)
def hex_dump(s, bytes_per_word=2, words_per_line=8):
"""
Hex dump of a string.
:param str s: string to hex dump
:param bytes_per_word: number of bytes per word
:param words_per_line: number of words per line
:return: hex dump of string
"""
dump = ""
count = 0
total_bytes = bytes_per_word * words_per_line
while s:
line = s[:total_bytes]
s = s[total_bytes:]
tmp = map(lambda x: ("%02x" * bytes_per_word) % x,
zip(*[iter(map(ord, line))] * bytes_per_word))
if len(line) % 2:
tmp.append("%x" % ord(line[-1]))
dump += "0x%08x: %s\n" % (count, " ".join(tmp))
count += len(line)
return dump[:-1]
def file_munge(pathname, header, text):
"""
Insert text at the end of a file, surrounded by header comments.
:param str pathname: file path to add text to
:param str header: header text comments
:param str text: text to append to file
:return: nothing
"""
# prevent duplicates
file_demunge(pathname, header)
with open(pathname, "a") as append_file:
append_file.write("# BEGIN %s\n" % header)
append_file.write(text)
append_file.write("# END %s\n" % header)
def file_demunge(pathname, header):
"""
Remove text that was inserted in a file surrounded by header comments.
:param str pathname: file path to open for removing a header
:param str header: header text to target for removal
:return: nothing
"""
with open(pathname, "r") as read_file:
lines = read_file.readlines()
start = None
end = None
for i in range(len(lines)):
if lines[i] == "# BEGIN %s\n" % header:
start = i
elif lines[i] == "# END %s\n" % header:
end = i + 1
if start is None or end is None:
return
with open(pathname, "w") as write_file:
lines = lines[:start] + lines[end:]
write_file.write("".join(lines))
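
# Example (added): text appended by file_munge() sits between "# BEGIN <header>"
# and "# END <header>" marker lines, so file_demunge() with the same header
# removes exactly that block again.
def _example_munge_roundtrip(pathname):
    file_munge(pathname, "core-example", "generated line one\ngenerated line two\n")
    file_demunge(pathname, "core-example")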
def expand_corepath(pathname, session=None, node=None):
"""
Expand a file path given session information.
:param str pathname: file path to expand
:param core.session.Session session: core session object to expand path with
:param core.netns.LxcNode node: node to expand path with
:return: expanded path
:rtype: str
"""
if session is not None:
pathname = pathname.replace("~", "/home/%s" % session.user)
pathname = pathname.replace("%SESSION%", str(session.session_id))
pathname = pathname.replace("%SESSION_DIR%", session.session_dir)
pathname = pathname.replace("%SESSION_USER%", session.user)
if node is not None:
pathname = pathname.replace("%NODE%", str(node.objid))
pathname = pathname.replace("%NODENAME%", node.name)
return pathname
def sysctl_devname(devname):
"""
Translate a device name to the name used with sysctl.
:param str devname: device name to translate
:return: translated device name
:rtype: str
"""
if devname is None:
return None
return devname.replace(".", "/")
def load_config(filename, d):
"""
Read key=value pairs from a file, into a dict. Skip comments; strip newline characters and spacing.
:param str filename: file to read into a dictionary
:param dict d: dictionary to read file into
:return: nothing
"""
with open(filename, "r") as f:
lines = f.readlines()
for line in lines:
if line[:1] == "#":
continue
try:
key, value = line.split("=", 1)
d[key] = value.strip()
except ValueError:
logger.exception("error reading file to dict: %s", filename)
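
# Example (added): the expected file format is plain "key=value" lines; note the
# key is taken verbatim (only the value is stripped), so avoid spaces around "=".
def _example_load_config():
    import tempfile
    cfg = {}
    with tempfile.NamedTemporaryFile(mode="w", suffix=".conf", delete=False) as tmp:
        tmp.write("# comment lines are skipped\nlistenaddr=0.0.0.0\nnumthreads=4\n")
        name = tmp.name
    load_config(name, cfg)
    return cfg  # {"listenaddr": "0.0.0.0", "numthreads": "4"}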
def load_classes(path, clazz):
"""
Dynamically load classes for use within CORE.
:param path: path to load classes from
:param clazz: class type expected to be inherited from for loading
:return: list of classes loaded
"""
# validate path exists
logger.debug("attempting to load modules from path: %s", path)
if not os.path.isdir(path):
logger.warn("invalid custom module directory specified" ": %s" % path)
# check if path is in sys.path
parent_path = os.path.dirname(path)
if parent_path not in sys.path:
logger.debug("adding parent path to allow imports: %s", parent_path)
sys.path.append(parent_path)
# retrieve potential service modules, and filter out invalid modules
base_module = os.path.basename(path)
module_names = os.listdir(path)
module_names = filter(lambda x: _valid_module(path, x), module_names)
module_names = map(lambda x: x[:-3], module_names)
# import and add all service modules in the path
classes = []
for module_name in module_names:
import_statement = "%s.%s" % (base_module, module_name)
logger.debug("importing custom module: %s", import_statement)
try:
module = importlib.import_module(import_statement)
members = inspect.getmembers(module, lambda x: _is_class(module, x, clazz))
for member in members:
valid_class = member[1]
classes.append(valid_class)
except:
logger.exception("unexpected error during import, skipping: %s", import_statement)
return classes
| 28.412776 | 112 | 0.642079 |
fbe0e1859dc705fd7758522e94a99aaf97f104cb | 6,740 | py | Python | library/skytap_environment.py | skytap/ansiblebase | d259c5e98a02d9bdb1202cd8792f40094a60d52b | [
"Apache-2.0"
] | null | null | null | library/skytap_environment.py | skytap/ansiblebase | d259c5e98a02d9bdb1202cd8792f40094a60d52b | [
"Apache-2.0"
] | null | null | null | library/skytap_environment.py | skytap/ansiblebase | d259c5e98a02d9bdb1202cd8792f40094a60d52b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# Copyright (c) 2016 Ben Coleman
# Software provided under the terms of the Apache 2.0 license http://www.apache.org/licenses/LICENSE-2.0.txt
#
# 2019-01-23 - M.Measel - added Edit param to Modify action
# 2019-01-24 - M2 - added list_networks
# 2019-01-25 - M2 - added add_tag action
# 2019-01-29 - M2 - added listByTag action
# 2019-01-29 - M2 - added readVM action
DOCUMENTATION = '''
---
module: skytap_environment
short_description: Build and control Skytap cloud environments
'''
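
# Illustrative playbook usage (added; credentials and IDs are placeholders only):
EXAMPLES = '''
- name: Create an environment from a template
  skytap_environment:
    username: "{{ skytap_username }}"
    token: "{{ skytap_token }}"
    action: create
    template_id: 123456
    name: demo-environment

- name: Suspend that environment
  skytap_environment:
    username: "{{ skytap_username }}"
    token: "{{ skytap_token }}"
    action: modify
    environment_id: 987654
    state: suspended
'''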
import json
import requests
import sys
import time
from ansible.module_utils.basic import AnsibleModule
# API endpoint for Skytap REST API
API_BASE = 'https://cloud.skytap.com/'
API_HEADERS = {'accept': 'application/json', 'content-type': 'application/json'}
# Basic REST call to Skytap API
def restCall(auth, method, path, data=None):
try:
if(method == 'GET'):
result = requests.get(API_BASE + path, headers=API_HEADERS, auth=auth)
if(method == 'POST'):
result = requests.post(API_BASE + path, headers=API_HEADERS, auth=auth, data=data)
if(method == 'PUT'):
result = requests.put(API_BASE + path, headers=API_HEADERS, auth=auth, data=data)
if(method == 'DELETE'):
result = requests.delete(API_BASE + path, headers=API_HEADERS, auth=auth, allow_redirects=True)
if len(result.content) > 0:
return result.status_code, result.json()
else:
return result.status_code, None
except:
return -1, None
# Main module code here
def main():
module = AnsibleModule(
argument_spec = dict(
username = dict(required=True),
token = dict(required=True),
action = dict(default='create', choices=['create', 'modify', 'delete', 'read', 'readVM', 'list', 'listByTag', 'list_networks', 'wait_ratelimit', 'copy', 'add_tag']),
template_id = dict(required=False),
environment_id = dict(required=False),
name = dict(required=False),
edit = dict(required=False),
component = dict(required=False),
vm_id = dict(required=False),
tag = dict(required=False),
state = dict(required=False, choices=['running', 'stopped', 'suspended', 'halted', 'reset'])
),
supports_check_mode=False
)
auth = (module.params.get('username'), module.params.get('token'))
if module.params.get('action') == 'create':
if not module.params.get('template_id'):
module.fail_json(msg="template_id is required param when action=create")
request_data = {"template_id": module.params.get('template_id')}
if module.params.get('name'):
request_data['name'] = module.params.get('name')
status, result = restCall(auth, 'POST', '/v1/configurations', data=json.dumps(request_data))
if module.params.get('action') == 'modify':
request_data = {}
if not module.params.get('environment_id'):
module.fail_json(msg="environment_id is required param when action=modify")
if module.params.get('state'):
request_data['runstate'] = module.params.get('state')
if module.params.get('name'):
request_data['name'] = module.params.get('name')
if module.params.get('edit'):
edparms = module.params.get('edit')
obj = edparms.split(':')[0]
val = edparms.split(':')[1]
request_data[obj] = val
if not module.params.get('component'):
status, result = restCall(auth, 'PUT', '/v1/configurations/'+str(module.params.get('environment_id')), data=json.dumps(request_data))
else:
component = module.params.get('component')
status, result = restCall(auth, 'PUT', '/v1/configurations/'+str(module.params.get('environment_id') + '/' + component), data=json.dumps(request_data))
if module.params.get('action') == 'add_tag':
tag = module.params.get('tag')
body = '[{"value":"' + tag + '"}]'
status, result = restCall(auth, 'PUT', '/v1/configurations/'+str(module.params.get('environment_id') + '/tags'), data=body)
if module.params.get('action') == 'delete':
if not module.params.get('environment_id'):
module.fail_json(msg="environment_id is required param when action=delete")
status, result = restCall(auth, 'DELETE', '/v1/configurations/'+str(module.params.get('environment_id')))
if module.params.get('action') == 'read':
if not module.params.get('environment_id'):
module.fail_json(msg="environment_id is required param when action=read")
status, result = restCall(auth, 'GET', '/v1/configurations/'+str(module.params.get('environment_id')))
if module.params.get('action') == 'readVM':
if not module.params.get('vm_id'):
module.fail_json(msg="vm_id is required param when action=readVM")
if not module.params.get('environment_id'):
status, result = restCall(auth, 'GET', '/v1/vms/'+str(module.params.get('vm_id')))
else:
status, result = restCall(auth, 'GET', '/v2/configurations/'+str(module.params.get('environment_id'))+'/vms/'+str(module.params.get('vm_id')))
if module.params.get('action') == 'list':
status, result = restCall(auth, 'GET', '/v2/configurations?scope=me&count=100')
if module.params.get('action') == 'listByTag':
if not module.params.get('tag'):
module.fail_json(msg="tag is required param when action=listByTag")
select_tag = module.params.get('tag')
status, result = restCall(auth, 'GET', '/v2/configurations?scope=company&query=name:' + select_tag + '*&tags=true' + '&count=100')
if module.params.get('action') == 'list_networks':
status, result = restCall(auth, 'GET', '/v2/configurations/' +str(module.params.get('environment_id')) + '/networks')
if module.params.get('action') == 'copy':
if not module.params.get('environment_id'):
module.fail_json(msg="environment_id is required param when action=copy")
request_data = {"configuration_id": module.params.get('environment_id')}
if module.params.get('name'):
request_data['name'] = module.params.get('name')
status, result = restCall(auth, 'POST', '/v1/configurations', data=json.dumps(request_data))
if module.params.get('action') == 'wait_ratelimit':
if not module.params.get('environment_id'):
module.fail_json(msg="environment_id is required param when action=wait_ratelimit")
tries = 0
status = -1
while True:
status, result = restCall(auth, 'GET', '/v1/configurations/'+str(module.params.get('environment_id')))
tries = tries + 1
            if (status != 423 and status != 422) or tries > 30:
time.sleep(5)
break
time.sleep(5)
# Check results and exit
if status != requests.codes.ok:
err = "No error message given, likely connection or network failure"
if result != None and result.has_key('error'): err = result['error']
module.fail_json(msg="API call failed, HTTP status: "+str(status)+", error: "+err)
else:
module.exit_json(changed=True, api_result=result, status_code=status)
module.exit_json(changed=False)
if __name__ == '__main__':
main()
| 40.60241 | 168 | 0.695252 |
a8722436c8e2845dd408c6ce1090810f0732769e | 764 | py | Python | benchmarks/pymcell4/B5000_autophosphorylation/instantiation.py | mcellteam/mcell-tests | 34d2d967b75d56edbae999bf0090641850f4f4fe | [
"MIT"
] | 1 | 2021-08-13T20:40:54.000Z | 2021-08-13T20:40:54.000Z | benchmarks/pymcell4/B5000_autophosphorylation/instantiation.py | mcellteam/mcell_tests | 34d2d967b75d56edbae999bf0090641850f4f4fe | [
"MIT"
] | null | null | null | benchmarks/pymcell4/B5000_autophosphorylation/instantiation.py | mcellteam/mcell_tests | 34d2d967b75d56edbae999bf0090641850f4f4fe | [
"MIT"
] | null | null | null | # WARNING: This is an automatically generated file and will be overwritten
# by CellBlender on the next model export.
import os
import shared
import mcell as m
from parameters import *
from subsystem import *
MODEL_PATH = os.path.dirname(os.path.abspath(__file__))
# ---- instantiation ----
# ---- release sites ----
# ---- surface classes assignment ----
box = m.geometry_utils.create_box('box', 0.14422)
box.is_bngl_compartment = True
# ---- create instantiation object and add components ----
instantiation = m.Instantiation()
instantiation.add_geometry_object(box)
# load seed species information from bngl file
instantiation.load_bngl_compartments_and_seed_species(os.path.join(MODEL_PATH, 'model.bngl'), None, shared.parameter_overrides)
| 25.466667 | 127 | 0.751309 |
d9f9b8448cfccea2ce1e7e85589539c6dda36276 | 9,488 | py | Python | tests/test_blockchain.py | s7p/bitcoingraph | 0d5edf9957db63c497bffb9d7bc0fb083c05bced | [
"MIT"
] | null | null | null | tests/test_blockchain.py | s7p/bitcoingraph | 0d5edf9957db63c497bffb9d7bc0fb083c05bced | [
"MIT"
] | null | null | null | tests/test_blockchain.py | s7p/bitcoingraph | 0d5edf9957db63c497bffb9d7bc0fb083c05bced | [
"MIT"
] | null | null | null | import unittest
from tests.rpc_mock import BitcoinProxyMock
from bitcoingraph.blockchain import Blockchain, BlockchainException
from bitcoingraph.model import Input, Output
BH1 = "000000000002d01c1fccc21636b607dfd930d31d01c3a62104612a1719011250"
BH1_HEIGHT = 99999
BH2 = "000000000003ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506"
BH2_HEIGHT = 100000
BH3 = "00000000000080b66c911bd5ba14a74260057311eaeb1982802f7010f1a9f090"
BH3_HEIGHT = 100001
# standard transactions
TX1 = "8c14f0db3df150123e6f3dbbf30f8b955a8249b62ac1d1ff16284aefa3d06d87"
TX2 = "fff2525b8931402dd09222c50775608f75787bd2b87e56995a7bdd30f79702c4"
TX3 = "87a157f3fd88ac7907c05fc55e271dc4acdc5605d187d646604ca8c0e9382e03"
# transaction with unknown output
TXE = "a288fec5559c3f73fd3d93db8e8460562ebfe2fcf04a5114e8d0f2920a6270dc"
# transaction with multiple in and outputs
TXM = "d5f013abf2cf4af6d68bcacd675c91f19bab5b7103b4ac2f4941686eb47da1f0"
class TestBlockchainObject(unittest.TestCase):
def setUp(self):
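        # All tests run against a mocked Bitcoin RPC proxy (BitcoinProxyMock),
        # so no running bitcoind node is required.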
self.bitcoin_proxy = BitcoinProxyMock()
self.blockchain = Blockchain(self.bitcoin_proxy)
def test_init(self):
self.assertIsNotNone(self.blockchain)
self.assertIsNotNone(self.bitcoin_proxy)
class TestBlock(TestBlockchainObject):
def test_time(self):
block = self.blockchain.get_block_by_hash(BH1)
self.assertEqual(block.timestamp, 1293623731)
def test_time_as_dt(self):
block = self.blockchain.get_block_by_hash(BH1)
self.assertEqual(block.formatted_time(), "2010-12-29 11:55:31")
def test_height(self):
block = self.blockchain.get_block_by_hash(BH1)
self.assertEqual(block.height, BH1_HEIGHT)
def test_hash(self):
block = self.blockchain.get_block_by_hash(BH1)
self.assertEqual(block.hash, BH1)
    def test_has_next_block(self):
        block = self.blockchain.get_block_by_hash(BH1)
        self.assertTrue(block.has_next_block())
        block = self.blockchain.get_block_by_hash(BH3)
        self.assertFalse(block.has_next_block())
def test_nextblock(self):
block = self.blockchain.get_block_by_hash(BH1)
self.assertEqual(block.next_block.height, BH2_HEIGHT)
block = self.blockchain.get_block_by_hash(BH3)
self.assertIsNone(block.next_block)
def test_tx_count(self):
block = self.blockchain.get_block_by_hash(BH1)
self.assertEqual(len(block.transactions), 1)
block = self.blockchain.get_block_by_hash(BH2)
self.assertEqual(len(block.transactions), 4)
def test_tx_ids(self):
block = self.blockchain.get_block_by_hash(BH2)
self.assertTrue(TX1 in [transaction.txid for transaction in block.transactions])
def test_transactions(self):
block = self.blockchain.get_block_by_hash(BH1)
txs = [tx for tx in block.transactions]
self.assertEqual(len(txs), 1)
for tx in txs:
self.assertIsNotNone(tx.txid)
block = self.blockchain.get_block_by_hash(BH2)
txs = [tx for tx in block.transactions]
self.assertEqual(len(txs), 4)
for tx in txs:
self.assertIsNotNone(tx.txid)
def test_difficulty(self):
block = self.blockchain.get_block_by_hash(BH1)
self.assertAlmostEqual(block.difficulty, 14484.1623612254)
def test_prev_hash(self):
block = self.blockchain.get_block_by_hash(BH1)
self.assertEqual(block.previous_block.hash, "0000000000002103637910d267190996687fb095880d432c6531a527c8ec53d1")
class TestTxInput(TestBlockchainObject):
def test_is_coinbase(self):
tx = self.blockchain.get_transaction(TX1)
tx_input = tx.inputs[0]
self.assertTrue(tx_input.is_coinbase)
def test_is_not_coinbase(self):
tx = self.blockchain.get_transaction(TX2)
tx_input = tx.inputs[0]
self.assertFalse(tx_input.is_coinbase)
def test_prev_tx_hash(self):
tx = self.blockchain.get_transaction(TX2)
tx_input = tx.inputs[0]
self.assertEqual(tx_input.output_reference['txid'], TX3)
def test_prev_tx_coinbase(self):
tx = self.blockchain.get_transaction(TX1)
tx_input = tx.inputs[0]
self.assertIsNone(tx_input.output_reference)
def test_tx_output_index(self):
tx = self.blockchain.get_transaction(TX2)
tx_input = tx.inputs[0]
self.assertEqual(tx_input.output_reference['vout'], 0)
def test_prev_tx_output(self):
tx = self.blockchain.get_transaction(TX2)
tx_input = tx.inputs[0]
prev_tx_output = tx_input.output
self.assertIsNotNone(prev_tx_output)
def test_addresses(self):
tx = self.blockchain.get_transaction(TX2)
tx_input = tx.inputs[0]
self.assertEqual("12bCGuqBso4K8pKoNFXHUR4eMZg8j4xqN3",
tx_input.output.addresses[0])
class TestTxOutput(TestBlockchainObject):
def test_index(self):
tx = self.blockchain.get_transaction(TX2)
self.assertEqual(0, tx.outputs[0].index)
self.assertEqual(1, tx.outputs[1].index)
def test_value(self):
tx = self.blockchain.get_transaction(TX2)
self.assertEqual(5.56000000, tx.outputs[0].value)
self.assertEqual(44.44000000, tx.outputs[1].value)
def test_addresses(self):
tx = self.blockchain.get_transaction(TX2)
self.assertEqual("1JqDybm2nWTENrHvMyafbSXXtTk5Uv5QAn",
tx.outputs[0].addresses[0])
self.assertEqual("1EYTGtG4LnFfiMvjJdsU7GMGCQvsRSjYhx",
tx.outputs[1].addresses[0])
def test_empty_addresses(self):
"""
        Test that an empty list is returned when no output addresses are present.
"""
tx = self.blockchain.get_transaction(TXE)
self.assertEqual(["pk_0469b7eaf1cca8a7c8592ad49313b4cb6474a845604456d48b4b252904e1d61ceda95ac987ad163e957bdbd2da2736861fbfad93dbf8e0a218308a49d94ab9a077"],
tx.outputs[0].addresses)
self.assertFalse(tx.outputs[1].addresses)
class TestTransaction(TestBlockchainObject):
def test_blocktime(self):
tx = self.blockchain.get_transaction(TX1)
self.assertEqual(tx.block.timestamp, 1293623863)
def test_blocktime_as_dt(self):
tx = self.blockchain.get_transaction(TX1)
self.assertEqual(tx.block.formatted_time(), "2010-12-29 11:57:43")
def test_id(self):
tx = self.blockchain.get_transaction(TX1)
self.assertEqual(tx.txid, TX1)
def test_get_input_count(self):
tx = self.blockchain.get_transaction(TX1)
self.assertEqual(len(tx.inputs), 1)
tx = self.blockchain.get_transaction(TX2)
self.assertEqual(len(tx.inputs), 1)
def test_get_inputs(self):
tx = self.blockchain.get_transaction(TX1)
for tx_input in tx.inputs:
self.assertIsInstance(tx_input, Input)
def test_is_coinbase_tx(self):
self.assertTrue(self.blockchain.get_transaction(TX1).is_coinbase())
self.assertFalse(self.blockchain.get_transaction(TX2).is_coinbase())
def test_get_output_count(self):
tx = self.blockchain.get_transaction(TX1)
self.assertEqual(len(tx.outputs), 1)
tx = self.blockchain.get_transaction(TX2)
self.assertEqual(len(tx.outputs), 2)
def test_get_outputs(self):
tx = self.blockchain.get_transaction(TX1)
for tx_output in tx.outputs:
self.assertIsInstance(tx_output, Output)
class TestBlockchain(TestBlockchainObject):
def test_get_block_by_hash(self):
block = self.blockchain.get_block_by_hash(BH1)
self.assertEqual(block.hash, BH1)
def test_get_block_by_height(self):
block = self.blockchain.get_block_by_height(BH1_HEIGHT)
self.assertEqual(block.height, BH1_HEIGHT)
def test_get_blocks_in_range(self):
blocks = [block for block in self.blockchain.get_blocks_in_range(
99999, 100001)]
self.assertEqual(len(blocks), 3)
self.assertEqual(blocks[0].height, 99999)
self.assertEqual(blocks[1].height, 100000)
self.assertEqual(blocks[2].height, 100001)
def test_get_transaction(self):
tx = self.blockchain.get_transaction(TX1)
self.assertEqual(tx.txid, TX1)
def test_get_transactions(self):
tx_ids = [TX1, TX2]
txs = self.blockchain.get_transactions(tx_ids)
self.assertEqual(2, len(txs))
def test_get_max_blockheight(self):
max_height = self.blockchain.get_max_block_height()
self.assertEqual(max_height, 100001)
def test_exceptions(self):
with self.assertRaises(BlockchainException) as cm:
self.blockchain.get_block_by_hash("aa")
self.assertEqual("Cannot retrieve block aa", cm.exception.msg)
with self.assertRaises(BlockchainException) as cm:
self.blockchain.get_block_by_height(123)
self.assertEqual("Cannot retrieve block with height 123",
cm.exception.msg)
with self.assertRaises(BlockchainException) as cm:
self.blockchain.get_transaction("bb")
self.assertEqual("Cannot retrieve transaction with id bb",
cm.exception.msg)
| 36.492308 | 163 | 0.701518 |