Dataset schema (column: type, observed length/value range; ⌀ = column contains nulls):

hexsha: string, length 40
size: int64, 1 to 1.03M
ext: string, 10 classes
lang: string, 1 class
max_stars_repo_path: string, length 3 to 239
max_stars_repo_name: string, length 5 to 130
max_stars_repo_head_hexsha: string, length 40 to 78
max_stars_repo_licenses: list, length 1 to 10
max_stars_count: int64, 1 to 191k ⌀
max_stars_repo_stars_event_min_datetime: string, length 24 ⌀
max_stars_repo_stars_event_max_datetime: string, length 24 ⌀
max_issues_repo_path: string, length 3 to 239
max_issues_repo_name: string, length 5 to 130
max_issues_repo_head_hexsha: string, length 40 to 78
max_issues_repo_licenses: list, length 1 to 10
max_issues_count: int64, 1 to 67k ⌀
max_issues_repo_issues_event_min_datetime: string, length 24 ⌀
max_issues_repo_issues_event_max_datetime: string, length 24 ⌀
max_forks_repo_path: string, length 3 to 239
max_forks_repo_name: string, length 5 to 130
max_forks_repo_head_hexsha: string, length 40 to 78
max_forks_repo_licenses: list, length 1 to 10
max_forks_count: int64, 1 to 105k ⌀
max_forks_repo_forks_event_min_datetime: string, length 24 ⌀
max_forks_repo_forks_event_max_datetime: string, length 24 ⌀
content: string, length 1 to 1.03M
avg_line_length: float64, 1 to 958k
max_line_length: int64, 1 to 1.03M
alphanum_fraction: float64, 0 to 1

Rows follow, one per file. Each max_stars / max_issues / max_forks line lists, in schema order: path | repo name | head hexsha | licenses | count | event date range.
hexsha: 794dd64e84dcbe00dc7ecff4251e7f009f045d9e | size: 3,506 | ext: py | lang: Python
max_stars: demo/GridLabelRenderer.py | ferren/pyXterm | b4444cf90893089423295b8792173f0c08b73b52 | ["Apache-2.0"] | count: 3 | events: 2018-03-19T07:57:10.000Z to 2021-07-05T08:55:14.000Z
max_issues: demo/GridLabelRenderer.py | ferren/pyXterm | b4444cf90893089423295b8792173f0c08b73b52 | ["Apache-2.0"] | count: 6 | events: 2020-03-24T15:40:18.000Z to 2021-12-13T19:46:09.000Z
max_forks: demo/GridLabelRenderer.py | ferren/pyXterm | b4444cf90893089423295b8792173f0c08b73b52 | ["Apache-2.0"] | count: 4 | events: 2018-03-29T21:59:55.000Z to 2019-12-16T14:56:38.000Z
content:
#!/usr/bin/env python
import wx
import wx.grid as grid
import wx.lib.mixins.gridlabelrenderer as glr
#----------------------------------------------------------------------
class MyGrid(grid.Grid, glr.GridWithLabelRenderersMixin):
def __init__(self, *args, **kw):
grid.Grid.__init__(self, *args, **kw)
glr.GridWithLabelRenderersMixin.__init__(self)
class MyRowLabelRenderer(glr.GridLabelRenderer):
def __init__(self, bgcolor):
self._bgcolor = bgcolor
def Draw(self, grid, dc, rect, row):
dc.SetBrush(wx.Brush(self._bgcolor))
dc.SetPen(wx.TRANSPARENT_PEN)
dc.DrawRectangle(rect)
hAlign, vAlign = grid.GetRowLabelAlignment()
text = grid.GetRowLabelValue(row)
self.DrawBorder(grid, dc, rect)
self.DrawText(grid, dc, rect, text, hAlign, vAlign)
class MyColLabelRenderer(glr.GridLabelRenderer):
def __init__(self, bgcolor):
self._bgcolor = bgcolor
def Draw(self, grid, dc, rect, col):
dc.SetBrush(wx.Brush(self._bgcolor))
dc.SetPen(wx.TRANSPARENT_PEN)
dc.DrawRectangle(rect)
hAlign, vAlign = grid.GetColLabelAlignment()
text = grid.GetColLabelValue(col)
self.DrawBorder(grid, dc, rect)
self.DrawText(grid, dc, rect, text, hAlign, vAlign)
class MyCornerLabelRenderer(glr.GridLabelRenderer):
def __init__(self):
import images
self._bmp = images.Smiles.GetBitmap()
def Draw(self, grid, dc, rect, rc):
        x = rect.left + (rect.width - self._bmp.GetWidth()) // 2
        y = rect.top + (rect.height - self._bmp.GetHeight()) // 2
dc.DrawBitmap(self._bmp, x, y, True)
class TestPanel(wx.Panel):
def __init__(self, parent, log):
self.log = log
wx.Panel.__init__(self, parent, -1)
ROWS = 27
COLS = 15
g = MyGrid(self, size=(100,100))
g.CreateGrid(ROWS, COLS)
g.SetCornerLabelRenderer(MyCornerLabelRenderer())
for row in range(0, ROWS, 3):
g.SetRowLabelRenderer(row+0, MyRowLabelRenderer('#ffe0e0'))
g.SetRowLabelRenderer(row+1, MyRowLabelRenderer('#e0ffe0'))
g.SetRowLabelRenderer(row+2, MyRowLabelRenderer('#e0e0ff'))
for col in range(0, COLS, 3):
g.SetColLabelRenderer(col+0, MyColLabelRenderer('#e0ffe0'))
g.SetColLabelRenderer(col+1, MyColLabelRenderer('#e0e0ff'))
g.SetColLabelRenderer(col+2, MyColLabelRenderer('#ffe0e0'))
self.Sizer = wx.BoxSizer()
self.Sizer.Add(g, 1, wx.EXPAND)
#----------------------------------------------------------------------
def runTest(frame, nb, log):
win = TestPanel(nb, log)
return win
#----------------------------------------------------------------------
overview = """<html><body>
<h2><center>GridLabelRenderer</center></h2>
The <tt>wx.lib.mixins.gridlabelrenderer</tt> module provides a mixin
class for wx.grid.Grid that enables it to have plugin renderers that
work like the normal cell renderers do. If desired you can specify a
different renderer for each row or col label, and even for the little
corner label in the upper left corner of the grid. When each of those
labels needs to be drawn, the mixin calls the renderer's Draw method with
the dc and rectangle, allowing your renderer class to do just about
anything that it wants.
</body></html>
"""
if __name__ == '__main__':
import sys,os
import run
run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
avg_line_length: 30.754386 | max_line_length: 71 | alphanum_fraction: 0.62065

hexsha: 794dd6ed81a137cae34ae23fc45a6b24d618c83c | size: 366 | ext: py | lang: Python
max_stars: experiments/jacobi-2d/tmp_files/7161.py | LoopTilingBenchmark/benchmark | 52a3d2e70216552a498fd91de02a2fa9cb62122c | ["BSD-2-Clause"] | count: null | events: null
max_issues: experiments/jacobi-2d/tmp_files/7161.py | LoopTilingBenchmark/benchmark | 52a3d2e70216552a498fd91de02a2fa9cb62122c | ["BSD-2-Clause"] | count: null | events: null
max_forks: experiments/jacobi-2d/tmp_files/7161.py | LoopTilingBenchmark/benchmark | 52a3d2e70216552a498fd91de02a2fa9cb62122c | ["BSD-2-Clause"] | count: null | events: null
content:
from chill import *
source('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/jacobi-2d/kernel.c')
destination('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/experiments/jacobi-2d/tmp_files/7161.c')
procedure('kernel_jacobi_2d')
loop(0)
known(' n > 2 ')
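# The four tile() commands below apply two levels of rectangular tiling to each of the
# two jacobi-2d statements (0 and 1): a 64-wide tile at the outer level and a 16-wide
# tile inside it. The argument meaning is assumed to follow the usual CHiLL convention
# of (statement, loop level, tile size, control loop level).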
tile(0,2,64,2)
tile(0,4,16,4)
tile(1,2,64,2)
tile(1,4,16,4)
avg_line_length: 28.153846 | max_line_length: 118 | alphanum_fraction: 0.751366

hexsha: 794dd89781cf82c742bf2d8fef7c3186339c8ccd | size: 8,027 | ext: py | lang: Python
max_stars: train/bert.py | benywon/ComQA | 6731d63d16b731d6c3654b2dc7d2503cf333127f | ["Apache-2.0"] | count: 15 | events: 2021-01-16T09:48:56.000Z to 2022-03-12T07:22:40.000Z
max_issues: train/bert.py | benywon/ComQA | 6731d63d16b731d6c3654b2dc7d2503cf333127f | ["Apache-2.0"] | count: null | events: null
max_forks: train/bert.py | benywon/ComQA | 6731d63d16b731d6c3654b2dc7d2503cf333127f | ["Apache-2.0"] | count: 5 | events: 2021-01-16T10:15:56.000Z to 2021-11-19T06:30:25.000Z
content:
# -*- coding: utf-8 -*-
"""
@Time : 2021/1/14 5:34 PM
@FileName: bert.py
@author: 王炳宁 (Wang Bingning)
@contact: wangbingning@sogou-inc.com
"""
import sys
import time
import apex
import torch
import torch.distributed as dist
from apex import amp
sys.path.append('..')
from modules.BERT import Bert
from train.parser import get_argument_parser
from utils import *
np.random.seed(1000)
torch.manual_seed(1024)
torch.distributed.init_process_group(backend='nccl',
init_method='env://')
args = get_argument_parser()
print(args.local_rank, dist.get_rank(), dist.get_world_size())
torch.cuda.set_device(args.local_rank)
vocab_size = 50000
n_embedding = 128
n_hidden = 768
n_layer = 12
n_head = 12
batch_size = 8
max_learning_rate = 4e-5
doc_max_length_size = 1024
train_data = load_file(args.train_file_path)
dev_data = load_file(args.dev_file_path)
dev_data = sorted(dev_data, key=lambda x: len(x[0]))
remove_data_size = len(dev_data) % dist.get_world_size()
thread_dev_data = [dev_data[x + args.local_rank] for x in
range(0, len(dev_data) - remove_data_size, dist.get_world_size())]
print('train data size is {} test size {}'.format(len(train_data), len(dev_data)))
model = Bert(vocab_size, n_embedding, n_hidden, n_layer, n_head)
filename = args.pretrain_model
state_dict = load_file(filename)
for name, para in model.named_parameters():
if name not in state_dict:
if dist.get_rank() == 0:
print('{} not load'.format(name))
continue
para.data = torch.FloatTensor(state_dict[name])
print('model size {}'.format(get_model_parameters(model)))
model.cuda()
if args.optimizer.lower() == 'adam':
    optimizer = apex.optimizers.FusedAdam
elif args.optimizer.lower() == 'lamb':
optimizer = apex.optimizers.FusedLAMB
else:
optimizer = apex.optimizers.FusedSGD
optim = optimizer(
model.parameters(),
eps=2.0e-7,
lr=1.0e-7,
)
model, optim = amp.initialize(model, optim, opt_level="O2", verbosity=0)
model = apex.parallel.DistributedDataParallel(model)
warm_up_steps = 500
lr_opt_steps = max_learning_rate / 1000000
warm_up_lr_opt_steps = max_learning_rate / warm_up_steps
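# Learning-rate schedule applied in train(): the rate grows linearly by
# warm_up_lr_opt_steps for the first warm_up_steps updates (reaching max_learning_rate),
# then decays linearly by lr_opt_steps (max_learning_rate / 1e6) on every later update.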
def metric_sum(val):
tensor = torch.tensor(val).cuda()
dist.reduce(tensor, 0)
return tensor.item()
def metric_mean(val):
tensor = torch.tensor(val).cuda()
dist.reduce(tensor, 0)
return tensor.item() / dist.get_world_size()
def get_shuffle_train_data():
pool = {}
for one in train_data:
length = len(one[0]) // 5
if length not in pool:
pool[length] = []
pool[length].append(one)
for one in pool:
np.random.shuffle(pool[one])
length_lst = list(pool.keys())
np.random.shuffle(length_lst)
whole_data = [x for y in length_lst for x in pool[y]]
remove_data_size = len(whole_data) % dist.get_world_size()
thread_data = [whole_data[x + args.local_rank] for x in
range(0, len(whole_data) - remove_data_size, dist.get_world_size())]
return thread_data
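# get_train_data flattens the padded batch, replaces the -1 markers with the extra token
# id `vocab_size`, and builds a binary label over every `vocab_size` position: 1 where the
# position originally held a -1 marker, 0 elsewhere.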
def get_train_data(batch, max_len=doc_max_length_size):
batch, _ = padding(batch, max_len=max_len)
seq = batch.flatten()
real_end_pos = np.where(seq == -1)[0]
np.put(seq, real_end_pos, vocab_size)
all_end_pos_seq = np.where(seq == vocab_size)[0]
label = np.zeros(shape=len(all_end_pos_seq), dtype='float32')
for i, j in enumerate(all_end_pos_seq):
if j in real_end_pos:
label[i] = 1
batch = seq.reshape(batch.shape)
return batch, label
current_number = 0
update_number = 0
def evaluation(epo):
results = []
for i in range(dist.get_world_size()):
results.extend(load_file('{}.tmp.obj'.format(i)))
os.remove('{}.tmp.obj'.format(i))
print('epoch:{},total:{}'.format(epo, len(results)))
threshold = 0.5
precision, recall, f1, macro_f1, accuracy = evaluate_comqa(results, threshold)
print('threshold:{}\nprecision:{}\nrecall:{}\nf1:{}\nmacro_f1:{}\naccuracy:{}\n{}'.format(
threshold, precision,
recall, f1,
macro_f1, accuracy,
'===' * 10))
return [precision, recall, macro_f1, f1, accuracy]
def dev(epo):
model.eval()
total = len(thread_dev_data)
results = []
with torch.no_grad():
for i in tqdm(range(0, total, batch_size)):
sample = thread_dev_data[i:i + batch_size]
context_raw = [x[0] for x in sample]
paras = [x[1] for x in sample]
batch, label = get_train_data(context_raw, 1024)
batch = torch.LongTensor(batch)
mask_idx = torch.eq(batch, vocab_size)
answer_logits = model([batch.cuda(), None])
end_num = mask_idx.sum(1).data.numpy().tolist()
answer_logits = answer_logits.cpu().data.numpy().tolist()
start = 0
for one_sent_end_num, para in zip(end_num, paras):
pred = answer_logits[start:start + one_sent_end_num]
results.append([pred, para])
start += one_sent_end_num
dump_file(results, '{}.tmp.obj'.format(dist.get_rank()))
dist.barrier()
if dist.get_rank() == 0:
return evaluation(epo)
return None
def train(epo):
global current_number, update_number
model.train()
data = get_shuffle_train_data()
total = len(data)
total_loss = 0
num = 0
pre_time = None
instance_number = 0
for i in range(0, total, batch_size):
context = [x[0] for x in data[i:i + batch_size]]
batch, label = get_train_data(context)
batch = torch.LongTensor(batch)
loss = model([batch.cuda(), torch.FloatTensor(label).cuda()])
with amp.scale_loss(loss, optim) as scaled_loss:
scaled_loss.backward()
total_loss += loss.item() * len(context)
instance_number += len(context)
optim.step()
optim.zero_grad()
update_number += 1
for param_group in optim.param_groups:
if update_number > warm_up_steps:
param_group['lr'] -= lr_opt_steps
else:
param_group['lr'] += warm_up_lr_opt_steps
num += 1
if num % args.log_interval == 0:
if pre_time is None:
eclipse = 0
else:
eclipse = time.time() - pre_time
total_loss = metric_sum(total_loss)
instance_number = metric_sum(instance_number)
if dist.get_rank() == 0:
print(
'epoch {}, mask loss is {:5.4f}, ms per batch is {:7.4f}, eclipse {:4.3f}% lr={:e}'.format(epo,
total_loss / instance_number,
1000 * eclipse / instance_number,
i * 100 / total,
optim.param_groups[
0][
'lr']))
pre_time = time.time()
total_loss = 0
instance_number = 0
if __name__ == '__main__':
results = []
best_f1 = 0
for i in range(args.epoch):
train(i)
results = dev(i)
output = {}
if dist.get_rank() == 0:
print('epoch {} done!! result is {}'.format(i, results))
if results[2] > best_f1:
best_f1 = results[2]
for name, param in model.module.named_parameters():
output[name] = param.data.cpu().numpy()
dump_file(output, args.model_save_path)
avg_line_length: 34.303419 | max_line_length: 145 | alphanum_fraction: 0.572817

hexsha: 794dd8fe041edced5cc7397a91312f47fed7167e | size: 3,723 | ext: py | lang: Python
max_stars: lib/galaxy/webapps/galaxy/api/roles.py | Nerdinacan/galaxy | 4b07545d251622dde24d34b62d3d9a857e232780 | ["CC-BY-3.0"] | count: null | events: null
max_issues: lib/galaxy/webapps/galaxy/api/roles.py | Nerdinacan/galaxy | 4b07545d251622dde24d34b62d3d9a857e232780 | ["CC-BY-3.0"] | count: null | events: null
max_forks: lib/galaxy/webapps/galaxy/api/roles.py | Nerdinacan/galaxy | 4b07545d251622dde24d34b62d3d9a857e232780 | ["CC-BY-3.0"] | count: null | events: null
content:
"""
API operations on Role objects.
"""
import logging
from sqlalchemy import false
from galaxy import web
from galaxy.webapps.base.controller import BaseAPIController, url_for
log = logging.getLogger(__name__)
class RoleAPIController(BaseAPIController):
@web.legacy_expose_api
def index(self, trans, **kwd):
"""
GET /api/roles
Displays a collection (list) of roles.
"""
rval = []
for role in trans.sa_session.query(trans.app.model.Role).filter(trans.app.model.Role.table.c.deleted == false()):
if trans.user_is_admin or trans.app.security_agent.ok_to_display(trans.user, role):
item = role.to_dict(value_mapper={'id': trans.security.encode_id})
encoded_id = trans.security.encode_id(role.id)
item['url'] = url_for('role', id=encoded_id)
rval.append(item)
return rval
@web.legacy_expose_api
def show(self, trans, id, **kwd):
"""
GET /api/roles/{encoded_role_id}
Displays information about a role.
"""
role_id = id
try:
decoded_role_id = trans.security.decode_id(role_id)
except Exception:
trans.response.status = 400
return "Malformed role id ( %s ) specified, unable to decode." % str(role_id)
try:
role = trans.sa_session.query(trans.app.model.Role).get(decoded_role_id)
except Exception:
role = None
if not role or not (trans.user_is_admin or trans.app.security_agent.ok_to_display(trans.user, role)):
trans.response.status = 400
return "Invalid role id ( %s ) specified." % str(role_id)
item = role.to_dict(view='element', value_mapper={'id': trans.security.encode_id})
item['url'] = url_for('role', id=role_id)
return item
@web.legacy_expose_api
def create(self, trans, payload=None, **kwd):
"""
POST /api/roles
Creates a new role.
"""
payload = payload or {}
if not trans.user_is_admin:
trans.response.status = 403
return "You are not authorized to create a new role."
name = payload.get('name', None)
description = payload.get('description', None)
if not name or not description:
trans.response.status = 400
return "Enter a valid name and a description"
if trans.sa_session.query(trans.app.model.Role).filter(trans.app.model.Role.table.c.name == name).first():
trans.response.status = 400
return "A role with that name already exists"
role_type = trans.app.model.Role.types.ADMIN # TODO: allow non-admins to create roles
role = trans.app.model.Role(name=name, description=description, type=role_type)
trans.sa_session.add(role)
user_ids = payload.get('user_ids', [])
users = [trans.sa_session.query(trans.model.User).get(trans.security.decode_id(i)) for i in user_ids]
group_ids = payload.get('group_ids', [])
groups = [trans.sa_session.query(trans.model.Group).get(trans.security.decode_id(i)) for i in group_ids]
# Create the UserRoleAssociations
for user in users:
trans.app.security_agent.associate_user_role(user, role)
# Create the GroupRoleAssociations
for group in groups:
trans.app.security_agent.associate_group_role(group, role)
trans.sa_session.flush()
encoded_id = trans.security.encode_id(role.id)
item = role.to_dict(view='element', value_mapper={'id': trans.security.encode_id})
item['url'] = url_for('role', id=encoded_id)
return [item]
avg_line_length: 39.189474 | max_line_length: 121 | alphanum_fraction: 0.6296

hexsha: 794dd9575af46b084a69857ec2db88fc21819ff2 | size: 6,856 | ext: py | lang: Python
max_stars: lecluvindex.py | cganterh/lecluvindex | b2cd082d460ddb0ca4d7d582fead9babcd457516 | ["MIT"] | count: null | events: null
max_issues: lecluvindex.py | cganterh/lecluvindex | b2cd082d460ddb0ca4d7d582fead9babcd457516 | ["MIT"] | count: 2 | events: 2019-10-18T15:59:38.000Z to 2021-06-01T22:10:19.000Z
max_forks: lecluvindex.py | cganterh/lecluvindex | b2cd082d460ddb0ca4d7d582fead9babcd457516 | ["MIT"] | count: null | events: null
content:
"""Provide UV index reporting capabilities to Le bot."""
from contextlib import contextmanager
from datetime import time
from logging import getLogger
import requests
from telegram.ext import CommandHandler
from telegram.ext.jobqueue import Days
logger = getLogger()
places = {
'antartica': 'ANTÁRTICA',
'antofagasta': 'ANTOFAGASTA',
'arica': 'ARICA (UNIVERSIDAD DE TARAPACÁ)',
'caldera': 'CALDERA',
'concepcion': 'CONCEPCIÓN',
'cordillera_rm': 'CORDILLERA REGIÓN METROPOLITANA',
'coyhaique': 'COYHAIQUE',
'tololo': 'EL TOLOLO',
'iquique': 'IQUIQUE',
'isla_pascua': 'ISLA DE PASCUA',
'serena': 'LA SERENA',
'litoral_central': 'LITORAL CENTRAL',
'pmontt': 'PUERTO MONTT',
'parenas': 'PUNTA ARENAS',
'rancagua': 'RANCAGUA',
'san_pedro': 'SAN PEDRO DE ATACAMA',
'santiago': 'SANTIAGO',
'talca': 'TALCA (UNIVERSIDAD AUTONOMA)',
'temuco': 'TEMUCO (UNIVERSIDAD CATÓLICA DE TEMUCO)',
'chillan': 'TERMAS DE CHILLÁN',
'valdivia': 'VALDIVIA',
}
place_ids = places.keys()
comma_sep_place_ids = ', '.join(place_ids)
valid_places_msg_fragment = 'Valid places are: {}.'.format(comma_sep_place_ids)
@contextmanager
def handle_unhandled_exceptions(bot, chat_id):
"""Handle unhandled exceptions."""
try:
yield
except Exception:
logger.exception('Unhandled exception!')
bot.sendMessage(chat_id=chat_id, text='Something went wrong!')
def print_uv_index(bot, chat_id, place):
"""Print the UV index corresponding to ``place``."""
try:
response = requests.get(
'http://archivos.meteochile.gob.cl/portaldmc/meteochile/js/'
'indice_radiacion.json'
)
data = response.json()
radiation_data = data['RadiacionUVB']
radiation_stgo = next(
filter(lambda p: p['nombre'] == places[place], radiation_data)
)
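        # 'fechapron' carries the forecast date; 'indicepron' apparently comes as
        # "<index>:<category>", so splitting on ':' gives the numeric UV index and its label.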
date = radiation_stgo['fechapron']
index = radiation_stgo['indicepron'].split(':')
text = 'Pronostico Dia: {}. UV index: {}({})'.format(
date, index[0], index[1])
bot.sendMessage(chat_id=chat_id, text=text)
except KeyError:
if place not in places:
error_message_template = (
'{place} is not a valid place. {valid_places_msg_fragment}')
error_message = error_message_template.format(
place=place,
valid_places_msg_fragment=valid_places_msg_fragment
)
bot.sendMessage(chat_id=chat_id, text=error_message)
else:
raise
def get_uv_index(bot, update, args):
"""Parse args to extract a place and print the place's uv index."""
with handle_unhandled_exceptions(bot, update.message.chat_id):
try:
print_uv_index(bot, update.message.chat_id, args[0])
except IndexError:
if len(args) < 1:
place_ids = places.keys()
comma_sep_ids = ', '.join(place_ids)
error_message_template = (
'You have to specify a place. Valid places are: {}.')
error_message = error_message_template.format(comma_sep_ids)
bot.sendMessage(
chat_id=update.message.chat_id, text=error_message)
else:
raise
def callback_uv_index(bot, job):
"""Print UV index for the corresponding job context.
    ``job.context`` should be a tuple of the form: ``(chat_id, place)``
"""
print_uv_index(bot, *job.context)
def start_uv_index(bot, update, job_queue, args):
"""Schedule a calls to ``callback_uv_index``."""
with handle_unhandled_exceptions(bot, update.message.chat_id):
try:
hour = int(args[0])
minute = int(args[1])
place = args[2]
if place not in places:
raise InvalidPlace
job_queue.run_daily(
callback_uv_index,
time=time(hour, minute),
days=(Days.MON, Days.TUE, Days.WED, Days.THU, Days.FRI),
context=(update.message.chat_id, place),
name='UV Index Daily Report'
)
bot.sendMessage(
chat_id=update.message.chat_id,
text='Initiating UV Index daily report.'
)
except IndexError:
            if len(args) != 3:
error_message_template = (
'You have to pass the hour, minute and place in the '
'format: 12 59 santiago. {valid_places_msg_fragment}'
)
error_message = error_message_template.format(
valid_places_msg_fragment=valid_places_msg_fragment)
bot.sendMessage(
chat_id=update.message.chat_id, text=error_message)
else:
raise
except InvalidPlace:
error_message_template = (
'You entered an invalid place. {valid_places_msg_fragment}'
)
error_message = error_message_template.format(
valid_places_msg_fragment=valid_places_msg_fragment)
bot.sendMessage(
chat_id=update.message.chat_id, text=error_message)
except ValueError as ve:
if 'invalid literal for int' in str(ve):
text = (
'You seem to have entered a wrong integer number. Check '
'that you are passing well formatted integers and that '
'the parameters in the correct order (hour, minute, '
'place).'
)
bot.send_message(update.message.chat_id, text)
else:
raise
def stop_uv_index(bot, update, job_queue):
"""Stop an UV index report job."""
with handle_unhandled_exceptions(bot, update.message.chat_id):
for j in job_queue.jobs():
if j.name == 'UV Index Daily Report' and \
j.context[0] == update.message.chat_id:
j.schedule_removal()
bot.sendMessage(
chat_id=update.message.chat_id,
text='Canceled all UV Index daily reports.'
)
send_handler = CommandHandler('uvindex', get_uv_index, pass_args=True)
start_handler = CommandHandler(
'start_uvindex', start_uv_index, pass_args=True, pass_job_queue=True)
stop_handler = CommandHandler(
'stop_uvindex', stop_uv_index, pass_job_queue=True)
class InvalidPlace(ValueError):
"""Raise when the user enters an invalid place."""
def __init__(self, info=None):
"""Initialize the error."""
message = 'An invalid place was passed.'
super().__init__(message + ' ' + info if info else message)
avg_line_length: 30.744395 | max_line_length: 79 | alphanum_fraction: 0.596849

hexsha: 794dd982dd7bf0c9f21bab506fcca95965de6d27 | size: 8,021 | ext: py | lang: Python
max_stars: tests/secret_links/test_sharing.py | wgresshoff/invenio-rdm-records | 91945829884ea4e46b05be26c97f11ffd045bcec | ["MIT"] | count: null | events: null
max_issues: tests/secret_links/test_sharing.py | wgresshoff/invenio-rdm-records | 91945829884ea4e46b05be26c97f11ffd045bcec | ["MIT"] | count: null | events: null
max_forks: tests/secret_links/test_sharing.py | wgresshoff/invenio-rdm-records | 91945829884ea4e46b05be26c97f11ffd045bcec | ["MIT"] | count: null | events: null
content:
# -*- coding: utf-8 -*-
#
# Copyright (C) 2021 CERN.
# Copyright (C) 2021 TU Wien.
# Copyright (C) 2021 Northwestern University.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Test sharing of restricted records via secret links."""
from io import BytesIO
import pytest
from flask_principal import AnonymousIdentity, Identity, UserNeed
from invenio_access.permissions import any_user, authenticated_user
from invenio_db import db
from invenio_records_resources.services.errors import PermissionDeniedError
from marshmallow.exceptions import ValidationError
from invenio_rdm_records.proxies import current_rdm_records
from invenio_rdm_records.records import RDMRecord
from invenio_rdm_records.secret_links.permissions import LinkNeed
@pytest.fixture()
def service(running_app, es_clear):
"""RDM Record Service."""
return running_app.app.extensions['invenio-rdm-records'].records_service
@pytest.fixture()
def restricted_record(service, minimal_record, identity_simple):
"""Restricted record fixture."""
data = minimal_record.copy()
data["files"]["enabled"] = True
data["access"]["record"] = "restricted"
data["access"]["files"] = "restricted"
# Create
draft = service.create(identity_simple, data)
# Add a file
service.draft_files.init_files(
draft.id, identity_simple, data=[{'key': 'test.pdf'}])
service.draft_files.set_file_content(
draft.id, 'test.pdf', identity_simple, BytesIO(b'test file')
)
service.draft_files.commit_file(
draft.id, 'test.pdf', identity_simple)
# Publish
record = service.publish(draft.id, identity_simple)
# Put in edit mode so that draft exists
draft = service.edit(draft.id, identity_simple)
return record
def test_invalid_level(service, restricted_record, identity_simple):
"""Test invalid permission level."""
record = restricted_record
with pytest.raises(ValidationError):
service.secret_links.create(record.id, identity_simple, {
"permission": "invalid"})
def test_permission_levels(
service, restricted_record, identity_simple, client):
"""Test invalid permission level."""
id_ = restricted_record.id
view_link = service.secret_links.create(
id_, identity_simple, {"permission": "view"})
preview_link = service.secret_links.create(
id_, identity_simple, {"permission": "preview"})
edit_link = service.secret_links.create(
id_, identity_simple, {"permission": "edit"})
# == Anonymous user
anon = AnonymousIdentity()
anon.provides.add(any_user)
# Deny anonymous to read restricted record and draft
pytest.raises(PermissionDeniedError, service.read, id_, anon)
pytest.raises(PermissionDeniedError, service.files.list_files, id_, anon)
pytest.raises(PermissionDeniedError, service.read_draft, id_, anon)
pytest.raises(
PermissionDeniedError, service.draft_files.list_files, id_, anon)
# === Anonymous user with view link ===
anon.provides.add(LinkNeed(view_link.id))
# Allow anonymous with view link to read record
service.read(id_, anon)
service.files.list_files(id_, anon)
# Deny anonymous with view link to read draft
pytest.raises(PermissionDeniedError, service.read_draft, id_, anon)
pytest.raises(
PermissionDeniedError, service.draft_files.list_files, id_, anon)
# === Anonymous user with preview link ===
anon.provides.remove(LinkNeed(view_link.id))
anon.provides.add(LinkNeed(preview_link.id))
# Allow anonymous with preview link to read record and draft
service.read(id_, anon)
service.files.list_files(id_, anon)
service.read_draft(id_, anon)
service.draft_files.list_files(id_, anon)
service.draft_files.get_file_content(id_, 'test.pdf', anon)
service.draft_files.read_file_metadata(id_, 'test.pdf', anon)
# Deny anonymous with preview link to update/delete/edit/publish draft
pytest.raises(PermissionDeniedError, service.update_draft, id_, anon, {})
pytest.raises(PermissionDeniedError, service.edit, id_, anon)
pytest.raises(PermissionDeniedError, service.delete_draft, id_, anon)
pytest.raises(PermissionDeniedError, service.new_version, id_, anon)
pytest.raises(PermissionDeniedError, service.publish, id_, anon)
pytest.raises(
PermissionDeniedError,
service.draft_files.init_files, id_, anon, {})
pytest.raises(
PermissionDeniedError,
service.draft_files.update_file_metadata, id_, 'test.pdf', anon, {})
pytest.raises(
PermissionDeniedError,
service.draft_files.commit_file, id_, 'test.pdf', anon)
pytest.raises(
PermissionDeniedError,
service.draft_files.delete_file, id_, 'test.pdf', anon)
pytest.raises(
PermissionDeniedError,
service.draft_files.delete_all_files, id_, anon)
pytest.raises(
PermissionDeniedError,
service.draft_files.set_file_content, id_, 'test.pdf', anon, None)
# === Authenticated user with edit link ===
i = Identity(100)
i.provides.add(UserNeed(100))
i.provides.add(authenticated_user)
i.provides.add(LinkNeed(edit_link.id))
# Allow user with edit link to read record and draft
service.read(id_, i)
service.files.list_files(id_, i)
service.read_draft(id_, i)
service.draft_files.list_files(id_, i)
service.draft_files.get_file_content(id_, 'test.pdf', i)
service.draft_files.read_file_metadata(id_, 'test.pdf', i)
# Deny user with edit link to share the links
pytest.raises(
PermissionDeniedError,
service.secret_links.create, id_, i, {})
pytest.raises(
PermissionDeniedError, service.secret_links.read_all,
id_, i)
pytest.raises(
PermissionDeniedError, service.secret_links.read,
id_, i, edit_link.id)
pytest.raises(
PermissionDeniedError,
service.secret_links.update, id_, i, edit_link.id,
{})
pytest.raises(
PermissionDeniedError,
service.secret_links.delete, id_, i, edit_link.id)
# Allow user with edit link to update, delete, edit, publish
draft = service.read_draft(id_, i)
data = draft.data
data['metadata']['title'] = 'allow it'
service.update_draft(id_, i, data)
service.delete_draft(id_, i)
service.edit(id_, i)
service.publish(id_, i)
new_draft = service.new_version(id_, i)
new_id = new_draft.id
service.import_files(new_id, i)
service.draft_files.delete_file(new_id, 'test.pdf', i)
def test_read_restricted_record_with_secret_link(
service, minimal_record, identity_simple, client
):
"""Test access to a restricted record via a shared link."""
record_data = minimal_record.copy()
record_data["access"]["files"] = "restricted"
record_data["access"]["record"] = "restricted"
draft = service.create(identity=identity_simple, data=record_data)
record = service.publish(id_=draft.id, identity=identity_simple)
recid = record.id
link = record._record.parent.access.links.create(
permission_level="view",
)
# FIXME without this, commit() won't work (b/c of jsonschema)
record._record.pop("status", None)
record._record.commit()
record._record.parent.commit()
db.session.commit()
# the record shouldn't be accessible without login and/or token
response = client.get(f"/records/{recid}")
assert response.status_code == 403
# but it should be accessible with the token
response = client.get(
f"/records/{recid}",
query_string={"token": link.token},
)
assert response.status_code == 200
# the record shouldn't be showing up in search results, however
RDMRecord.index.refresh()
res = client.get(
"/records", query_string={"q": f"id:{recid}"}
)
assert res.status_code == 200
assert res.json["hits"]["total"] == 0
avg_line_length: 35.49115 | max_line_length: 77 | alphanum_fraction: 0.705772

hexsha: 794dd9c4b37030dd9336cd310a69b2b5b684ac82 | size: 6,661 | ext: py | lang: Python
max_stars: plenum/test/view_change/test_view_change_timeout.py | andkononykhin/plenum | 28dc1719f4b7e80d31dafbadb38cfec4da949886 | ["Apache-2.0"] | count: null | events: null
max_issues: plenum/test/view_change/test_view_change_timeout.py | andkononykhin/plenum | 28dc1719f4b7e80d31dafbadb38cfec4da949886 | ["Apache-2.0"] | count: 1 | events: 2019-03-20T14:57:22.000Z to 2019-03-20T15:01:55.000Z
max_forks: plenum/test/view_change/test_view_change_timeout.py | andkononykhin/plenum | 28dc1719f4b7e80d31dafbadb38cfec4da949886 | ["Apache-2.0"] | count: null | events: null
content:
import pytest
from plenum.test.delayers import vcd_delay
from plenum.test.stasher import delay_rules
from plenum.test.helper import waitForViewChange, perf_monitor_disabled, view_change_timeout
from plenum.test.node_request.helper import sdk_ensure_pool_functional
from plenum.test.node_catchup.helper import ensure_all_nodes_have_same_data
from plenum.test.spy_helpers import get_count, getAllReturnVals
from plenum.test.test_node import get_master_primary_node, \
ensureElectionsDone
from stp_core.loop.eventually import eventually
nodeCount = 7
VIEW_CHANGE_TIMEOUT = 5
@pytest.fixture(scope="module")
def tconf(tconf):
with view_change_timeout(tconf, VIEW_CHANGE_TIMEOUT), \
perf_monitor_disabled(tconf):
yield tconf
def _check_view_change_completed_count(node):
return get_count(node, node._check_view_change_completed)
def _check_view_change_completed_true(node):
return len(getAllReturnVals(node, node._check_view_change_completed, compare_val_to=True))
def _check_view_change_completed_stats(nodes):
return {node.name: (_check_view_change_completed_count(node), _check_view_change_completed_true(node))
for node in nodes}
def check_watchdog_called_expected_times(nodes, stats, times):
def call_count(node):
return _check_view_change_completed_count(node) - stats[node.name][0]
def true_count(node):
return _check_view_change_completed_true(node) - stats[node.name][1]
n = nodeCount
f = (n - 1) // 3
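    # With n = 3f + 1 fault tolerance, the watchdog must have fired on at least n - f nodes
    # in total, while no single node may have fired (or returned True) more than `times` times.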
call_counts = [call_count(node) for node in nodes]
true_counts = [true_count(node) for node in nodes]
ok = True
ok = ok and all(v <= times for v in call_counts)
ok = ok and all(v <= times for v in true_counts)
ok = ok and sum(call_counts) >= times * (n - f)
ok = ok and sum(true_counts) >= times * (n - f)
if not ok:
actual = ""
for node in nodes:
actual += "{}: called {}, returned true {}\n".format(node.name, call_count(node), true_count(node))
raise AssertionError("Watchdog expected to be called {} times, actual counts:\n{}".format(times, actual))
def stop_next_primary(nodes):
m_next_primary_name = nodes[0]._elector._next_primary_node_name_for_master(
nodes[0].nodeReg, nodes[0].nodeIds)
next(node for node in nodes if node.name == m_next_primary_name).stop()
alive_nodes = list(filter(lambda x: x.name != m_next_primary_name, nodes))
return alive_nodes
def start_view_change(nodes, next_view_no):
for n in nodes:
n.view_changer.start_view_change(next_view_no)
@pytest.fixture()
def setup(txnPoolNodeSet, looper):
m_primary_node = get_master_primary_node(list(txnPoolNodeSet))
initial_view_no = waitForViewChange(looper, txnPoolNodeSet)
timeout_callback_stats = _check_view_change_completed_stats(txnPoolNodeSet)
return m_primary_node, initial_view_no, timeout_callback_stats
def test_view_change_retry_by_timeout(
txnPoolNodeSet, looper, tconf, setup, sdk_pool_handle, sdk_wallet_client):
"""
Verifies that a view change is restarted if it is not completed in time
"""
m_primary_node, initial_view_no, timeout_callback_stats = setup
stashers = [n.nodeIbStasher for n in txnPoolNodeSet]
with delay_rules(stashers, vcd_delay()):
start_view_change(txnPoolNodeSet, initial_view_no + 1)
# First view change should fail, because of delayed ViewChangeDone
# messages. This then leads to new view change that we need.
with pytest.raises(AssertionError):
ensureElectionsDone(looper=looper,
nodes=txnPoolNodeSet,
customTimeout=1.5 * VIEW_CHANGE_TIMEOUT)
# Now as ViewChangeDone messages are unblocked view changes should finish successfully
ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)
ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet)
new_m_primary_node = get_master_primary_node(list(txnPoolNodeSet))
assert m_primary_node.name != new_m_primary_node.name
# The timeout method was called one time
check_watchdog_called_expected_times(txnPoolNodeSet, timeout_callback_stats, 1)
# 2 view changes have been initiated
for node in txnPoolNodeSet:
assert node.viewNo - initial_view_no == 2
sdk_ensure_pool_functional(looper, txnPoolNodeSet,
sdk_wallet_client,
sdk_pool_handle)
def test_multiple_view_change_retries_by_timeouts(
txnPoolNodeSet, looper, tconf, setup,
sdk_pool_handle, sdk_wallet_client):
"""
Verifies that a view change is restarted each time
when the previous one is timed out
"""
_, initial_view_no, timeout_callback_stats = setup
stashers = [n.nodeIbStasher for n in txnPoolNodeSet]
with delay_rules(stashers, vcd_delay()):
start_view_change(txnPoolNodeSet, initial_view_no + 1)
# Wait until timeout callback is called 3 times
looper.run(eventually(check_watchdog_called_expected_times,
txnPoolNodeSet, timeout_callback_stats, 3,
retryWait=1,
timeout=3 * VIEW_CHANGE_TIMEOUT + 2))
# View changes should fail
with pytest.raises(AssertionError):
ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet, customTimeout=1)
# This view change must be completed with no problems
ensureElectionsDone(looper=looper, nodes=txnPoolNodeSet)
ensure_all_nodes_have_same_data(looper, nodes=txnPoolNodeSet)
# 4 view changes must have been initiated (initial one + 3 retries)
for node in txnPoolNodeSet:
assert node.viewNo - initial_view_no == 4
sdk_ensure_pool_functional(looper, txnPoolNodeSet,
sdk_wallet_client,
sdk_pool_handle)
def test_view_change_restarted_by_timeout_if_next_primary_disconnected(
txnPoolNodeSet, looper, tconf, setup):
"""
Verifies that a view change is restarted by timeout
if the next primary has been disconnected
"""
_, initial_view_no, timeout_callback_stats = setup
start_view_change(txnPoolNodeSet, initial_view_no + 1)
alive_nodes = stop_next_primary(txnPoolNodeSet)
ensureElectionsDone(looper=looper, nodes=alive_nodes, instances_list=range(3))
# There were 2 view changes
for node in alive_nodes:
assert (node.viewNo - initial_view_no) == 2
# The timeout method was called 1 time
check_watchdog_called_expected_times(txnPoolNodeSet, timeout_callback_stats, 1)
avg_line_length: 37.846591 | max_line_length: 113 | alphanum_fraction: 0.720462

hexsha: 794dda10140bcdca45194e5023f970414f5f58ba | size: 1,396 | ext: py | lang: Python
max_stars: changes/models/bazeltarget.py | vault-the/changes | 37e23c3141b75e4785cf398d015e3dbca41bdd56 | ["Apache-2.0"] | count: 443 | events: 2015-01-03T16:28:39.000Z to 2021-04-26T16:39:46.000Z
max_issues: changes/models/bazeltarget.py | vault-the/changes | 37e23c3141b75e4785cf398d015e3dbca41bdd56 | ["Apache-2.0"] | count: 12 | events: 2015-07-30T19:07:16.000Z to 2016-11-07T23:11:21.000Z
max_forks: changes/models/bazeltarget.py | vault-the/changes | 37e23c3141b75e4785cf398d015e3dbca41bdd56 | ["Apache-2.0"] | count: 47 | events: 2015-01-09T10:04:00.000Z to 2020-11-18T17:58:19.000Z
content:
from __future__ import absolute_import, division
import uuid
from datetime import datetime
from sqlalchemy import Column, DateTime, ForeignKey, Text, Integer
from changes.config import db
from changes.constants import Result, ResultSource, Status
from changes.db.types.enum import Enum
from changes.db.types.guid import GUID
class BazelTarget(db.Model):
__tablename__ = 'bazeltarget'
id = Column(GUID, nullable=False, primary_key=True, default=uuid.uuid4)
step_id = Column(GUID, ForeignKey('jobstep.id', ondelete="CASCADE"))
job_id = Column(GUID, ForeignKey('job.id', ondelete="CASCADE"), nullable=False)
name = Column(Text, nullable=False)
status = Column(Enum(Status), nullable=False, default=Status.unknown)
result = Column(Enum(Result), default=Result.unknown, nullable=False)
result_source = Column(Enum(ResultSource), default=ResultSource.from_self)
duration = Column(Integer, default=0)
date_created = Column(DateTime, default=datetime.utcnow, nullable=False)
def __init__(self, **kwargs):
super(BazelTarget, self).__init__(**kwargs)
if self.id is None:
self.id = uuid.uuid4()
if self.result is None:
self.result = Result.unknown
if self.status is None:
self.status = Status.unknown
if self.date_created is None:
self.date_created = datetime.utcnow()
avg_line_length: 38.777778 | max_line_length: 83 | alphanum_fraction: 0.712751

hexsha: 794dda564e4d1989129ebc4546d74e723cb3ab87 | size: 7,092 | ext: py | lang: Python
max_stars: python/Kaos/ktime.py | D34D9001/R4ND0M_73571NG | d198c67b728f55cc39c6aab4f418a53483e3487b | ["MIT"] | count: 1 | events: 2022-01-05T06:53:39.000Z to 2022-01-05T06:53:39.000Z
max_issues: python/Kaos/ktime.py | D34D9001/R4ND0M_73571NG | d198c67b728f55cc39c6aab4f418a53483e3487b | ["MIT"] | count: 5 | events: 2022-01-05T08:06:34.000Z to 2022-01-11T05:31:08.000Z
max_forks: python/Kaos/ktime.py | D34D9001/R4ND0M_73571NG | d198c67b728f55cc39c6aab4f418a53483e3487b | ["MIT"] | count: null | events: null
content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created: Aug 2017
@author: D34D9001@9R0GR4M13
"""
import kerri
import time
from datetime import datetime
###################
# TIME MANAGEMENT #
###################
class Time(object):
""" This class controls the flow of time in your program """
def __init__(self):
self.days = {0:'Monday', 1:'Tuesday', 2:'Wednesday', 3:'Thursday',
4:'Friday', 5:'Saturday', 6:'Sunday'}
self.months = {1:'January', 2:'February', 3:'March', 4:'April',5:'May',
6:'June', 7:'July', 8:'August', 9:'September', 10:'October',
11:'November', 12:'December'}
def __str__(self):
now = time.localtime(time.time())
date = str("%s/%s/%s" % (now[1], now[2], now[0]))
if now[3] == 0:
hour = "0%s" % now[3]
ampm = "am"
else:
pass
if now[3] > 12:
hour = now[3] - 12
ampm = "pm"
elif now[3] != 0 and now[3] <= 12 and now[3] >= 10:
hour = "%s" % now[3]
ampm = "am"
else:
hour = "0%s" % now[3]
ampm = "am"
if now[5] <= 9:
second = "0%s" % now[5]
else:
second = now[5]
if now[4] <= 9:
min = "0%s" % now[4]
else:
min = now[4]
ktime = str("%s:%s:%s %s" % (hour,min,second, ampm))
return str(date)+" "+str(ktime)
def marker(self, *args):
"""
Get Marker For Timing Things
"""
if len(args) >= 1:
raise kerri.ExcessArguments("marker()", 0)
else:
pass
marker = time.time()
return marker
def wait(self, itime='1', *args):
""" Wait for specified time {in seconds} before continuing """
if len(args) >= 1:
raise kerri.ExcessArguments("wait()", 1)
else:
pass
time.sleep(itime)
def now(self, *args):
""" Return current date and time """
if len(args) >= 1:
raise kerri.ExcessArguments("now()", 0)
else:
pass
return datetime.now()
def p_now(self, *args):
""" Return current date and time in user friendly format """
if len(args) >= 1:
raise kerri.ExcessArguments("p_now()", 0)
else:
pass
now = time.localtime(time.time())
day = now[6]
return str("%s %s/%s/%s %s:%s:%s" % (self.days[day], now[1], now[2], now[0],
now[3], now[4], now[5]))
def c_hour(self, *args):
""" Return the currnet hour """
if len(args) >= 1:
raise kerri.ExcessArguments("c_hour()", 0)
else:
pass
now = time.localtime(time.time())
hour = now[3]
return hour
def c_min(self, *args):
""" Return the current minute [0-59]"""
if len(args) >= 1:
raise kerri.ExcessArguments("c_min()", 0)
else:
pass
now = time.localtime(time.time())
minute = now[4]
return minute
def c_sec(self, *args):
""" Return the current second [0-59] """
if len(args) >= 1:
raise kerri.ExcessArguments("c_sec()", 0)
else:
pass
now = time.localtime(time.time())
second = now[5]
return second
def month(self, *args):
""" Return current month [Returns months name not integer] """
if len(args) >= 1:
raise kerri.ExcessArguments("month()", 0)
else:
pass
now = time.localtime(time.time())
month = now[1]
return self.months[month]
def m_day(self, *args):
""" Return the day of the month """
if len(args) >= 1:
raise kerri.ExcessArguments("m_day()", 0)
else:
pass
now = time.localtime(time.time())
d_month = now[2]
return d_month
def year(self, *args):
""" Return the year """
if len(args) >= 1:
raise kerri.ExcessArguments("year()", 0)
else:
pass
now = time.localtime(time.time())
year = now[0]
return year
def _time(self, *args):
""" Return the current time [HH:MM:SS]"""
if len(args) >= 1:
raise kerri.ExcessArguments("_time()", 0)
else:
pass
now = time.localtime(time.time())
if now[3] == 0:
hour = "0%s" % now[3]
ampm = "am"
else:
pass
if now[3] > 12:
hour = now[3] - 12
ampm = "pm"
elif now[3] != 0 and now[3] <= 12 and now[3] >= 10:
hour = "%s" % now[3]
ampm = "am"
else:
hour = "0%s" % now[3]
ampm = "am"
if now[5] <= 9:
second = "0%s" % now[5]
else:
second = now[5]
if now[4] <= 9:
min = "0%s" % now[4]
else:
min = now[4]
return str("%s:%s:%s %s" % (hour,min,second, ampm))
def _date(self, *args):
""" Return the current date [MM/DD/YY] """
if len(args) >= 1:
raise kerri.ExcessArguments("_date()", 0)
else:
pass
now = time.localtime(time.time())
return str("%s/%s/%s" % (now[1], now[2], now[0]))
def w_day(self, *args):
""" Return the current day of the week [Returns name of the
day not integer value] {EX: Monday, Tuesday Wed...etc,.}"""
if len(args) >= 1:
raise kerri.ExcessArguments("w_day()", 0)
else:
pass
now = time.localtime(time.time())
day = now[6]
return self.days[day]
def y_day(self, *args):
""" Return integer value of the current day of the year """
if len(args) >= 1:
raise kerri.ExcessArguments("y_day()", 0)
else:
pass
now = time.localtime(time.time())
d_num = now[7]
return d_num
def summer_chk(self, *args):
""" Returns (1) if it is summer time, (0) if it is not and (-1)
if it can not be determined """
if len(args) >= 1:
raise kerri.ExcessArguments("summer_chk()", 0)
else:
pass
now = time.localtime(time.time())
check = now[8]
if check == 1:
return 1, ":) It\'s Summer Time!!!"
elif check == 0:
return 0, ":( It\'s Not Summer Time!!!"
else:
return -1, "I\'m not even really sure who I am...\nI don\'t think I am qualified to answer that."
#########
# INITs #
#########
ktime = Time()
marker = ktime.marker
wait = ktime.wait
now = ktime.now
p_now = ktime.p_now
c_hour = ktime.c_hour
c_min = ktime.c_min
c_sec = ktime.c_sec
month = ktime.month
m_day = ktime.m_day
year = ktime.year
_time = ktime._time
_date = ktime._date
w_day = ktime.w_day
y_day = ktime.y_day
summer_chk = ktime.summer_chk
avg_line_length: 26.762264 | max_line_length: 109 | alphanum_fraction: 0.47335

hexsha: 794dda947fac8e50fd855b98da37bf99b3f2fb10 | size: 344 | ext: py | lang: Python
max_stars: dojo/github_issue_link/urls.py | mtcolman/django-DefectDojo | 76175aca446e077884bdb5e1d8e2a671a0840775 | ["BSD-3-Clause"] | count: 1,772 | events: 2018-01-22T23:32:15.000Z to 2022-03-31T14:49:33.000Z
max_issues: dojo/github_issue_link/urls.py | mtcolman/django-DefectDojo | 76175aca446e077884bdb5e1d8e2a671a0840775 | ["BSD-3-Clause"] | count: 3,461 | events: 2018-01-20T19:12:28.000Z to 2022-03-31T17:14:39.000Z
max_forks: dojo/github_issue_link/urls.py | mtcolman/django-DefectDojo | 76175aca446e077884bdb5e1d8e2a671a0840775 | ["BSD-3-Clause"] | count: 1,173 | events: 2018-01-23T07:10:23.000Z to 2022-03-31T14:40:43.000Z
content:
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^github-webhook', views.webhook, name='github_web_hook'),
url(r'^github/add', views.new_github, name='add_github'),
url(r'^github/(?P<tid>\d+)/delete$', views.delete_github,
name='delete_github'),
url(r'^github$', views.github, name='github')]
avg_line_length: 34.4 | max_line_length: 67 | alphanum_fraction: 0.668605

hexsha: 794ddb0e90f69ec2e32158175344ab71424663e8 | size: 1,187 | ext: py | lang: Python
max_stars: kevin/tests/leet/test_restore_the_array.py | kalyons11/kevin | 4e222fcd8ee7f2a275390f532c2fa5fef39060e8 | ["MIT"] | count: 1 | events: 2017-08-03T04:21:14.000Z to 2017-08-03T04:21:14.000Z
max_issues: kevin/tests/leet/test_restore_the_array.py | kalyons11/kevin | 4e222fcd8ee7f2a275390f532c2fa5fef39060e8 | ["MIT"] | count: null | events: null
max_forks: kevin/tests/leet/test_restore_the_array.py | kalyons11/kevin | 4e222fcd8ee7f2a275390f532c2fa5fef39060e8 | ["MIT"] | count: null | events: null
content:
"""
https://leetcode.com/problems/restore-the-array/
"""
from unittest import TestCase
from kevin.leet.restore_the_array import Solution
class TestRestoreTheArray(TestCase):
def _base_test_restore_the_array(self, s: str, k: int, expected: int):
sol = Solution()
actual = sol.number_of_arrays(s, k)
assert expected == actual, (expected, actual)
def test_restore_the_array_basic(self):
s = '1000'
k = 10000
expected = 1
self._base_test_restore_the_array(s, k, expected)
def test_restore_the_array_none(self):
s = '1000'
k = 10
expected = 0
self._base_test_restore_the_array(s, k, expected)
def test_restore_the_array_medium(self):
s = '1317'
k = 2000
expected = 8
self._base_test_restore_the_array(s, k, expected)
def test_restore_the_array_singleton(self):
s = '2020'
k = 30
expected = 1
self._base_test_restore_the_array(s, k, expected)
def test_restore_the_array_large(self):
s = '1234567890'
k = 90
expected = 34
self._base_test_restore_the_array(s, k, expected)
avg_line_length: 23.74 | max_line_length: 74 | alphanum_fraction: 0.634372

hexsha: 794ddb20968ee6a04991592b2b833a00d6e1c73a | size: 4,259 | ext: py | lang: Python
max_stars: services/snet_grpc_wrapper.py | TENSAE21/network-analytics-services | 72c5dbe93e11e98f003758e646acf453073c43aa | ["MIT"] | count: null | events: null
max_issues: services/snet_grpc_wrapper.py | TENSAE21/network-analytics-services | 72c5dbe93e11e98f003758e646acf453073c43aa | ["MIT"] | count: null | events: null
max_forks: services/snet_grpc_wrapper.py | TENSAE21/network-analytics-services | 72c5dbe93e11e98f003758e646acf453073c43aa | ["MIT"] | count: null | events: null
content:
# Tested on python3.6
import grpc
from concurrent import futures
import time
import logging
import network_analytics_pb2
import network_analytics_pb2_grpc
import bipartite_graphs
SLEEP_TIME = 86400 # One day
class NetworkAnalytics(network_analytics_pb2_grpc.NetowrkAnalyticsServicer):
def BipartiteGraph(self,request,context):
print('>>>>>>>>>>>>>>In endpoint BipartiteGraph')
print(time.strftime("%c"))
nodes = request.nodes
edges = request.edges
b = bipartite_graphs.BipartiteGraphs()
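        # The repeated Edge messages from the request are unpacked into plain Python lists
        # so the bipartite_graphs helper receives ordinary dicts rather than protobuf types.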
try:
edges_list = []
for edges_proto in edges:
edges_list.append(list(edges_proto.edge))
nodes_in = {"bipartite_0":list(nodes.bipartite_0),"bipartite_1":list(nodes.bipartite_1)}
edges_in = {"edges": edges_list}
ret = b.bipartite_graph(nodes_in, edges_in)
resp = network_analytics_pb2.BipartiteGraphResponse(status=ret[0],message=ret[1])
if resp.status:
edges_resp = []
for edge_ret in ret[2]["edges"]:
edges_resp.append(network_analytics_pb2.Edge(edge=edge_ret))
graph_resp = network_analytics_pb2.BipartiteGraph(bipartite_0=ret[2]["bipartite_0"],bipartite_1=ret[2]["bipartite_1"],edges=edges_resp)
resp = network_analytics_pb2.BipartiteGraphResponse(status=ret[0],message=ret[1],output=graph_resp)
print('status:',resp.status)
print('message:',resp.message)
print('Waiting for next call on port 5000.')
return resp
except Exception as e:
logging.exception("message")
resp = network_analytics_pb2.BipartiteGraphResponse(status=False,message=str(e))
print('status:', resp.status)
print('message:', resp.message)
print('Waiting for next call on port 5000.')
return resp
def ProjectedGraph(self,request,context):
print('>>>>>>>>>>>>>>In endpoint ProjectedGraph')
print(time.strftime("%c"))
bipartite_graph = request.graph
nodes = request.nodes
weight = request.weight
print (bipartite_graph)
b = bipartite_graphs.BipartiteGraphs()
try:
edges_list = []
for edges_proto in bipartite_graph.edges:
edges_list.append(list(edges_proto.edge))
bipartite_graph_in = {"bipartite_0":list(bipartite_graph.bipartite_0),"bipartite_1":list(bipartite_graph.bipartite_1),"edges":edges_list}
nodes_in = {"nodes": list(nodes)}
ret = b.projected_graph(bipartite_graph_in, nodes_in, weight)
resp = network_analytics_pb2.ProjecetedGraphResponse(status=ret[0],message=ret[1])
if resp.status:
edges_resp = []
for edge_ret in ret[2]["edges"]:
edges_resp.append(network_analytics_pb2.Edge(edge=edge_ret))
graph_resp = network_analytics_pb2.Graph(nodes=ret[2]["nodes"],edges=edges_resp,weights=ret[2]["weights"])
resp = network_analytics_pb2.ProjecetedGraphResponse(status=ret[0],message=ret[1],output=graph_resp)
print('status:',resp.status)
print('message:',resp.message)
print('Waiting for next call on port 5000.')
return resp
except Exception as e:
logging.exception("message")
resp = network_analytics_pb2.ProjecetedGraphResponse(status=False,message=str(e))
print('status:', resp.status)
print('message:', resp.message)
print('Waiting for next call on port 5000.')
return resp
def serve():
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
network_analytics_pb2_grpc.add_NetowrkAnalyticsServicer_to_server(NetworkAnalytics(), server)
print('Starting server. Listening on port 5000.')
server.add_insecure_port('127.0.0.1:5000')
server.start()
try:
while True:
time.sleep(SLEEP_TIME)
except KeyboardInterrupt:
server.stop(0)
__end__ = '__end__'
if __name__ == '__main__':
serve()
pass
avg_line_length: 24.477011 | max_line_length: 151 | alphanum_fraction: 0.625264

hexsha: 794ddb3b3ae984b8b94aedaf22a470aaf6098a63 | size: 1,512 | ext: py | lang: Python
max_stars: appengine_module/gae_ts_mon/instrument_endpoint.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | ["BSD-3-Clause"] | count: 74 | events: 2015-04-01T02:35:15.000Z to 2021-12-17T22:10:56.000Z
max_issues: appengine_module/gae_ts_mon/instrument_endpoint.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | ["BSD-3-Clause"] | count: 123 | events: 2015-04-01T04:02:57.000Z to 2022-03-02T12:49:55.000Z
max_forks: appengine_module/gae_ts_mon/instrument_endpoint.py | allaparthi/monorail | e18645fc1b952a5a6ff5f06e0c740d75f1904473 | ["BSD-3-Clause"] | count: 32 | events: 2015-04-03T01:40:47.000Z to 2021-11-13T15:20:13.000Z
content:
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import time
import functools
# Not all apps enable endpoints. If the import fails, the app will not
# use @instrument_endpoint() decorator, so it is safe to ignore it.
try:
import endpoints
except ImportError: # pragma: no cover
pass
from infra_libs.ts_mon import exporter
from infra_libs.ts_mon.common import http_metrics
def instrument(time_fn=time.time):
"""Decorator to instrument Cloud Endpoint methods."""
def decorator(fn):
method_name = fn.__name__
assert method_name
@functools.wraps(fn)
def decorated(service, *args, **kwargs):
service_name = service.__class__.__name__
endpoint_name = '/_ah/spi/%s.%s' % (service_name, method_name)
start_time = time_fn()
response_status = 0
time_now = time_fn()
try:
with exporter.parallel_flush(time_now):
ret = fn(service, *args, **kwargs)
response_status = 200
return ret
except endpoints.ServiceException as e:
response_status = e.http_status
raise
except Exception:
response_status = 500
raise
finally:
elapsed_ms = int((time_fn() - start_time) * 1000)
http_metrics.update_http_server_metrics(endpoint_name, response_status,
elapsed_ms)
return decorated
return decorator
avg_line_length: 28.528302 | max_line_length: 79 | alphanum_fraction: 0.671958

hexsha: 794ddc06c43a2e0280eb014c60a8503fa7e76c27 | size: 4,413 | ext: py | lang: Python
max_stars: PaddleSeg/paddleseg/datasets/ade.py | Pd-RegSea/pdrs-4-7 | 7b75690d30346a9ac13f730a20c52c327d7123ad | ["Apache-2.0"] | count: 2 | events: 2021-08-19T08:06:22.000Z to 2021-11-09T10:42:17.000Z
max_issues: PaddleSeg/paddleseg/datasets/ade.py | Pd-RegSea/pdrs-4-7 | 7b75690d30346a9ac13f730a20c52c327d7123ad | ["Apache-2.0"] | count: null | events: null
max_forks: PaddleSeg/paddleseg/datasets/ade.py | Pd-RegSea/pdrs-4-7 | 7b75690d30346a9ac13f730a20c52c327d7123ad | ["Apache-2.0"] | count: 1 | events: 2021-07-14T08:43:31.000Z to 2021-07-14T08:43:31.000Z
content:
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
from PIL import Image
from paddleseg.datasets import Dataset
from paddleseg.utils.download import download_file_and_uncompress
from paddleseg.utils import seg_env
from paddleseg.cvlibs import manager
from paddleseg.transforms import Compose
import paddleseg.transforms.functional as F
URL = "http://data.csail.mit.edu/places/ADEchallenge/ADEChallengeData2016.zip"
@manager.DATASETS.add_component
class ADE20K(Dataset):
"""
ADE20K dataset `http://sceneparsing.csail.mit.edu/`.
Args:
transforms (list): A list of image transformations.
        dataset_root (str, optional): The ADE20K dataset directory. Default: None.
mode (str, optional): A subset of the entire dataset. It should be one of ('train', 'val'). Default: 'train'.
edge (bool, optional): Whether to compute edge while training. Default: False
"""
NUM_CLASSES = 150
def __init__(self, transforms, dataset_root=None, mode='train', edge=False):
self.dataset_root = dataset_root
self.transforms = Compose(transforms)
mode = mode.lower()
self.mode = mode
self.file_list = list()
self.num_classes = self.NUM_CLASSES
self.ignore_index = 255
self.edge = edge
if mode not in ['train', 'val']:
raise ValueError(
"`mode` should be one of ('train', 'val') in ADE20K dataset, but got {}."
.format(mode))
if self.transforms is None:
raise ValueError("`transforms` is necessary, but it is None.")
if self.dataset_root is None:
self.dataset_root = download_file_and_uncompress(
url=URL,
savepath=seg_env.DATA_HOME,
extrapath=seg_env.DATA_HOME,
extraname='ADEChallengeData2016')
elif not os.path.exists(self.dataset_root):
self.dataset_root = os.path.normpath(self.dataset_root)
savepath, extraname = self.dataset_root.rsplit(
sep=os.path.sep, maxsplit=1)
self.dataset_root = download_file_and_uncompress(
url=URL,
savepath=savepath,
extrapath=savepath,
extraname=extraname)
if mode == 'train':
img_dir = os.path.join(self.dataset_root, 'images/training')
label_dir = os.path.join(self.dataset_root, 'annotations/training')
elif mode == 'val':
img_dir = os.path.join(self.dataset_root, 'images/validation')
label_dir = os.path.join(self.dataset_root,
'annotations/validation')
img_files = os.listdir(img_dir)
label_files = [i.replace('.jpg', '.png') for i in img_files]
for i in range(len(img_files)):
img_path = os.path.join(img_dir, img_files[i])
label_path = os.path.join(label_dir, label_files[i])
self.file_list.append([img_path, label_path])
def __getitem__(self, idx):
image_path, label_path = self.file_list[idx]
if self.mode == 'val':
im, _ = self.transforms(im=image_path)
label = np.asarray(Image.open(label_path))
            # The class 0 is ignored. The label becomes 255 after
            # subtracting 1, because the dtype of label is uint8.
label = label - 1
label = label[np.newaxis, :, :]
return im, label
else:
im, label = self.transforms(im=image_path, label=label_path)
label = label - 1
if self.edge:
edge_mask = F.mask_to_binary_edge(
label, radius=2, num_classes=self.num_classes)
return im, label, edge_mask
else:
return im, label
avg_line_length: 40.118182 | max_line_length: 117 | alphanum_fraction: 0.629957

hexsha: 794ddc5c4b55c6bb4f7b989c798443cbc172661a | size: 2,414 | ext: py | lang: Python
max_stars: resources/stat.py | world3/focusplus-api | 4d9211983095689f6f0f72c676d1d95edfe14819 | ["Apache-2.0"] | count: null | events: null
max_issues: resources/stat.py | world3/focusplus-api | 4d9211983095689f6f0f72c676d1d95edfe14819 | ["Apache-2.0"] | count: null | events: null
max_forks: resources/stat.py | world3/focusplus-api | 4d9211983095689f6f0f72c676d1d95edfe14819 | ["Apache-2.0"] | count: null | events: null
content:
from datetime import datetime, timedelta, date
from flask_restful import Resource
from flask_jwt_extended import get_jwt_identity, jwt_required
from http import HTTPStatus
import json
from models.stat import Stat
from schemas.stat import StatSchema
from models.history import History
stat_list_schema = StatSchema(many=True)
class StatListResource(Resource):
@jwt_required
def get(self, start, end):
current_user = get_jwt_identity()
stats = Stat.get_range(current_user, start, end)
start_date = datetime.strptime(start, '%Y%m%d').date()
end_date = datetime.strptime(end, '%Y%m%d').date()
if (end_date - start_date).days + 1 > stats.count():
stat_dict = {stat.stat_key: stat for stat in stats}
for stat_date in self.date_range(start_date, end_date):
key = stat_date.strftime('%Y%m%d')
if key not in stat_dict:
stat = self.create_statistics(current_user, key)
stats = Stat.get_range(current_user, start, end)
return stat_list_schema.dump(stats), HTTPStatus.OK
@classmethod
def create_statistics(cls, user_id, stat_date):
histories = History.get_by_user_id(user_id, stat_date)
pomos = [0] * 24
breaks = [0] * 24
total_pomos = 0
total_breaks = 0
interruptions = 0
day_start = datetime.strptime(stat_date, '%Y%m%d')
for history in histories:
start_time = history.start
end_time = history.end
index = ((start_time - day_start).seconds + history.utc_offset * 60) // 3600
minutes = (end_time - start_time).seconds // 60
if history.type == 'Pomodoro':
pomos[index] += minutes
total_pomos += minutes
if history.status == 'Interrupted':
interruptions += 1
else:
breaks[index] += minutes
total_breaks += minutes
stat = Stat(stat_key=stat_date, pomos=json.dumps(pomos), breaks=json.dumps(breaks), totalPomos=total_pomos,
totalBreaks=total_breaks, interruptions=interruptions, user_id=user_id)
stat.save()
return stat
@classmethod
def date_range(cls, start_date, end_date):
for i in range((end_date - start_date).days + 1):
yield start_date + timedelta(i)
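# Illustrative sketch: a worked example of the hour-bucket index computed in
# create_statistics above. The history record and its utc_offset (assumed to be
# stored in minutes) are invented for the demo.
from datetime import datetime

day_start = datetime(2020, 1, 15)             # datetime.strptime('20200115', '%Y%m%d')
start_time = datetime(2020, 1, 15, 9, 30)     # pomodoro started at 09:30 UTC
utc_offset = 120                              # user at UTC+2, offset in minutes
index = ((start_time - day_start).seconds + utc_offset * 60) // 3600
assert index == 11                            # counted in the user's local 11:00 bucket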
| 36.575758
| 115
| 0.623447
|
794ddc940eb10d51cc790806ff78236f3b2fb644
| 7,697
|
py
|
Python
|
guardian/testapp/tests/test_core.py
|
wlanslovenija/django-guardian
|
501695a190ff20aff13715311fd05062e67401c6
|
[
"MIT"
] | null | null | null |
guardian/testapp/tests/test_core.py
|
wlanslovenija/django-guardian
|
501695a190ff20aff13715311fd05062e67401c6
|
[
"MIT"
] | null | null | null |
guardian/testapp/tests/test_core.py
|
wlanslovenija/django-guardian
|
501695a190ff20aff13715311fd05062e67401c6
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
from itertools import chain
from django.conf import settings
# Try the new app settings (Django 1.7) and fall back to the old system
try:
from django.apps import apps as django_apps
auth_app = django_apps.get_app_config("auth")
except ImportError:
from django.contrib.auth import models as auth_app
from django.contrib.auth.models import Group, Permission, AnonymousUser
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from guardian.core import ObjectPermissionChecker
from guardian.compat import get_user_model, create_permissions
from guardian.exceptions import NotUserNorGroup
from guardian.models import UserObjectPermission, GroupObjectPermission
from guardian.shortcuts import assign_perm
from guardian.management import create_anonymous_user
User = get_user_model()
class CustomUserTests(TestCase):
def test_create_anonymous_user(self):
create_anonymous_user(object())
self.assertEqual(1, User.objects.all().count())
anonymous = User.objects.all()[0]
self.assertEqual(anonymous.username, settings.ANONYMOUS_USER_NAME)
class ObjectPermissionTestCase(TestCase):
def setUp(self):
self.group, created = Group.objects.get_or_create(name='jackGroup')
self.user, created = User.objects.get_or_create(username='jack')
self.user.groups.add(self.group)
self.ctype = ContentType.objects.create(
model='bar', app_label='fake-for-guardian-tests')
self.anonymous_user = User.objects.get(
username=settings.ANONYMOUS_USER_NAME)
class ObjectPermissionCheckerTest(ObjectPermissionTestCase):
def setUp(self):
super(ObjectPermissionCheckerTest, self).setUp()
# Required if MySQL backend is used :/
create_permissions(auth_app, [], 1)
def test_cache_for_queries_count(self):
settings.DEBUG = True
try:
from django.db import connection
ContentType.objects.clear_cache()
checker = ObjectPermissionChecker(self.user)
# has_perm on Checker should spawn only two queries plus one extra
# for fetching the content type first time we check for specific
# model and two more content types as there are additional checks
# at get_user_obj_perms_model and get_group_obj_perms_model
query_count = len(connection.queries)
res = checker.has_perm("change_group", self.group)
if 'guardian.testapp' in settings.INSTALLED_APPS:
expected = 5
else:
# TODO: This is strange, need to investigate; totally not sure
# why there are more queries if testapp is not included
expected = 11
self.assertEqual(len(connection.queries), query_count + expected)
# Checking again shouldn't spawn any queries
query_count = len(connection.queries)
res_new = checker.has_perm("change_group", self.group)
self.assertEqual(res, res_new)
self.assertEqual(len(connection.queries), query_count)
# Checking for other permission but for Group object again
# shouldn't spawn any query too
query_count = len(connection.queries)
checker.has_perm("delete_group", self.group)
self.assertEqual(len(connection.queries), query_count)
# Checking for same model but other instance should spawn 2 queries
new_group = Group.objects.create(name='new-group')
query_count = len(connection.queries)
checker.has_perm("change_group", new_group)
self.assertEqual(len(connection.queries), query_count + 2)
# Checking for a permission on another model should spawn 3 queries
# (again: the content type plus the actual permissions for the object).
query_count = len(connection.queries)
checker.has_perm("change_user", self.user)
self.assertEqual(len(connection.queries), query_count + 3)
finally:
settings.DEBUG = False
def test_init(self):
self.assertRaises(NotUserNorGroup, ObjectPermissionChecker,
user_or_group=ContentType())
self.assertRaises(NotUserNorGroup, ObjectPermissionChecker)
def test_anonymous_user(self):
user = AnonymousUser()
check = ObjectPermissionChecker(user)
# assert anonymous user has no object permissions at all for obj
self.assertTrue([] == list(check.get_perms(self.ctype)))
def test_superuser(self):
user = User.objects.create(username='superuser', is_superuser=True)
check = ObjectPermissionChecker(user)
ctype = ContentType.objects.get_for_model(self.ctype)
perms = sorted(chain(*Permission.objects
.filter(content_type=ctype)
.values_list('codename')))
self.assertEqual(perms, check.get_perms(self.ctype))
for perm in perms:
self.assertTrue(check.has_perm(perm, self.ctype))
def test_not_active_superuser(self):
user = User.objects.create(username='not_active_superuser',
is_superuser=True, is_active=False)
check = ObjectPermissionChecker(user)
ctype = ContentType.objects.get_for_model(self.ctype)
perms = sorted(chain(*Permission.objects
.filter(content_type=ctype)
.values_list('codename')))
self.assertEqual(check.get_perms(self.ctype), [])
for perm in perms:
self.assertFalse(check.has_perm(perm, self.ctype))
def test_not_active_user(self):
user = User.objects.create(username='notactive')
assign_perm("change_contenttype", user, self.ctype)
# new ObjectPermissionChecker is created for each User.has_perm call
self.assertTrue(user.has_perm("change_contenttype", self.ctype))
user.is_active = False
self.assertFalse(user.has_perm("change_contenttype", self.ctype))
# use one checker only (the user's is_active attr should be checked
# before trying to use the cache)
user = User.objects.create(username='notactive-cache')
assign_perm("change_contenttype", user, self.ctype)
check = ObjectPermissionChecker(user)
self.assertTrue(check.has_perm("change_contenttype", self.ctype))
user.is_active = False
self.assertFalse(check.has_perm("change_contenttype", self.ctype))
def test_get_perms(self):
group = Group.objects.create(name='group')
obj1 = ContentType.objects.create(
model='foo', app_label='guardian-tests')
obj2 = ContentType.objects.create(
model='bar', app_label='guardian-tests')
assign_perms = {
group: ('change_group', 'delete_group'),
obj1: ('change_contenttype', 'delete_contenttype'),
obj2: ('delete_contenttype',),
}
check = ObjectPermissionChecker(self.user)
for obj, perms in assign_perms.items():
for perm in perms:
UserObjectPermission.objects.assign_perm(perm, self.user, obj)
self.assertEqual(sorted(perms), sorted(check.get_perms(obj)))
check = ObjectPermissionChecker(self.group)
for obj, perms in assign_perms.items():
for perm in perms:
GroupObjectPermission.objects.assign_perm(
perm, self.group, obj)
self.assertEqual(sorted(perms), sorted(check.get_perms(obj)))
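# Illustrative sketch (assumed usage, mirroring the fixtures above): assign a
# per-object permission and check it twice through one checker; the second
# check is answered from the checker's cache, as the query-count test verifies.
def example_checker_usage(user, ctype_obj):
    assign_perm("change_contenttype", user, ctype_obj)          # per-object permission
    checker = ObjectPermissionChecker(user)
    first = checker.has_perm("change_contenttype", ctype_obj)   # hits the database
    second = checker.has_perm("change_contenttype", ctype_obj)  # served from cache
    return first and second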
| 42.291209
| 79
| 0.664804
|
794ddd70b9faf7b5287a0f8716bb6791ffe5afee
| 191
|
py
|
Python
|
hadoop-examples/reducer.py
|
inpefess/AIUDSA18
|
443a9b3fe62f5dc1208db750642dd3bb94ad6fdd
|
[
"MIT"
] | null | null | null |
hadoop-examples/reducer.py
|
inpefess/AIUDSA18
|
443a9b3fe62f5dc1208db750642dd3bb94ad6fdd
|
[
"MIT"
] | null | null | null |
hadoop-examples/reducer.py
|
inpefess/AIUDSA18
|
443a9b3fe62f5dc1208db750642dd3bb94ad6fdd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
counter = 0
while True:
try:
# use `raw_input()` for Python 2
line = input()
except EOFError:
break
counter += int(line)
print(counter)
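# Illustrative companion sketch (an assumption, not necessarily the mapper used
# in this repo): a word-count mapper that emits one "1" per word, so the
# reducer above sums them into the total word count. Locally:
#   printf 'a b c\n' | python mapper.py | python reducer.py   # prints 3
import sys

for line in sys.stdin:
    for _word in line.split():
        print(1)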
| 15.916667
| 40
| 0.565445
|
794dde43cb5e78f5fe69b764ec21acf66a02c39c
| 10,204
|
py
|
Python
|
InplusTrader/backtestEngine/mod/analyser/mod.py
|
zhengwsh/InplusTrader_Linux
|
5f7eb17004da0b76ceafb93cb314de7a6009cd04
|
[
"MIT"
] | 17
|
2017-04-20T05:17:25.000Z
|
2020-09-30T08:58:03.000Z
|
InplusTrader/backtestEngine/mod/analyser/mod.py
|
vladhj38/InplusTrader_Linux
|
5f7eb17004da0b76ceafb93cb314de7a6009cd04
|
[
"MIT"
] | 1
|
2017-11-12T01:24:06.000Z
|
2019-09-19T08:50:38.000Z
|
InplusTrader/backtestEngine/mod/analyser/mod.py
|
vladhj38/InplusTrader_Linux
|
5f7eb17004da0b76ceafb93cb314de7a6009cd04
|
[
"MIT"
] | 17
|
2017-04-17T08:17:00.000Z
|
2020-10-25T01:56:49.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright 2017 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
from collections import defaultdict
import six
from enum import Enum
import numpy as np
import pandas as pd
from rqalpha.interface import AbstractMod
from rqalpha.events import EVENT
from rqalpha.const import ACCOUNT_TYPE, EXIT_CODE
from rqalpha.utils.risk import Risk
from rqalpha.utils.repr import properties
from rqalpha.execution_context import ExecutionContext
class AnalyserMod(AbstractMod):
def __init__(self):
self._env = None
self._mod_config = None
self._enabled = False
self._result = None
self._orders = defaultdict(list)
self._trades = []
self._total_portfolios = []
self._sub_portfolios = defaultdict(list)
self._positions = defaultdict(list)
self._benchmark_daily_returns = []
self._portfolio_daily_returns = []
self._latest_portfolio = None
self._latest_benchmark_portfolio = None
def start_up(self, env, mod_config):
self._env = env
self._mod_config = mod_config
self._enabled = (self._mod_config.record or self._mod_config.plot or self._mod_config.output_file or
self._mod_config.plot_save_file or self._mod_config.report_save_path)
if self._enabled:
env.event_bus.add_listener(EVENT.POST_SETTLEMENT, self._collect_daily)
env.event_bus.add_listener(EVENT.TRADE, self._collect_trade)
env.event_bus.add_listener(EVENT.ORDER_CREATION_PASS, self._collect_order)
def _collect_trade(self, account, trade):
self._trades.append(self._to_trade_record(trade))
def _collect_order(self, account, order):
self._orders[order.trading_datetime.date()].append(order)
def _collect_daily(self):
date = self._env.calendar_dt.date()
portfolio = self._env.account.get_portfolio(date)
self._latest_portfolio = portfolio
self._portfolio_daily_returns.append(portfolio.daily_returns)
self._total_portfolios.append(self._to_portfolio_record(date, portfolio))
if ACCOUNT_TYPE.BENCHMARK in self._env.accounts:
self._latest_benchmark_portfolio = self._env.accounts[ACCOUNT_TYPE.BENCHMARK].portfolio
self._benchmark_daily_returns.append(self._latest_benchmark_portfolio.daily_returns)
else:
self._benchmark_daily_returns.append(0)
for account_type, account in six.iteritems(self._env.accounts):
portfolio = account.get_portfolio(date)
self._sub_portfolios[account_type].append(self._to_portfolio_record2(date, portfolio))
for order_book_id, position in six.iteritems(portfolio.positions):
self._positions[account_type].append(self._to_position_record(date, order_book_id, position))
def _symbol(self, order_book_id):
return self._env.data_proxy.instruments(order_book_id).symbol
@staticmethod
def _safe_convert(value, ndigits=3):
if isinstance(value, Enum):
return value.name
if isinstance(value, (float, np.float64, np.float32, np.float16, np.float)):
return round(value, ndigits)
return value
def _to_portfolio_record(self, date, portfolio):
data = {
k: self._safe_convert(v, 3) for k, v in six.iteritems(properties(portfolio))
if not k.startswith('_') and not k.endswith('_') and k not in {
"positions", "start_date", "starting_cash"
}
}
data['date'] = date
return data
def _to_portfolio_record2(self, date, portfolio):
data = {
k: self._safe_convert(v, 3) for k, v in six.iteritems(portfolio.__dict__)
if not k.startswith('_') and not k.endswith('_') and k not in {
"positions", "start_date", "starting_cash"
}
}
data['date'] = date
return data
def _to_position_record(self, date, order_book_id, position):
data = {
k: self._safe_convert(v, 3) for k, v in six.iteritems(position.__dict__)
if not k.startswith('_') and not k.endswith('_')
}
data['order_book_id'] = order_book_id
data['symbol'] = self._symbol(order_book_id)
data['date'] = date
return data
def _to_trade_record(self, trade):
data = {
k: self._safe_convert(v) for k, v in six.iteritems(properties(trade))
if not k.startswith('_') and not k.endswith('_') and k != 'order'
}
data['order_book_id'] = trade.order.order_book_id
data['symbol'] = self._symbol(trade.order.order_book_id)
data['side'] = self._safe_convert(trade.order.side)
data['position_effect'] = self._safe_convert(trade.order.position_effect)
data['datetime'] = data['datetime'].strftime("%Y-%m-%d %H:%M:%S")
data['trading_datetime'] = data['trading_datetime'].strftime("%Y-%m-%d %H:%M:%S")
return data
def tear_down(self, code, exception=None):
if code != EXIT_CODE.EXIT_SUCCESS or not self._enabled:
return
strategy_name = os.path.basename(self._env.config.base.strategy_file).split(".")[0]
data_proxy = self._env.data_proxy
summary = {
'strategy_name': strategy_name,
}
for k, v in six.iteritems(self._env.config.base.__dict__):
if k in ["trading_calendar", "account_list", "timezone", "persist_mode",
"resume_mode", "data_bundle_path", "handle_split", "persist"]:
continue
summary[k] = self._safe_convert(v, 2)
risk = Risk(np.array(self._portfolio_daily_returns), np.array(self._benchmark_daily_returns),
data_proxy.get_risk_free_rate(self._env.config.base.start_date, self._env.config.base.end_date),
(self._env.config.base.end_date - self._env.config.base.start_date).days + 1)
summary.update({
'alpha': self._safe_convert(risk.alpha, 3),
'beta': self._safe_convert(risk.beta, 3),
'sharpe': self._safe_convert(risk.sharpe, 3),
'information_ratio': self._safe_convert(risk.information_ratio, 3),
'downside_risk': self._safe_convert(risk.annual_downside_risk, 3),
'tracking_error': self._safe_convert(risk.annual_tracking_error, 3),
'sortino': self._safe_convert(risk.sortino, 3),
'volatility': self._safe_convert(risk.annual_volatility, 3),
'max_drawdown': self._safe_convert(risk.max_drawdown, 3),
})
summary.update({
k: self._safe_convert(v, 3) for k, v in six.iteritems(properties(self._latest_portfolio))
if k not in ["positions", "daily_returns", "daily_pnl"]
})
if self._latest_benchmark_portfolio:
summary['benchmark_total_returns'] = self._latest_benchmark_portfolio.total_returns
summary['benchmark_annualized_returns'] = self._latest_benchmark_portfolio.annualized_returns
trades = pd.DataFrame(self._trades)
if 'datetime' in trades.columns:
trades = trades.set_index('datetime')
df = pd.DataFrame(self._total_portfolios)
df['date'] = pd.to_datetime(df['date'])
total_portfolios = df.set_index('date').sort_index()
result_dict = {
'summary': summary,
'trades': trades,
'total_portfolios': total_portfolios,
}
if ExecutionContext.plots is not None:
plots = ExecutionContext.plots.get_plots()
plots_items = defaultdict(dict)
for series_name, value_dict in six.iteritems(plots):
for date, value in six.iteritems(value_dict):
plots_items[date][series_name] = value
plots_items[date]["date"] = date
df = pd.DataFrame([dict_data for date, dict_data in six.iteritems(plots_items)])
df["date"] = pd.to_datetime(df["date"])
df = df.set_index("date").sort_index()
result_dict["plots"] = df
for account_type, account in six.iteritems(self._env.accounts):
account_name = account_type.name.lower()
portfolios_list = self._sub_portfolios[account_type]
df = pd.DataFrame(portfolios_list)
df["date"] = pd.to_datetime(df["date"])
portfolios_df = df.set_index("date").sort_index()
result_dict["{}_portfolios".format(account_name)] = portfolios_df
positions_list = self._positions[account_type]
positions_df = pd.DataFrame(positions_list)
if "date" in positions_df.columns:
positions_df["date"] = pd.to_datetime(positions_df["date"])
positions_df = positions_df.set_index("date").sort_index()
result_dict["{}_positions".format(account_name)] = positions_df
self._result = result_dict
if self._mod_config.output_file:
with open(self._mod_config.output_file, 'wb') as f:
pickle.dump(result_dict, f)
if self._mod_config.plot:
from rqalpha.plot import plot_result
plot_result(result_dict)
if self._mod_config.plot_save_file:
from rqalpha.plot import plot_result
plot_result(result_dict, False, self._mod_config.plot_save_file)
if self._mod_config.report_save_path:
from rqalpha.utils.report import generate_report
generate_report(result_dict, self._mod_config.report_save_path)
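# Illustrative sketch: inspecting the result_dict pickled in tear_down above
# when mod_config.output_file is set. The file name below is an assumption.
import pickle

with open("backtest_result.pkl", "rb") as f:      # hypothetical output_file path
    result = pickle.load(f)

print(result["summary"]["strategy_name"])         # summary dict built in tear_down
print(result["total_portfolios"].tail())          # daily portfolio DataFrame indexed by date
print(result["trades"].head())                    # trade records DataFrame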
| 41.819672
| 116
| 0.650529
|
794ddec6c094ff04a82af0d77f3021e8ddb26cf1
| 930
|
py
|
Python
|
tests/test_preprocess/test_tte_list.py
|
HughPaynter/PyGRB
|
2eaf834cf3c62a639a056285ca9518456daa4b7c
|
[
"BSD-3-Clause"
] | 14
|
2020-05-29T02:43:18.000Z
|
2022-03-12T13:27:36.000Z
|
tests/test_preprocess/test_tte_list.py
|
HughPaynter/PyGRB
|
2eaf834cf3c62a639a056285ca9518456daa4b7c
|
[
"BSD-3-Clause"
] | 8
|
2020-08-03T02:41:52.000Z
|
2021-06-22T05:41:52.000Z
|
tests/test_preprocess/test_tte_list.py
|
HughPaynter/PyGRB
|
2eaf834cf3c62a639a056285ca9518456daa4b7c
|
[
"BSD-3-Clause"
] | 14
|
2020-06-30T07:10:47.000Z
|
2022-03-12T13:27:39.000Z
|
import os
import unittest
from PyGRB.preprocess.GRB_class import BATSEGRB
class TestBATSEGRB(unittest.TestCase):
def setUp(self):
self.burst = 3770
self.datatype = 'tte_list'
def tearDown(self):
del self.burst
del self.datatype
def test_burst_assignment_tte_list(self):
burst = 3770
datatype = 'tte_list'
_path = 'data/BATSE/TTE_list_data/'
path = f'{_path}channel_{1}_d01234567_{"bins"}.npy'
if os.path.exists(path):
delete = False
else:
delete = True
test = BATSEGRB(burst, datatype = datatype)
assert(os.path.exists(path))
if delete:
for c in range(1,5):
for q in ["bins", "diff", "counts"]:
path = f'{_path}channel_{c}_d01234567_{q}.npy'
os.remove(path)
if __name__ == '__main__':
unittest.main()
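# Illustrative sketch of what the test above exercises: constructing a BATSEGRB
# for BATSE trigger 3770 with 'tte_list' data, which caches per-channel
# bins/diff/counts arrays as .npy files under data/BATSE/TTE_list_data/.
def _example_load():
    return BATSEGRB(3770, datatype='tte_list')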
| 21.627907
| 66
| 0.564516
|
794de12e12bb716c29742a754aaa612808350907
| 6,345
|
py
|
Python
|
Coins_history.py
|
jacobbaruch/Coin_market_cap
|
c8eab8622b3b5d808f7530c036463c2544f8779e
|
[
"MIT"
] | 6
|
2018-03-19T10:18:18.000Z
|
2020-02-13T15:49:30.000Z
|
Coins_history.py
|
jacobbaruch/Coin_market_cap
|
c8eab8622b3b5d808f7530c036463c2544f8779e
|
[
"MIT"
] | null | null | null |
Coins_history.py
|
jacobbaruch/Coin_market_cap
|
c8eab8622b3b5d808f7530c036463c2544f8779e
|
[
"MIT"
] | 3
|
2018-09-13T03:18:00.000Z
|
2021-08-30T17:58:03.000Z
|
import json
import sys
import requests
from bs4 import BeautifulSoup
import pandas as pd
from _datetime import datetime, date, timedelta
def save_crypto_coins_history(i_rank_start=1, i_rank_end=10, i_coin_file_path='crypto_coins',
i_from_date=None, i_to_date=None, i_min_volume=100000,
i_coin_markets=[]):
"""
:param int i_rank_start: pull data from coin current ranking [includes]
:param int i_rank_end: pull data till coin current ranking [includes]
:param str i_coin_file_path: target csv file name
:param str 'YYYY-MM-DD' i_from_date: pull data from this date [includes]
:param str 'YYYY-MM-DD' i_to_date: pull data till this date [includes]
:param int i_min_volume: pull coins whose 24h volume is greater than or equal to this value
:param list i_coin_markets: pull coins traded in at least one of these markets; if empty, this filter is ignored
writes to a csv file - historic data of coins
"""
from_date, to_date = get_from_to_dates(i_from_date, i_to_date)
rank_range_from_start = i_rank_end-i_rank_start+1
coins_ranking_dict = get_coins_current_ranking(i_rank_start, rank_range_from_start, i_min_volume)
df_coins = pd.DataFrame([])
for rank, coin in coins_ranking_dict.items():
if is_coin_in_markets(coin, set(i_coin_markets)):
df_coins = df_coins.append(get_coins_historical_data(rank, coin, from_date, to_date))
write_df_to_csv(df_coins, i_coin_file_path + '.csv')
def get_coins_current_ranking(i_start, i_limit, i_min_volume):
"""
:param int i_start: pull data from coin current ranking [includes]
:param int i_limit: pull data till coin current ranking [includes]
:param float i_min_volume: pull coins whose 24h volume is greater than or equal to this value
:return dict: rank, coin name
"""
url_coin_list_json = 'https://api.coinmarketcap.com/v1/ticker/?start={}&limit={}'.format(i_start - 1, i_limit)
page = requests.get(url_coin_list_json)
json_file = json.loads(page.text)
coins_dict = {}
for k in json_file:
if float(k['24h_volume_usd']) >= i_min_volume:
coins_dict[k['rank']] = k['id']
return coins_dict
def get_coins_historical_data(i_rank, i_coin, i_from_date, i_to_date):
"""
:param int i_rank: current coin rank
:param str i_coin: coin name
:param date 'YYYYMMDD' i_from_date: pull data from this date [includes]
:param date 'YYYYMMDD' i_to_date: pull data till this date [includes]
:return DataFrame: coin history data including the current ranking
"""
df_coin = get_specific_coin_historical_data(i_coin, i_from_date, i_to_date)
df_coin['Coin'] = i_coin
df_coin['Cur. Rank'] = i_rank
df_coin = pd.concat([df_coin.iloc[:,7:], df_coin.iloc[:,0:7]], axis=1, join_axes=[df_coin.index])
return df_coin
def get_specific_coin_historical_data(i_coin, i_from_date, i_to_date):
"""
:param str i_coin: coin name
:param date 'YYYYMMDD' i_from_date: pull data from this date [includes]
:param date 'YYYYMMDD' i_to_date: pull data till this date [includes]
:return DataFrame: coin history data
"""
currencies = "https://coinmarketcap.com/currencies/"
currencies_end = '/historical-data/'
dates = '?start={}&end={}'.format(i_from_date, i_to_date)
# collect and parse coin historical page
url = currencies + i_coin + currencies_end + dates
page = requests.get(url)
soup = BeautifulSoup(page.text, 'html.parser')
# Pull and append historic data
table = soup.find('table')
data = {
'Date': [],
'Open': [],
'High': [],
'Low': [],
'Close': [],
'Volume': [],
'Market Cap': []
}
try:
rows = table.findAll('tr')[1:]
for row in rows:
cols = row.findAll('td')
data['Date'].append(cols[0].string)
data['Open'].append(cols[1].string)
data['High'].append(cols[2].string)
data['Low'].append(cols[3].string)
data['Close'].append(cols[4].string)
data['Volume'].append(cols[5].string)
data['Market Cap'].append(cols[6].string)
coin_data = pd.DataFrame(data)
except AttributeError as e:
print('input parameters not valid')
sys.exit(13)
return coin_data
def write_df_to_csv(i_df, i_file):
"""
:param dataframe i_df: dataframe to save
:param str i_file: output csv file name
:exception IOerror: trying to save the file
"""
try:
i_df.to_csv(i_file)
except IOError as e:
print(e)
sys.exit(13)
def get_from_to_dates(i_from_date, i_to_date):
"""
:param str 'YYYY-MM-DD' i_from_date: pull data from this date [includes]
:param str 'YYYY-MM-DD' i_to_date: pull data till this date [includes]
:exception ValueError: date format is not as asked
:return tuple: dates in format 'YYYYMMDD' - dates ready to be scrapped
"""
try:
if i_from_date is None:
from_date = str(date.today() + timedelta(days=-30))
else:
from_date = i_from_date
from_date = datetime.strptime(from_date, '%Y-%m-%d').strftime('%Y%m%d')
if i_to_date is None:
to_date = str(date.today() + timedelta(days=-1))
else:
to_date = i_to_date
to_date = datetime.strptime(to_date, '%Y-%m-%d').strftime('%Y%m%d')
return from_date, to_date
except ValueError as e:
print(e)
sys.exit(13)
def is_coin_in_markets(i_coin, i_coin_markets_to_search):
'''
:param str i_coin: coin to check for availability in the given markets
:param set i_coin_markets_to_search: set of market names to search in
:return boolean: True - the coin is traded in at least one of the markets, or the market set is empty
                 False - the coin is not traded in any of the markets
'''
coin_in_markets = False
coin_markets_url = 'https://coinmarketcap.com/currencies/{}/#markets'.format(i_coin)
if not i_coin_markets_to_search:
coin_in_markets = True
else:
# collect and parse coin historical page
page = requests.get(coin_markets_url)
soup = BeautifulSoup(page.text, 'html.parser')
table = soup.find('table')
rows = table.findAll('tr')[1:]
#getting markets of coin
markets = set()
for row in rows:
cols = row.findAll('td')
if cols[1].text is not None:
markets.add(cols[1].text.upper())
for market in i_coin_markets_to_search:
if market.upper() in markets:
coin_in_markets = True
break
return coin_in_markets
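# Illustrative usage sketch: pull the current top-5 coins traded on Binance for
# January 2018 into top5_coins.csv. All argument values here are made up.
if __name__ == '__main__':
    save_crypto_coins_history(
        i_rank_start=1,
        i_rank_end=5,
        i_coin_file_path='top5_coins',    # '.csv' is appended by the function
        i_from_date='2018-01-01',
        i_to_date='2018-01-31',
        i_min_volume=100000,
        i_coin_markets=['binance'],
    )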
| 32.538462
| 112
| 0.696139
|
794de6c3bfccf0ae717e872d9e59096332d06a90
| 89
|
py
|
Python
|
tests/integration/hogwarts/potions/apps.py
|
ducdetronquito/polyjuice
|
6a55a6b2eb1ce3fd9d3614b0e788167a976e89c4
|
[
"Unlicense"
] | 1
|
2020-07-16T06:29:58.000Z
|
2020-07-16T06:29:58.000Z
|
tests/integration/hogwarts/potions/apps.py
|
ducdetronquito/polyjuice
|
6a55a6b2eb1ce3fd9d3614b0e788167a976e89c4
|
[
"Unlicense"
] | 3
|
2020-07-31T12:26:54.000Z
|
2020-08-01T10:11:20.000Z
|
tests/integration/hogwarts/potions/apps.py
|
ducdetronquito/polyjuice
|
6a55a6b2eb1ce3fd9d3614b0e788167a976e89c4
|
[
"Unlicense"
] | null | null | null |
from django.apps import AppConfig
class PotionsConfig(AppConfig):
name = "potions"
| 14.833333
| 33
| 0.752809
|
794de6f33bc9ffd78e9aed15908d5a550430302f
| 2,791
|
py
|
Python
|
sql_queries.py
|
WisnuMulya/Data-Modeling-Postgres
|
d83f1505d398bba05280c571c4cf5782a3d139a9
|
[
"MIT"
] | null | null | null |
sql_queries.py
|
WisnuMulya/Data-Modeling-Postgres
|
d83f1505d398bba05280c571c4cf5782a3d139a9
|
[
"MIT"
] | null | null | null |
sql_queries.py
|
WisnuMulya/Data-Modeling-Postgres
|
d83f1505d398bba05280c571c4cf5782a3d139a9
|
[
"MIT"
] | null | null | null |
# DROP TABLES
songplay_table_drop = "DROP TABLE IF EXISTS songplays"
user_table_drop = "DROP TABLE IF EXISTS users"
song_table_drop = "DROP TABLE IF EXISTS songs"
artist_table_drop = "DROP TABLE IF EXISTS artists"
time_table_drop = "DROP TABLE IF EXISTS time"
# CREATE TABLES
songplay_table_create = ("""
CREATE TABLE IF NOT EXISTS songplays (
songplay_id SERIAL PRIMARY KEY,
start_time bigint NOT NULL,
user_id integer REFERENCES users(user_id),
level text,
song_id text REFERENCES songs(song_id),
artist_id text REFERENCES artists(artist_id),
session_id integer,
location text,
user_agent text);
""")
user_table_create = ("""
CREATE TABLE IF NOT EXISTS users (
user_id integer PRIMARY KEY,
first_name text,
last_name text,
gender char,
level text);
""")
song_table_create = ("""
CREATE TABLE IF NOT EXISTS songs (
song_id text PRIMARY KEY,
title text,
artist_id text NOT NULL REFERENCES artists(artist_id),
year integer,
duration numeric);
""")
artist_table_create = ("""
CREATE TABLE IF NOT EXISTS artists (
artist_id text PRIMARY KEY,
name text,
location text,
latitude numeric,
longitude numeric);
""")
time_table_create = ("""
CREATE TABLE IF NOT EXISTS time (
start_time bigint PRIMARY KEY,
hour integer, day integer,
week integer,
month integer,
year integer,
weekday integer);
""")
# INSERT RECORDS
songplay_table_insert = ("""
INSERT INTO songplays (start_time, user_id, level, song_id, artist_id,
session_id, location, user_agent)\
VALUES (%s, %s, %s, %s, %s, %s, %s, %s)
""")
user_table_insert = ("""
INSERT INTO users (user_id, first_name, last_name, gender, level)\
VALUES (%s, %s, %s, %s, %s)
ON CONFLICT (user_id)
DO UPDATE SET level = EXCLUDED.level
""")
song_table_insert = ("""
INSERT INTO songs (song_id, title, artist_id, year, duration)\
VALUES (%s, %s, %s, %s, %s)
ON CONFLICT DO NOTHING
""")
artist_table_insert = ("""
INSERT INTO artists (artist_id, name, location, latitude, longitude)\
VALUES (%s, %s, %s, %s, %s)
ON CONFLICT DO NOTHING
""")
time_table_insert = ("""
INSERT INTO time (start_time, hour, day, week, month, year, weekday)\
VALUES (%s, %s, %s, %s, %s, %s, %s)
ON CONFLICT DO NOTHING
""")
# FIND SONGS
song_select = ("""
SELECT song_id, songs.artist_id
FROM songs
JOIN artists ON artists.artist_id = songs.artist_id
WHERE (title = %s) AND (name = %s) AND (duration = %s)
""")
# QUERY LISTS
create_table_queries = [user_table_create,
artist_table_create,
song_table_create,
songplay_table_create,
time_table_create]
drop_table_queries = [songplay_table_drop,
user_table_drop,
song_table_drop,
artist_table_drop,
time_table_drop]
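# Illustrative driver sketch for the query lists above, assuming a reachable
# Postgres database and psycopg2 installed; the connection string is made up
# and the repo's own create_tables.py may differ.
import psycopg2

conn = psycopg2.connect("host=127.0.0.1 dbname=sparkifydb user=student password=student")
cur = conn.cursor()

for query in drop_table_queries + create_table_queries:
    cur.execute(query)
    conn.commit()

conn.close()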
| 24.060345
| 71
| 0.683626
|
794de72b1ad8f5a36e72fa7276df5c2165b4f10e
| 8,649
|
py
|
Python
|
dl_code/pcode/distributed_running_nlp.py
|
ZhicongLiang/ChocoSGD
|
a127f32e0629d32dc6ca4c4f2837c3980fa3f76c
|
[
"Apache-2.0"
] | 39
|
2019-06-16T15:28:49.000Z
|
2022-03-15T09:20:35.000Z
|
dl_code/pcode/distributed_running_nlp.py
|
Distributed-Deep-Learning/ChocoSGD
|
c7715b368cc9f66674720ea9c823032c8058bdf6
|
[
"Apache-2.0"
] | 6
|
2020-04-15T21:02:49.000Z
|
2021-05-24T23:24:03.000Z
|
dl_code/pcode/distributed_running_nlp.py
|
Distributed-Deep-Learning/ChocoSGD
|
c7715b368cc9f66674720ea9c823032c8058bdf6
|
[
"Apache-2.0"
] | 18
|
2019-07-24T22:19:10.000Z
|
2022-02-09T08:20:10.000Z
|
# -*- coding: utf-8 -*-
from copy import deepcopy
import numpy as np
import torch
from pcode.utils.checkpoint import save_to_checkpoint
from pcode.utils.logging import (
display_training_stat,
display_test_stat,
dispaly_best_test_stat,
)
from pcode.utils.stat_tracker import RuntimeTracker
from pcode.utils.timer import Timer
from pcode.utils.auxiliary import get_model_difference
import pcode.utils.error_handler as error_handler
from pcode.create_dataset import load_data_batch
# sys.excepthook = error_handler.global_except_hook
def train_and_validate(
conf, model, criterion, scheduler, optimizer, metrics, data_loader
):
print("=>>>> start training and validation.\n")
assert (
optimizer.__class__.__name__ != "ParallelCHOCO"
), "NLP tasks right now do not support ParallelCHOCO based on multiprocessing (please use optimizer=parallel_choco_v instead)."
# define runtime stat tracker and start the training.
tracker_tr = RuntimeTracker(metrics_to_track=metrics.metric_names)
# get the timer.
timer = conf.timer
# loop until the expected number of full training epochs has finished.
print("=>>>> enter the training.\n")
while True:
# init the hidden state.
_hidden = (
model.module.init_hidden(conf.batch_size)
if "DataParallel" == model.__class__.__name__
else model.init_hidden(conf.batch_size)
)
# configure local step.
for idx, batch in enumerate(data_loader["train_loader"]):
model.train()
scheduler.step(optimizer)
# repackage the hidden.
_hidden = (
model.module.repackage_hidden(_hidden)
if "DataParallel" == model.__class__.__name__
else model.repackage_hidden(_hidden)
)
# load data
with timer("load_data", epoch=scheduler.epoch_):
_input = batch.text[
:,
conf.graph.rank
* conf.batch_size : (conf.graph.rank + 1)
* conf.batch_size,
]
_target = batch.target[
:,
conf.graph.rank
* conf.batch_size : (conf.graph.rank + 1)
* conf.batch_size,
]
_input, _target = load_data_batch(conf, _input, _target)
# inference and get current performance.
with timer("forward_pass", epoch=scheduler.epoch_):
optimizer.zero_grad()
loss, _hidden = inference(
conf,
model,
criterion,
metrics,
_input,
_target,
_hidden,
tracker_tr,
)
print(conf.graph.rank, "finish inference", idx)
with timer("backward_pass", epoch=scheduler.epoch_):
loss.backward()
print(conf.graph.rank, "finish backward", idx)
with timer("sync_complete", epoch=scheduler.epoch_):
# `clip_grad_norm` helps prevent the exploding gradient problem in RNNs / LSTMs.
torch.nn.utils.clip_grad_norm_(model.parameters(), conf.rnn_clip)
n_bits_to_transmit = optimizer.step(timer=timer)
# display the logging info.
display_training_stat(conf, scheduler, tracker_tr, n_bits_to_transmit)
# finished one epoch of training; decide whether to validate the model.
if scheduler.epoch_ % 1 == 0:
if tracker_tr.stat["loss"].avg > 1e3 or np.isnan(
tracker_tr.stat["loss"].avg
):
print("\nThe process diverges!!!!!Early stop it.")
error_handler.abort()
# each worker finishes one epoch of training.
do_validate(
conf, model, optimizer, criterion, scheduler, metrics, data_loader
)
# refresh the logging cache at the beginning of each epoch.
tracker_tr.reset()
# determine if the training is finished.
if scheduler.is_stop():
conf.logger.save_json()
return
# display tracking time.
if (
conf.graph.rank == 0
and conf.display_tracked_time
and scheduler.local_index % conf.summary_freq == 0
):
print(timer.summary())
def inference(conf, model, criterion, metrics, _input, _target, _hidden, tracker=None):
"""Inference on the given model and get loss and accuracy."""
output, _hidden = model(_input, _hidden)
loss = criterion(output.view(-1, conf.n_tokens), _target.contiguous().view(-1))
performance = metrics.evaluate(loss, output, _target)
if tracker is not None:
tracker.update_metrics([loss.item()] + performance, n_samples=_input.size(0))
return loss, _hidden
def do_validate(conf, model, optimizer, criterion, scheduler, metrics, data_loader):
"""Evaluate the model on the test dataset and save to the checkpoint."""
# wait until the whole group enters this function, and then evaluate.
performance = validate(
conf, model, optimizer, criterion, scheduler, metrics, data_loader
)
# remember best performance and display the val info.
scheduler.best_tracker.update(performance[0], scheduler.epoch_)
dispaly_best_test_stat(conf, scheduler)
# save to the checkpoint.
save_to_checkpoint(
conf,
{
"arch": conf.arch,
"current_epoch": scheduler.epoch,
"local_index": scheduler.local_index,
"best_perf": scheduler.best_tracker.best_perf,
"optimizer": optimizer.state_dict(),
"state_dict": model.state_dict(),
},
scheduler.best_tracker.is_best,
dirname=conf.checkpoint_dir,
filename="checkpoint.pth.tar",
save_all=conf.save_all_models,
)
print("Finished validation.")
def validate(conf, model, optimizer, criterion, scheduler, metrics, data_loader):
"""A function for model evaluation."""
def _evaluate(_model, label):
# define stat.
tracker_te = RuntimeTracker(metrics_to_track=metrics.metric_names)
# switch to evaluation mode
_model.eval()
# define hidden state for RNN.
_hidden = (
model.module.init_hidden(conf.batch_size)
if "DataParallel" == model.__class__.__name__
else model.init_hidden(conf.batch_size)
)
for batch in data_loader["val_loader"]:
# load data and check performance.
_input, _target = batch.text, batch.target
# repackage the hidden.
_hidden = (
model.module.repackage_hidden(_hidden)
if "DataParallel" == model.__class__.__name__
else model.repackage_hidden(_hidden)
)
with torch.no_grad():
_, _hidden = inference(
conf,
_model,
criterion,
metrics,
_input,
_target,
_hidden,
tracker_te,
)
# display the test stat.
display_test_stat(conf, scheduler, tracker_te, label)
# get global (mean) performance
global_performance = tracker_te.evaluate_global_metrics()
return global_performance
# # evaluate the averaged local model on the validation dataset.
# if (
# conf.graph_topology != "complete"
# and conf.graph_topology != "data_center"
# and not conf.train_fast
# ):
# copied_model = deepcopy(model)
# optimizer.world_aggregator.agg_model(copied_model, op="avg")
# _evaluate(copied_model, label="averaged_model")
# # get the l2 distance of the local model to the averaged model
# conf.logger.log_metric(
# name="stat",
# values={
# "rank": conf.graph.rank,
# "epoch": scheduler.epoch_,
# "distance": get_model_difference(model, copied_model),
# },
# tags={"split": "test", "type": "averaged_model"},
# )
# evaluate each local model on the validation dataset.
global_performance = _evaluate(model, label="local_model")
return global_performance
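# Illustrative sketch (NumPy only, sizes invented): the per-rank batch slicing
# done in train_and_validate above, where each worker takes its own
# conf.batch_size columns out of the globally batched [seq_len, world_batch]
# text tensor.
import numpy as np

seq_len, batch_size, world_size = 4, 2, 3
global_text = np.arange(seq_len * batch_size * world_size).reshape(seq_len, batch_size * world_size)
for rank in range(world_size):
    local_text = global_text[:, rank * batch_size:(rank + 1) * batch_size]
    assert local_text.shape == (seq_len, batch_size)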
| 35.592593
| 131
| 0.582033
|
794de83463e105565b14d3a027712fe844da1910
| 5,184
|
py
|
Python
|
utils/align_custom.py
|
darpan-jain/registration-master
|
0b7f894bf6d0cee496e846d4bff56a07f545e544
|
[
"MIT"
] | 2
|
2021-09-03T18:14:57.000Z
|
2021-09-19T17:54:27.000Z
|
utils/align_custom.py
|
darpan-jain/registration-master
|
0b7f894bf6d0cee496e846d4bff56a07f545e544
|
[
"MIT"
] | 4
|
2021-08-25T15:19:49.000Z
|
2022-03-11T23:31:11.000Z
|
utils/align_custom.py
|
darpan-jain/registration-master
|
0b7f894bf6d0cee496e846d4bff56a07f545e544
|
[
"MIT"
] | null | null | null |
'''
Implement a Dlib-style face alignment strategy.
Unlike Dlib, this approach does not deform the original image.
It also categorizes the face into 3 types: Center, Left, Right.
Faces are aligned based on facial landmarks.
'''
import math
import cv2
import numpy as np
class AlignCustom(object):
def __init__(self):
pass
def getPos(self, points):
if abs(points[0] - points[2]) / abs(points[1] - points[2]) > 2:
return "Right";
elif abs(points[1] - points[2]) / abs(points[0] - points[2]) > 2:
return "Left";
return "Center"
def list2colmatrix(self, pts_list):
"""
convert list to column matrix
Parameters:
----------
pts_list:
input list
Returns:
-------
colMat:
"""
assert len(pts_list) > 0
colMat = []
for i in range(len(pts_list)):
colMat.append(pts_list[i][0])
colMat.append(pts_list[i][1])
colMat = np.matrix(colMat).transpose()
return colMat
def find_tfrom_between_shapes(self, from_shape, to_shape):
"""
find transform between shapes
Parameters:
----------
from_shape:
to_shape:
Returns:
-------
tran_m:
tran_b:
"""
assert from_shape.shape[0] == to_shape.shape[0] and from_shape.shape[0] % 2 == 0
sigma_from = 0.0
sigma_to = 0.0
cov = np.matrix([[0.0, 0.0], [0.0, 0.0]])
# compute the mean and cov
from_shape_points = from_shape.reshape(int(from_shape.shape[0] / 2), 2)
to_shape_points = to_shape.reshape(int(to_shape.shape[0] / 2), 2)
mean_from = from_shape_points.mean(axis=0)
mean_to = to_shape_points.mean(axis=0)
for i in range(from_shape_points.shape[0]):
temp_dis = np.linalg.norm(from_shape_points[i] - mean_from)
sigma_from += temp_dis * temp_dis
temp_dis = np.linalg.norm(to_shape_points[i] - mean_to)
sigma_to += temp_dis * temp_dis
cov += (to_shape_points[i].transpose() - mean_to.transpose()) * (from_shape_points[i] - mean_from)
sigma_from = sigma_from / to_shape_points.shape[0]
sigma_to = sigma_to / to_shape_points.shape[0]
cov = cov / to_shape_points.shape[0]
# compute the affine matrix
s = np.matrix([[1.0, 0.0], [0.0, 1.0]])
u, d, vt = np.linalg.svd(cov)
if np.linalg.det(cov) < 0:
if d[1] < d[0]:
s[1, 1] = -1
else:
s[0, 0] = -1
r = u * s * vt
c = 1.0
if sigma_from != 0:
c = 1.0 / sigma_from * np.trace(np.diag(d) * s)
tran_b = mean_to.transpose() - c * r * mean_from.transpose()
tran_m = c * r
return tran_m, tran_b
def align(self, desired_size, img, landmarks, padding=0.1):
"""
Align a face image (BGR format).
:param desired_size: output size of the aligned face
:type desired_size: number
:param img: detected face image
:type img: array 3D
:param landmarks: 10-element facial landmark vector [x1..x5, y1..y5]
:param padding: relative padding around the face
:return aligned_face: aligned face image
:rtype aligned_face: array 3D
:return pos: position of the face
:rtype pos: 'Left', 'Center', 'Right'
"""
shape = []
for k in range(int(len(landmarks) / 2)):
shape.append(landmarks[k])
shape.append(landmarks[k + 5])
if padding > 0:
padding = padding
else:
padding = 0
# average positions of face points
mean_face_shape_x = [0.224152, 0.75610125, 0.490127, 0.254149, 0.726104]
mean_face_shape_y = [0.2119465, 0.2119465, 0.628106, 0.780233, 0.780233]
from_points = []
to_points = []
for i in range(int(len(shape) / 2)):
x = (padding + mean_face_shape_x[i]) / (2 * padding + 1) * desired_size
y = (padding + mean_face_shape_y[i]) / (2 * padding + 1) * desired_size
to_points.append([x, y])
from_points.append([shape[2 * i], shape[2 * i + 1]])
# convert the points to Mat
from_mat = self.list2colmatrix(from_points)
to_mat = self.list2colmatrix(to_points)
# compute the similarity transform
tran_m, tran_b = self.find_tfrom_between_shapes(from_mat, to_mat)
probe_vec = np.matrix([1.0, 0.0]).transpose()
probe_vec = tran_m * probe_vec
scale = np.linalg.norm(probe_vec)
angle = 180.0 / math.pi * math.atan2(probe_vec[1, 0], probe_vec[0, 0])
from_center = [(shape[0] + shape[2]) / 2.0, (shape[1] + shape[3]) / 2.0]
to_center = [0, 0]
to_center[1] = desired_size * 0.4
to_center[0] = desired_size * 0.5
ex = to_center[0] - from_center[0]
ey = to_center[1] - from_center[1]
rot_mat = cv2.getRotationMatrix2D((from_center[0], from_center[1]), -1 * angle, scale)
rot_mat[0][2] += ex
rot_mat[1][2] += ey
chips = cv2.warpAffine(img, rot_mat, (desired_size, desired_size))
return chips, self.getPos(landmarks)
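# Illustrative sketch of the getPos heuristic above. The landmark layout is the
# 10-vector [x1..x5, y1..y5] that align() expects; coordinates are invented.
aligner = AlignCustom()

frontal = [30, 70, 50, 35, 65, 40, 40, 60, 80, 80]   # nose x midway between the eyes
turned = [30, 70, 65, 35, 65, 40, 40, 60, 80, 80]    # nose x close to the right eye
print(aligner.getPos(frontal))   # -> "Center"
print(aligner.getPos(turned))    # -> "Right"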
| 31.803681
| 110
| 0.552662
|
794de87d8ecbc5cdbeca2b68f90654aaf31cf396
| 22,046
|
py
|
Python
|
examples/hybrid_parallelism/model/transformer_encoder.py
|
Melon-Zhou/FleetX
|
ad055d27f3f77184f73430f31ece81aa88c51906
|
[
"Apache-2.0"
] | 1
|
2021-06-08T12:03:38.000Z
|
2021-06-08T12:03:38.000Z
|
examples/hybrid_parallelism/model/transformer_encoder.py
|
Melon-Zhou/FleetX
|
ad055d27f3f77184f73430f31ece81aa88c51906
|
[
"Apache-2.0"
] | null | null | null |
examples/hybrid_parallelism/model/transformer_encoder.py
|
Melon-Zhou/FleetX
|
ad055d27f3f77184f73430f31ece81aa88c51906
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer encoder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
import paddle
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import numpy as np
def gelu(x):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation.
Returns:
`x` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + fluid.layers.tanh((np.sqrt(2.0 / np.pi) * (x + 0.044715 * fluid.layers.pow(x, 3.0)))))
return x * cdf
def _build_linear_column_parallel(x, n_in, n_out, name, init, mp):
return paddle.distributed.split(x,
size=(n_in, n_out),
operation='linear',
axis=1,
gather_out=False,
num_partitions=mp,
weight_attr=fluid.ParamAttr(
name='%s.w_0' % name if name is not None else None,
initializer=init),
bias_attr='%s.b_0' % name if name is not None else None, )
def _build_linear_row_parallel(x, n_in, n_out, name, init, mp):
return paddle.distributed.split(x,
size=(n_in, n_out),
operation='linear',
axis=0,
gather_out=True,
num_partitions=mp,
weight_attr=fluid.ParamAttr(
name='%s.w_0' % name if name is not None else None,
initializer=init),
bias_attr='%s.b_0' % name if name is not None else None, )
def multi_head_attention(queries,
keys,
values,
attn_bias,
d_key,
d_value,
d_model,
n_head=1,
dropout_rate=0.,
cache=None,
param_initializer=None,
name='multi_head_att',
topo=None):
"""
Multi-Head Attention. Note that attn_bias is added to the logit before
computing the softmax activation to mask certain selected positions so that
they will not be considered in the attention weights.
"""
keys = queries if keys is None else keys
values = keys if values is None else values
#if not (len(queries.shape) == len(keys.shape) == len(values.shape) == 3):
# raise ValueError(
# "Inputs: quries, keys and values should all be 3-D tensors. but {} v.s. {} v.s. {}"\
# .format(queries.shape, keys.shape, values.shape))
def __compute_qkv(queries, keys, values, n_head, d_key, d_value):
"""
Add linear projection to queries, keys, and values.
"""
if topo is None or topo.mp.size == 1:
q = layers.fc(input=queries,
size=d_key * n_head,
num_flatten_dims=2,
param_attr=fluid.ParamAttr(
name=name + '_query_fc.w_0',
initializer=param_initializer),
bias_attr=name + '_query_fc.b_0')
k = layers.fc(input=keys,
size=d_key * n_head,
num_flatten_dims=2,
param_attr=fluid.ParamAttr(
name=name + '_key_fc.w_0',
initializer=param_initializer),
bias_attr=name + '_key_fc.b_0')
v = layers.fc(input=values,
size=d_value * n_head,
num_flatten_dims=2,
param_attr=fluid.ParamAttr(
name=name + '_value_fc.w_0',
initializer=param_initializer),
bias_attr=name + '_value_fc.b_0')
else:
q = _build_linear_column_parallel(queries, d_model, d_model, name+'_query_fc_'+str(topo.mp.rank), param_initializer, topo.mp.size)
k = _build_linear_column_parallel(keys, d_model, d_model, name+'_key_fc_'+str(topo.mp.rank), param_initializer, topo.mp.size)
v = _build_linear_column_parallel(values, d_model, d_model, name+'_value_fc_'+str(topo.mp.rank), param_initializer, topo.mp.size)
return q, k, v
def __split_heads(x, n_head):
"""
Reshape the last dimension of input tensor x so that it becomes two
dimensions and then transpose. Specifically, input a tensor with shape
[bs, max_sequence_length, n_head * hidden_dim] then output a tensor
with shape [bs, n_head, max_sequence_length, hidden_dim].
"""
hidden_size = x.shape[-1]
# The value 0 in shape attr means copying the corresponding dimension
# size of the input as the output dimension size.
reshaped = layers.reshape(
x=x, shape=[0, 0, n_head, hidden_size // n_head], inplace=True)
# permute the dimensions into:
# [batch_size, n_head, max_sequence_len, hidden_size_per_head]
return layers.transpose(x=reshaped, perm=[0, 2, 1, 3])
def __combine_heads(x):
"""
Transpose and then reshape the last two dimensions of input tensor x
so that it becomes one dimension, which is reverse to __split_heads.
"""
if len(x.shape) == 3: return x
if len(x.shape) != 4:
raise ValueError("Input(x) should be a 4-D Tensor.")
trans_x = layers.transpose(x, perm=[0, 2, 1, 3])
# The value 0 in shape attr means copying the corresponding dimension
# size of the input as the output dimension size.
return layers.reshape(
x=trans_x,
shape=[0, 0, trans_x.shape[2] * trans_x.shape[3]],
inplace=True)
def scaled_dot_product_attention(q, k, v, attn_bias, d_key, dropout_rate):
"""
Scaled Dot-Product Attention
"""
scaled_q = layers.scale(x=q, scale=d_key**-0.5)
product = layers.matmul(x=scaled_q, y=k, transpose_y=True)
if attn_bias:
product += attn_bias
weights = layers.softmax(product)
if dropout_rate:
weights = layers.dropout(
weights,
dropout_prob=dropout_rate,
dropout_implementation="upscale_in_train",
is_test=False)
out = layers.matmul(weights, v)
return out
if topo.mp.size > 1:
n_head = n_head // topo.mp.size
q, k, v = __compute_qkv(queries, keys, values, n_head, d_key, d_value)
if cache is not None: # use cache and concat time steps
# Since the inplace reshape in __split_heads changes the shape of k and
# v, which is the cache input for next time step, reshape the cache
# input from the previous time step first.
k = cache["k"] = layers.concat(
[layers.reshape(
cache["k"], shape=[0, 0, d_model]), k], axis=1)
v = cache["v"] = layers.concat(
[layers.reshape(
cache["v"], shape=[0, 0, d_model]), v], axis=1)
q = __split_heads(q, n_head)
k = __split_heads(k, n_head)
v = __split_heads(v, n_head)
ctx_multiheads = scaled_dot_product_attention(q, k, v, attn_bias, d_key,
dropout_rate)
out = __combine_heads(ctx_multiheads)
# Project back to the model size.
if topo is None or topo.mp.size == 1:
proj_out = layers.fc(input=out,
size=d_model,
num_flatten_dims=2,
param_attr=fluid.ParamAttr(
name=name + '_output_fc.w_0',
initializer=param_initializer),
bias_attr=name + '_output_fc.b_0')
else:
proj_out = _build_linear_row_parallel(out, d_model, d_model, name+'_output_fc_'+str(topo.mp.rank), param_initializer, topo.mp.size)
return proj_out
def positionwise_feed_forward(x,
d_inner_hid,
d_hid,
dropout_rate,
hidden_act,
param_initializer=None,
name='ffn',
topo=None):
"""
Position-wise Feed-Forward Networks.
This module consists of two linear transformations with an activation
(GELU here) in between, applied to each position separately and identically.
"""
assert hidden_act == 'gelu.approximate'
if topo is None or topo.mp.size == 1:
hidden = layers.fc(input=x,
size=d_inner_hid,
num_flatten_dims=2,
act=None,
param_attr=fluid.ParamAttr(
name=name + '_fc_0.w_0',
initializer=param_initializer),
bias_attr=name + '_fc_0.b_0')
else:
hidden = _build_linear_column_parallel(x, d_hid, d_inner_hid, name+'_fc_0_'+str(topo.mp.rank), param_initializer, topo.mp.size)
hidden = gelu(hidden)
if dropout_rate:
hidden = layers.dropout(
hidden,
dropout_prob=dropout_rate,
dropout_implementation="upscale_in_train",
is_test=False)
if topo is None or topo.mp.size == 1:
out = layers.fc(input=hidden,
size=d_hid,
num_flatten_dims=2,
param_attr=fluid.ParamAttr(
name=name + '_fc_1.w_0', initializer=param_initializer),
bias_attr=name + '_fc_1.b_0')
else:
out = _build_linear_row_parallel(hidden, d_inner_hid, d_hid, name+'_fc_1_'+str(topo.mp.rank), param_initializer, topo.mp.size)
return out
def pre_post_process_layer(prev_out,
out,
process_cmd,
dropout_rate=0.,
epsilon=1e-12,
name=''):
"""
Add residual connection, layer normalization and dropout to the out tensor
optionally according to the value of process_cmd.
This will be used before or after multi-head attention and position-wise
feed-forward networks.
"""
for cmd in process_cmd:
if cmd == "a": # add residual connection
out = out + prev_out if prev_out else out
elif cmd == "n": # add layer normalization
out = layers.layer_norm(
out,
begin_norm_axis=len(out.shape) - 1,
param_attr=fluid.ParamAttr(
name=name + '_layer_norm_scale',
initializer=fluid.initializer.Constant(1.)),
bias_attr=fluid.ParamAttr(
name=name + '_layer_norm_bias',
initializer=fluid.initializer.Constant(0.)),
epsilon=epsilon)
elif cmd == "d": # add dropout
if dropout_rate:
out = layers.dropout(
out,
dropout_prob=dropout_rate,
dropout_implementation="upscale_in_train",
is_test=False)
return out
pre_process_layer = partial(pre_post_process_layer, None)
post_process_layer = pre_post_process_layer
def encoder_layer(enc_input,
attn_bias,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
prepostprocess_dropout,
attention_dropout,
relu_dropout,
hidden_act,
preprocess_cmd="n",
postprocess_cmd="da",
param_initializer=None,
name='',
epsilon=1e-12,
topo=None):
"""The encoder layers that can be stacked to form a deep encoder.
This module consists of multi-head (self) attention followed by a
position-wise feed-forward network, both wrapped by post_process_layer
to add residual connection, layer normalization and dropout.
"""
attn_output = multi_head_attention(
enc_input,
None,
None,
attn_bias,
d_key,
d_value,
d_model,
n_head,
attention_dropout,
param_initializer=param_initializer,
name=name + '_multi_head_att',
topo=topo)
attn_output = post_process_layer(
enc_input,
attn_output,
'an',
prepostprocess_dropout,
name=name + '_post_att',
epsilon=epsilon)
ffd_output = positionwise_feed_forward(
attn_output,
d_inner_hid,
d_model,
relu_dropout,
hidden_act,
param_initializer=param_initializer,
name=name + '_ffn',
topo=topo)
return post_process_layer(
attn_output,
ffd_output,
'an',
prepostprocess_dropout,
name=name + '_post_ffn',
epsilon=epsilon
)
def encoder_layer_preln(enc_input,
attn_bias,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
prepostprocess_dropout,
attention_dropout,
relu_dropout,
hidden_act,
preprocess_cmd="n",
postprocess_cmd="da",
param_initializer=None,
name='',
epsilon=1e-12,
topo=None):
"""The encoder layers that can be stacked to form a deep encoder.
This module consists of multi-head (self) attention followed by a
position-wise feed-forward network, both wrapped by post_process_layer
to add residual connection, layer normalization and dropout.
"""
enc_ln_output = post_process_layer(
None,
enc_input,
'n',
prepostprocess_dropout,
name=name + '_post_att',
epsilon=epsilon)
attn_output = multi_head_attention(
enc_ln_output,
None,
None,
attn_bias,
d_key,
d_value,
d_model,
n_head,
attention_dropout,
param_initializer=param_initializer,
name=name + '_multi_head_att',
topo=topo)
attn_output = post_process_layer(
enc_input,
attn_output,
'a',
prepostprocess_dropout,
name=name + '_post_att',
epsilon=epsilon)
attn_ln_output = post_process_layer(
None,
attn_output,
'n',
prepostprocess_dropout,
name=name + '_post_ffn',
epsilon=epsilon)
ffd_output = positionwise_feed_forward(
attn_ln_output,
d_inner_hid,
d_model,
relu_dropout,
hidden_act,
param_initializer=param_initializer,
name=name + '_ffn',
topo=topo)
return post_process_layer(
attn_output,
ffd_output,
'a',
prepostprocess_dropout,
name=name + '_post_ffn',
epsilon=epsilon
)
def encoder_inner_share(enc_input,
attn_bias,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
prepostprocess_dropout,
attention_dropout,
relu_dropout,
hidden_act,
preprocess_cmd,
postprocess_cmd,
epsilon,
param_initializer=None,
name='',
n_layer_per_block=1,
topo=None):
"""
The encoder_inner_share is composed of n_layer_per_block layers returned by calling
encoder_layer.
"""
_checkpoints = []
for i in range(n_layer_per_block):
enc_output = encoder_layer(
enc_input,
attn_bias,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
prepostprocess_dropout,
attention_dropout,
relu_dropout,
hidden_act,
preprocess_cmd,
postprocess_cmd,
param_initializer=param_initializer,
name=name + '_layer_' + str(i),
epsilon=epsilon,
topo=topo
)
_checkpoints.append(enc_output.name)
enc_input = enc_output
return enc_output, _checkpoints
def encoder_outer_share(enc_input,
attn_bias,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
prepostprocess_dropout,
attention_dropout,
relu_dropout,
hidden_act,
preprocess_cmd,
postprocess_cmd,
epsilon,
param_initializer=None,
name='',
n_layer_per_block=1,
topo=None,
preln=False):
"""
The encoder_outer_share is composed of n_layer_per_block layers returned by calling
encoder_layer.
"""
enc_fn = encoder_layer_preln if preln else encoder_layer
_checkpoints = []
for i in range(n_layer_per_block):
enc_output = enc_fn(
enc_input,
attn_bias,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
prepostprocess_dropout,
attention_dropout,
relu_dropout,
hidden_act,
preprocess_cmd,
postprocess_cmd,
param_initializer=param_initializer,
name=name,
epsilon=epsilon,
topo=topo)
_checkpoints.append(enc_output.name)
enc_input = enc_output
return enc_output, _checkpoints
def encoder(enc_input,
attn_bias,
n_layer,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
prepostprocess_dropout,
attention_dropout,
relu_dropout,
hidden_act,
preprocess_cmd,
postprocess_cmd,
epsilon,
n_layer_per_block,
param_initializer=None,
name='',
param_share=None,
topo=None,
preln=False):
"""
The encoder is composed of a stack of identical layers returned by calling
encoder_layer.
"""
checkpoints = []
# http://wiki.baidu.com/pages/viewpage.action?pageId=978216217
# for outer_share it will share same param in one block,
# and for inner_share it will share param across blocks, rather than in one same block
#
# outer-share inner-block
# [1] [1] ----\ 1st block
# [1] [2] ----/
# [2] [1] ----\ 2nd block
# [2] [2] ----/
if param_share == "normal" or param_share == 'outer_share':
#n_layer_per_block = 24 for bert-large
enc_fn = encoder_outer_share
name_fn = lambda i: name + '_layer_' + str(i)
elif param_share == "inner_share":
#n_layer_per_block = 2
enc_fn = encoder_inner_share
name_fn = lambda i: name
else:
raise ValueError('unsupported param share mode')
layer_per_stage = n_layer // topo.pp.size
for i in range(n_layer // n_layer_per_block):
with fluid.device_guard(f'gpu:{i//layer_per_stage}'):
attn_bias.stop_gradient = True
attn_bias.persistable = True
enc_output, cp = enc_fn(
enc_input,
attn_bias,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
prepostprocess_dropout,
attention_dropout,
relu_dropout,
hidden_act,
preprocess_cmd,
postprocess_cmd,
param_initializer=param_initializer,
name=name_fn(i),
n_layer_per_block=n_layer_per_block,
epsilon=epsilon,
topo=topo,
preln=preln)
if i % layer_per_stage == 0:
checkpoints.extend(cp)
enc_input = enc_output
if preln:
with fluid.device_guard(f'gpu:{topo.pp.size-1}'):
enc_output = post_process_layer(
None,
enc_output,
'n',
prepostprocess_dropout,
name='post_encoder',
epsilon=epsilon)
with fluid.device_guard(f'gpu:{topo.pp.size-1}'):
enc_output = pre_process_layer(
enc_output,
preprocess_cmd,
prepostprocess_dropout,
name="post_encoder",
epsilon=epsilon)
return enc_output, checkpoints
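# --- Hedged illustration (editor addition, not part of the original file) ---
# A minimal sketch of how the two parameter-sharing modes map layers to
# parameter-name scopes, assuming n_layer=4, n_layer_per_block=2 and name='encoder'.
# The helper is hypothetical and only mirrors the name_fn / per-layer naming used
# by encoder(), encoder_outer_share() and encoder_inner_share() above.
def _illustrate_param_share(n_layer=4, n_layer_per_block=2, name='encoder'):
    """Return, per mode, the parameter scope used by each of the n_layer layers."""
    scopes = {'outer_share': [], 'inner_share': []}
    for block in range(n_layer // n_layer_per_block):
        for layer in range(n_layer_per_block):
            # outer_share: one scope per block, reused by every layer inside it
            scopes['outer_share'].append('%s_layer_%d' % (name, block))
            # inner_share: one scope per in-block position, reused by every block
            scopes['inner_share'].append('%s_layer_%d' % (name, layer))
    return scopes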
| 33.969183
| 142
| 0.539962
|
794de8ad8a3c2ff40d677cdef364c6c0fab6cc9d
| 3,060
|
py
|
Python
|
tests/functional/test_positive_float.py
|
AutumnalDream/tartiflette-plugin-scalars
|
2c73b20eac93b364a97b2192956e5fd4034ec35a
|
[
"MIT"
] | 8
|
2019-10-02T12:47:15.000Z
|
2021-12-15T14:29:37.000Z
|
tests/functional/test_positive_float.py
|
AutumnalDream/tartiflette-plugin-scalars
|
2c73b20eac93b364a97b2192956e5fd4034ec35a
|
[
"MIT"
] | 109
|
2019-09-19T13:37:43.000Z
|
2022-03-28T07:08:50.000Z
|
tests/functional/test_positive_float.py
|
AutumnalDream/tartiflette-plugin-scalars
|
2c73b20eac93b364a97b2192956e5fd4034ec35a
|
[
"MIT"
] | 4
|
2019-10-26T19:57:20.000Z
|
2021-06-24T14:32:37.000Z
|
import pytest
from tartiflette import Resolver, create_engine
@pytest.mark.asyncio
async def test_positive_float_ok():
@Resolver("Query.positiveFloat", schema_name="test_positive_float_ok")
async def positive_float_resolver(*_args, **_kwargs):
return 999.99
sdl = """
type Query {
positiveFloat: PositiveFloat
}
"""
engine = await create_engine(
sdl=sdl,
modules=[{"name": "tartiflette_plugin_scalars", "config": {}}],
schema_name="test_positive_float_ok",
)
assert await engine.execute("query positiveFloatOk { positiveFloat }") == {
"data": {"positiveFloat": 999.99}
}
@pytest.mark.asyncio
async def test_positive_float_nok():
@Resolver("Query.positiveFloat", schema_name="test_positive_float_nok")
async def positive_float_resolver(*_args, **_kwargs):
return "nope"
sdl = """
type Query {
positiveFloat: PositiveFloat
}
"""
engine = await create_engine(
sdl=sdl,
modules=[{"name": "tartiflette_plugin_scalars", "config": {}}],
schema_name="test_positive_float_nok",
)
result = await engine.execute("query positiveFloatNok { positiveFloat }")
assert result["data"]["positiveFloat"] is None
assert len(result["errors"]) == 1
assert (
result["errors"][0]["message"]
== "could not convert string to float: 'nope'"
)
@pytest.mark.asyncio
async def test_positive_float_mutation_ok():
@Resolver(
"Mutation.positiveFloat", schema_name="test_positive_float_mutation_ok"
)
async def positive_float_resolver(*_args, **_kwargs):
return True
sdl = """
type Query {
positiveFloat: PositiveFloat
}
type Mutation {
positiveFloat(input: PositiveFloat): Boolean
}
"""
engine = await create_engine(
sdl=sdl,
modules=[{"name": "tartiflette_plugin_scalars", "config": {}}],
schema_name="test_positive_float_mutation_ok",
)
assert await engine.execute(
"mutation positiveFloat { positiveFloat(input:100) }"
) == {"data": {"positiveFloat": True}}
@pytest.mark.asyncio
async def test_positive_float_mutation_nok():
@Resolver(
"Mutation.positiveFloat",
schema_name="test_positive_float_mutation_nok",
)
async def positive_float_resolver(*_args, **_kwargs):
return True
sdl = """
type Query {
positiveFloat: PositiveFloat
}
type Mutation {
positiveFloat(input: PositiveFloat): Boolean
}
"""
engine = await create_engine(
sdl=sdl,
modules=[{"name": "tartiflette_plugin_scalars", "config": {}}],
schema_name="test_positive_float_mutation_nok",
)
result = await engine.execute(
"mutation positiveFloat { positiveFloat(input:-100) }"
)
assert result["data"] is None
assert len(result["errors"]) == 1
assert (
result["errors"][0]["message"]
== "Value -100 is not of correct type PositiveFloat"
)
| 25.714286
| 79
| 0.636601
|
794de8ec57ff317d026c6af28893c94aa4ee94ce
| 5,559
|
py
|
Python
|
cvat/apps/dashboard/parseToJsTree.py
|
ItayHoresh/improvedCvat
|
b0fc79a574d74643b93955a4fc6a1969f49685d1
|
[
"MIT"
] | null | null | null |
cvat/apps/dashboard/parseToJsTree.py
|
ItayHoresh/improvedCvat
|
b0fc79a574d74643b93955a4fc6a1969f49685d1
|
[
"MIT"
] | 6
|
2020-03-25T11:49:12.000Z
|
2020-06-06T01:35:38.000Z
|
cvat/apps/dashboard/parseToJsTree.py
|
OperationalBina/Cvat
|
b0fc79a574d74643b93955a4fc6a1969f49685d1
|
[
"MIT"
] | 1
|
2020-12-02T12:27:33.000Z
|
2020-12-02T12:27:33.000Z
|
from oswrapper import *
from cvat.apps.engine.models import *
from django.db.models import Q
import requests
import json
import os
global generator_id
generator_id = 0
global generator_id_sorted
generator_id_sorted = 0
def getObjectStoragesPerProject(user_id, project_id, isAdmin):
    # if the user is authorized to access this project
if Projects_Users.objects.filter(user_id=user_id).filter(project_id=project_id).exists() or isAdmin:
return list(Projects_ObjectStorages.objects.filter(project_id=project_id).values_list('object_storage__name', 'channels', 'object_storage__secret_key', 'object_storage__access_key', 'object_storage__id', 'object_storage__endpoint_url'))
else:
return []
def handleObjectStorages(request):
    # get the list of all object storages the user can connect to
list_of_os = getObjectStoragesPerProject(request.user.id, request.GET['project_id'], request.user.has_perm('dashboard.views.isAdmin'))
items = []
final_items = []
    # for each object storage, add all of its files to the item list
for os_details in list_of_os:
items = []
secret_key = os_details[2]
access_key = os_details[3]
endpoint_url = os_details[5]
objs = ObjectStorageWrapper(access_key=access_key, secret_key=secret_key, endpt_url=endpoint_url)
        # if no channels are configured for this object storage
if os_details[1] == None:
path = os_details[0]
items = objs.find_objects(path, formats=('.mp4', '.avi'))
else:
# for each channel get all 1500kbps files
for channel in os_details[1]:
path = os_details[0] + '/' + channel
items += objs.find_objects(path, formats=('1500kbps_init_gpac.mp4'))
        # for each file, record the object storage id so we know which storage to connect to
for item in items:
item['OS_ID'] = os_details[4]
item['Bucket'] = os_details[0].split('/')[0]
final_items.append(item)
return final_items
def checkIfVideoExist(source, video_id):
return Task.objects.filter(Q(source=source) | Q(video_id=video_id)).exists()
def toList(items):
    # return only the paths split by / : [['path', 'to', 'file'], ['another', 'file', 'path']]
return [[str(item['OS_ID'])] + [item['Bucket']] + item["Key"].split('/') for item in items]
def createJson(text, parent, os_id, treeType):
global generator_id
global generator_id_sorted
iconPath = ''
if '.mp4' in text:
iconPath = '/static/engine/icons/MP4.png'
elif '.avi' in text:
iconPath = '/static/engine/icons/AVI.png'
if treeType == 0:
currId = generator_id
else:
currId = generator_id_sorted
currJson = { 'id' : currId,
'parent' : parent,
'text' : text,
'icon' : iconPath,
'os_id' : int(os_id) }
if treeType == 0:
generator_id+=1
else:
generator_id_sorted+=1
return currJson
def createJsonSorted(currId, text, parent, score, video_id, os_id, path):
iconPath = ''
if '.mp4' in text:
iconPath = '/static/engine/icons/MP4.png'
elif '.avi' in text:
iconPath = '/static/engine/icons/AVI.png'
currJson = { 'id' : currId,
'parent' : parent,
'text' : text,
'icon' : iconPath,
'os_id' : int(os_id),
'score': score,
'video_id': video_id,
'path': path }
return currJson
def getJsonTree(list_of_paths, parent, data, typeTree):
currDict = dict()
for path in list_of_paths:
os_id = path[0]
name = path[1]
        # if there are more components after this name, it's a directory
if len(path) > 2:
# if it's a new dir
if name not in currDict.keys():
currDict[name] = [[os_id] + path[2:]]
else:
currDict[name].append([os_id] + path[2:])
else:
jsTreeJson = createJson(name, parent, os_id, typeTree)
data.append(jsTreeJson)
    # for each key in the dict, create its json node and recurse to create its children
for key in currDict.keys():
jsTreeJson = createJson(key, parent, 0, typeTree)
data.append(jsTreeJson)
data = getJsonTree(currDict[key], jsTreeJson['id'], data, typeTree)
return data
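# Hedged worked example (editor addition, not part of the original module). It only
# exercises createJson/getJsonTree above; the sample path below is invented.
def _example_tree():
    """Build a tiny jsTree from a single toList()-style path.

    Assuming the module-level id counters are still at 0, the result is a
    'bucket' root node, a 'videos' folder node and a 'clip.mp4' leaf that
    carries the MP4 icon and os_id=1.
    """
    sample_paths = [['1', 'bucket', 'videos', 'clip.mp4']]
    return getJsonTree(sample_paths, '#', [], 0)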
def getJsonById(data, myId):
return [currJson for currJson in data if currJson['id'] == myId][0]
def getPath(data, currJson):
if currJson['parent'] == '#':
return currJson['text']
return getPath(data, getJsonById(data, currJson['parent'])) + '/' + currJson['text']
def addPath(data):
newData = []
for currJson in data:
currJson["path"] = getPath(data, currJson)
newData.append(currJson)
return newData
def getTree(request):
global generator_id
generator_id = 0
data = []
    # get all objects as json dicts
    items = handleObjectStorages(request)
    # split each path into an array
    list_of_paths = toList(items)
    # convert the paths to jsTree nodes: [{'id': value, 'parent': value, 'text': value}]
    data = getJsonTree(list_of_paths, '#', [], 1)
    # add the full path to each node so the file can be downloaded from the object storage:
    # [{'id': value, 'parent': value, 'text': value, 'path': path}]
    data = addPath(data)
return data
| 34.104294
| 245
| 0.595791
|
794dea1cd9655b2a7b32f322140aba90bb0be164
| 96
|
py
|
Python
|
Codewars/8kyu/find-multiples-of-a-number/Python/solution1.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | 7
|
2017-09-20T16:40:39.000Z
|
2021-08-31T18:15:08.000Z
|
Codewars/8kyu/find-multiples-of-a-number/Python/solution1.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | null | null | null |
Codewars/8kyu/find-multiples-of-a-number/Python/solution1.py
|
RevansChen/online-judge
|
ad1b07fee7bd3c49418becccda904e17505f3018
|
[
"MIT"
] | null | null | null |
# Python - 3.6.0
find_multiples = lambda integer, limit: [*range(integer, limit + 1, integer)]
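# Hedged usage example (editor addition, not part of the original solution):
assert find_multiples(5, 25) == [5, 10, 15, 20, 25]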
| 24
| 77
| 0.6875
|
794dea3b3b9100d6fda895cfd9c6ace1527c1c1e
| 746
|
py
|
Python
|
web/nouns/migrations/0015_auto_20151104_2059.py
|
mehrdad-shokri/arguman.org
|
b02e5f50f85964180c02f353c1d0aa84ee6d20dd
|
[
"MIT"
] | 1
|
2021-07-07T11:01:04.000Z
|
2021-07-07T11:01:04.000Z
|
web/nouns/migrations/0015_auto_20151104_2059.py
|
mehrdad-shokri/arguman.org
|
b02e5f50f85964180c02f353c1d0aa84ee6d20dd
|
[
"MIT"
] | null | null | null |
web/nouns/migrations/0015_auto_20151104_2059.py
|
mehrdad-shokri/arguman.org
|
b02e5f50f85964180c02f353c1d0aa84ee6d20dd
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('nouns', '0014_auto_20151104_0127'),
]
operations = [
migrations.AddField(
model_name='noun',
name='language',
field=models.CharField(default='en', max_length=25),
preserve_default=False,
),
migrations.AlterField(
model_name='noun',
name='text',
field=models.CharField(max_length=255, db_index=True),
),
migrations.AlterUniqueTogether(
name='noun',
unique_together=set([('text', 'language')]),
),
]
| 24.866667
| 66
| 0.567024
|
794ded2bb76d1127021745cda8541dc883cf3386
| 10,770
|
py
|
Python
|
zppy/templates/coupled_global.py
|
xylar/zppy
|
8f1c80cfc4eae36731a759be74b3d6f5998cf7f6
|
[
"BSD-3-Clause"
] | null | null | null |
zppy/templates/coupled_global.py
|
xylar/zppy
|
8f1c80cfc4eae36731a759be74b3d6f5998cf7f6
|
[
"BSD-3-Clause"
] | null | null | null |
zppy/templates/coupled_global.py
|
xylar/zppy
|
8f1c80cfc4eae36731a759be74b3d6f5998cf7f6
|
[
"BSD-3-Clause"
] | null | null | null |
# Script to plot some global atmosphere and ocean time series
import math
import numpy as np
import matplotlib.pyplot as plt
from readTS import TS
import matplotlib as mpl
mpl.use('Agg')
import shutil
import glob
from netCDF4 import Dataset
import sys
##---additional function to get moc time series
def getmoc(dir_in):
files = sorted(glob.glob(dir_in+"mocTimeSeries*.nc"))
nfiles = len(files)
print(dir_in,nfiles,'moc files in total')
var =np.array([])
time =np.array([])
for i in range(nfiles):
# Open input file
fin = Dataset(files[i], "r")
time0=fin['year'][:]
var0 = fin['mocAtlantic26'][:]
        for iyear in range(int(time0[0]), int(time0[-1]) + 1):
if(i>0 and iyear <= time[-1]):
print('the amoc value for year',iyear, 'has been included in the moc time series from another moc file',files[i-1], time[-1], 'Skipping...')
else:
imon = np.where(time0==iyear)[0]
if(len(imon)==12):
var=np.append(var,np.mean(var0[imon]))
time=np.append(time,iyear)
else:
print('error in input file :',files[i])
return time,var
# -----------------------------------------------------------------------------
# Function to add horizontal line showing average value over a specified period
def add_line(year, var, year1, year2, ax, format="%4.2f", lw=1, color='b'):
i1 = (np.abs(year-year1)).argmin()
i2 = (np.abs(year-year2)).argmin()
tmp = np.average(var[i1:i2+1])
ax.plot((year[i1],year[i2]),(tmp,tmp), lw=lw, color=color)
ax.text(ax.get_xlim()[1]+1,tmp,format % tmp,va='center',color=color)
return
# -----------------------------------------------------------------------------
# Function to add line showing linear trend over a specified period
def add_trend(year, var, year1, year2, ax, format="%4.2f", lw=1, color='b',
verbose=False, ohc=False, vol=False):
i1 = (np.abs(year-year1)).argmin()
i2 = (np.abs(year-year2)).argmin()
x = year[i1:i2+1]
y = var[i1:i2+1]
fit = np.polyfit(x,y,1)
if (verbose):
print(fit)
fit_fn = np.poly1d(fit)
ax.plot(x, fit_fn(x), lw=lw, ls='--', c=color)
if (ohc):
# Earth radius 6371229. from MPAS-O output files
heat_uptake = fit[0] / ( 4.0*math.pi*(6371229.)**2*365.0*86400.)
ax.text(ax.get_xlim()[1]+1,fit_fn(x[-1]),'%+4.2f W m$^{-2}$' % (heat_uptake),color=color)
if (vol):
# Earth radius 6371229. from MPAS-O output files
#sea_lvl = fit[0] / ( 4.0*math.pi*(6371229.)**2*0.7) #for oceanic portion of the Earth surface
ax.text(ax.get_xlim()[1]+1,fit_fn(x[-1]),'%+5.4f mm yr$^{-1}$' % (fit[0]),color=color)
return
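# Hedged numeric check (editor addition, not part of the original script): convert an
# OHC trend in J/yr into the W m-2 value annotated on the plot, using the same Earth
# radius (6371229 m, from MPAS-O output) as add_trend. A trend of 1e22 J/yr comes out
# to roughly 0.62 W m-2.
def _example_heat_uptake(ohc_trend_j_per_yr=1.0e22):
    earth_area = 4.0 * math.pi * (6371229.0) ** 2  # m^2
    return ohc_trend_j_per_yr / (earth_area * 365.0 * 86400.0)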
# -----------------------------------------------------------------------------
# These are the "Tableau 20" colors as RGB.
t20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts.
for i in range(len(t20)):
r, g, b = t20[i]
t20[i] = (r / 255., g / 255., b / 255.)
# "Tableau 10" uses every other color
t10 = []
for i in range(0,len(t20),2):
t10.append(t20[i])
# -----------------------------------------------------------------------------
# --- Atmos data ---
# Experiments
case_dir = sys.argv[1]
experiment_name = sys.argv[2]
figstr = sys.argv[3]
exps = [
{'atmos':'{}/post/atm/glb/ts/monthly/10yr/glb.xml'.format(case_dir),
'ocean':'{}/post/ocn/glb/ts/monthly/10yr/glb.xml'.format(case_dir),
'moc':'{}/post/ocn/glb/ts/monthly/10yr/'.format(case_dir),
'vol':'{}/post/ocn/glb/ts/monthly/10yr/glb.xml'.format(case_dir),
'name': experiment_name,
'yoffset':0.0,
'yr':([{{ year1 }},{{ year2 }}],),
'color':'{{ color }}',
}
]
# Variables to extract
vars = ["RESTOM", "RESSURF", "TREFHT", "FSNTOA", "FLUT","PRECC","PRECL","QFLX"]
# Read data
for exp in exps:
print(exp['atmos'])
ts = TS(exp['atmos'])
exp['annual'] = {}
for var in vars:
print(var)
v = ts.globalAnnual(var)
exp['annual'][var] = v
if 'year' not in exp['annual']:
time = v.getTime()
exp['annual']['year'] = [x.year for x in time.asComponentTime()]
del(ts)
# Optionally read ohc
if exp['ocean'] != None:
ts = TS(exp['ocean'])
exp['annual']['ohc'] = ts.globalAnnual('ohc')
        # anomalies with respect to the first year
exp['annual']['ohc'][:] = exp['annual']['ohc'][:] - exp['annual']['ohc'][0]
if exp['vol'] != None:
ts = TS(exp['vol'])
exp['annual']['volume'] = ts.globalAnnual('volume')
        # anomalies with respect to the first year
exp['annual']['volume'][:] = exp['annual']['volume'][:] - exp['annual']['volume'][0]
# -----------------------------------------------------------------------------
# --- Generate plots ---
xlim = [0., 100.]
fig = plt.figure(figsize=[13.5, 16.5])
nrows = 4
ncols = 2
#xlim = [0., 31.]
#fig = plt.figure(figsize=[13.5, 9.0])
#nrows = 2
#ncols = 2
# -----------
# First panel
# -----------
ax = plt.subplot(nrows, ncols, 1)
ax.set_xlim(xlim)
ax.set_ylim([-1.5,1.5])
for exp in exps:
year = np.array(exp['annual']['year']) + exp['yoffset']
var = np.array(exp['annual']['RESTOM'])
# ax.plot(year,var,lw=1.5,marker='o',c=exp['color'],label=exp['name'])
ax.plot(year,var,lw=1.0,marker=None,c=exp['color'],label=exp['name'])
if exp['yr'] is not None:
print(exp['name'])
for yrs in exp['yr']:
add_line(year,var,yrs[0],yrs[1],format="%4.2f",ax=ax,lw=2,color=exp['color'])
add_trend(year,var,yrs[0],yrs[1],format="%4.2f",ax=ax,lw=2,color=exp['color'])
ax.axhline(y=0,lw=1,c='0.5')
ax.set_title("Net TOA flux (restom)")
ax.set_xlabel("Year")
ax.set_ylabel("W m-2")
ax.legend(loc="best")
# ------------
# Second panel
# ------------
ax = plt.subplot(nrows, ncols, 2)
ax.set_xlim(xlim)
for exp in exps:
year = np.array(exp['annual']['year']) + exp['yoffset']
var = np.array(exp['annual']['TREFHT']) - 273.15
# ax.plot(year,var,lw=1.5,marker='o',c=exp['color'],label=exp['name'])
ax.plot(year,var,lw=1.0,marker=None,c=exp['color'],label=exp['name'])
if exp['yr'] is not None:
print(exp['name'])
for yrs in exp['yr']:
add_line(year,var,yrs[0],yrs[1],format="%4.2f",ax=ax,lw=2,color=exp['color'])
add_trend(year,var,yrs[0],yrs[1],format="%4.2f",ax=ax,lw=2,color=exp['color'])
ax.set_title("Global surface air temperature")
ax.set_xlabel("Year")
ax.set_ylabel("degC")
#ax.legend(loc="upper left")
# ------------
# Third panel
# ------------
ax = plt.subplot(nrows, ncols, 3)
ax.set_xlim(xlim)
for exp in exps:
year = np.array(exp['annual']['year']) + exp['yoffset']
var = np.array(exp['annual']['FSNTOA'])
# ax.plot(year,var,lw=1.5,marker='o',c=exp['color'],label=exp['name'])
ax.plot(year,var,lw=1.0,marker=None,c=exp['color'],label=exp['name'])
var = np.array(exp['annual']['FLUT'])
# ax.plot(year,var,lw=1.5,marker='o',ls=':',c=exp['color'])
ax.plot(year,var,lw=1.0,marker=None,ls=':',c=exp['color'])
ax.set_title("TOA radiation: SW (solid), LW (dashed)")
ax.set_xlabel("Year")
ax.set_ylabel("W m-2")
#ax.legend(loc="lower left")
# ------------
# Fourth panel
# ------------
ax = plt.subplot(nrows, ncols, 4)
ax.set_xlim(xlim)
ax.set_ylim([-0.3,0.3])
for exp in exps:
year = np.array(exp['annual']['year']) + exp['yoffset']
var = np.array(exp['annual']['RESTOM']) - np.array(exp['annual']['RESSURF'])
# ax.plot(year,var,lw=1.5,marker='o',c=exp['color'],label=exp['name'])
ax.plot(year,var,lw=1.0,marker=None,c=exp['color'],label=exp['name'])
if exp['yr'] is not None:
print(exp['name'])
for yrs in exp['yr']:
add_line(year,var,yrs[0],yrs[1],format="%4.2f",ax=ax,lw=2,color=exp['color'])
ax.set_title("Net atm energy imbalance (restom-ressurf)")
ax.set_xlabel("Year")
ax.set_ylabel("W m-2")
#ax.legend(loc="lower left")
# -----------
# Fifth panel
# -----------
ax = plt.subplot(nrows, ncols, 5)
ax.set_xlim(xlim)
ax.set_ylim([-0.3e24,0.9e24])
for exp in exps:
if exp['ocean'] != None:
year = np.array(exp['annual']['year']) + exp['yoffset']
var = np.array(exp['annual']['ohc'])
ax.plot(year,var,lw=1.5,marker=None,c=exp['color'],label=exp['name'])
for yrs in exp['yr']:
add_trend(year,var,yrs[0],yrs[1],format="%4.2f",ax=ax,lw=3,color=exp['color'],ohc=True)
ax.axhline(y=0,lw=1,c='0.5')
ax.set_title("Change in ocean heat content")
ax.set_xlabel("Year")
ax.set_ylabel("J")
ax.legend(loc="best")
# -----------
# Sixth panel
# -----------
ax = plt.subplot(nrows, ncols, 6)
ax.set_xlim(xlim)
ax.set_ylim([4,22])
for exp in exps:
if exp['moc'] != None:
[year_moc,var]=getmoc(exp['moc'])
ax.plot(year_moc,var,lw=1.5,marker=None,c=exp['color'],label=exp['name'])
for yrs in exp['yr']:
add_trend(year_moc,var,yrs[0],yrs[1],format="%4.2f",ax=ax,lw=3,color=exp['color'],verbose=True)
ax.axhline(y=10,lw=1,c='0.5')
ax.set_title("Max MOC Atlantic streamfunction at 26.5N")
ax.set_xlabel("Year")
ax.set_ylabel("Sv")
ax.legend(loc="best")
# -----------
# Seventh panel
# -----------
ax = plt.subplot(nrows, ncols, 7)
ax.set_xlim(xlim)
#ax.set_ylim([4,22])
for exp in exps:
if exp['vol'] != None:
year_vol = np.array(exp['annual']['year']) + exp['yoffset']
var = 1e3*np.array(exp['annual']['volume'])/(4.0*math.pi*(6371229.)**2*0.7)
ax.plot(year_vol,var,lw=1.5,marker=None,c=exp['color'],label=exp['name'])
for yrs in exp['yr']:
add_trend(year_vol,var,yrs[0],yrs[1],format="%5.3f",ax=ax,lw=3,color=exp['color'],verbose=True,vol=True)
#ax.axhline(y=10,lw=1,c='0.5')
ax.set_title("Change in sea level")
ax.set_xlabel("Year")
ax.set_ylabel("mm")
ax.legend(loc="best")
# ------------
# Eighth panel
# ------------
ax = plt.subplot(nrows, ncols, 8)
ax.set_xlim(xlim)
ax.set_ylim([-1,1])
for exp in exps:
year = np.array(exp['annual']['year']) + exp['yoffset']
var = 365*86400*(np.array(exp['annual']['QFLX']) - 1e3*(np.array(exp['annual']['PRECC']) + np.array(exp['annual']['PRECL'])))
ax.plot(year,var,lw=1.0,marker=None,c=exp['color'],label=exp['name'])
if exp['yr'] is not None:
print(exp['name'])
for yrs in exp['yr']:
add_line(year,var,yrs[0],yrs[1],format="%5.4f",ax=ax,lw=2,color=exp['color'])
ax.set_title("Net atm water imbalance (evap-prec)")
ax.set_xlabel("Year")
ax.set_ylabel("mm yr-1")
#ax.legend(loc="lower left")
fig.tight_layout()
fig.savefig(figstr+".pdf")
fig.savefig(figstr+".png",dpi=150)
plt.clf()
| 32.245509
| 150
| 0.576602
|
794dedb615644350ee02dda07e17ff8e896d2c2a
| 210
|
py
|
Python
|
mcache/datadog_checks/mcache/__init__.py
|
glasser/integrations-core
|
1dd515d49b1690a1369ee5195713605b1b072b1f
|
[
"BSD-3-Clause"
] | 2
|
2019-05-28T03:48:29.000Z
|
2019-07-05T07:05:58.000Z
|
mcache/datadog_checks/mcache/__init__.py
|
glasser/integrations-core
|
1dd515d49b1690a1369ee5195713605b1b072b1f
|
[
"BSD-3-Clause"
] | 4
|
2019-07-03T02:53:19.000Z
|
2019-07-10T14:52:14.000Z
|
mcache/datadog_checks/mcache/__init__.py
|
glasser/integrations-core
|
1dd515d49b1690a1369ee5195713605b1b072b1f
|
[
"BSD-3-Clause"
] | 1
|
2019-12-23T13:35:17.000Z
|
2019-12-23T13:35:17.000Z
|
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from .__about__ import __version__
from .mcache import Memcache
__all__ = ['__version__', 'Memcache']
| 26.25
| 59
| 0.752381
|
794dedba40b40b476e8d63ea141f220d3753504e
| 1,679
|
py
|
Python
|
homeassistant/components/multimatic/entities.py
|
thomasgermain/home-assistant
|
69a8ba678e0276bc1bfde0f3d9e9d3682209f962
|
[
"Apache-2.0"
] | 7
|
2019-08-15T13:36:58.000Z
|
2020-03-18T10:46:29.000Z
|
homeassistant/components/multimatic/entities.py
|
thomasgermain/home-assistant
|
69a8ba678e0276bc1bfde0f3d9e9d3682209f962
|
[
"Apache-2.0"
] | 73
|
2020-10-01T06:39:39.000Z
|
2022-03-31T06:16:15.000Z
|
homeassistant/components/multimatic/entities.py
|
thomasgermain/home-assistant
|
69a8ba678e0276bc1bfde0f3d9e9d3682209f962
|
[
"Apache-2.0"
] | 4
|
2019-10-26T14:25:13.000Z
|
2020-11-10T11:00:18.000Z
|
"""Common entities."""
from __future__ import annotations
from abc import ABC
import logging
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from homeassistant.util import slugify
from .const import DOMAIN as MULTIMATIC
from .coordinator import MultimaticCoordinator
_LOGGER = logging.getLogger(__name__)
class MultimaticEntity(CoordinatorEntity, ABC):
"""Define base class for multimatic entities."""
coordinator: MultimaticCoordinator
def __init__(self, coordinator: MultimaticCoordinator, domain, device_id):
"""Initialize entity."""
super().__init__(coordinator)
id_part = slugify(
device_id
+ (f"_{coordinator.api.serial}" if coordinator.api.fixed_serial else "")
)
self.entity_id = f"{domain}.{id_part}"
self._unique_id = slugify(f"{MULTIMATIC}_{coordinator.api.serial}_{device_id}")
self._remove_listener = None
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return self._unique_id
async def async_added_to_hass(self):
"""Call when entity is added to hass."""
await super().async_added_to_hass()
_LOGGER.debug("%s added", self.entity_id)
self.coordinator.add_api_listener(self.unique_id)
async def async_will_remove_from_hass(self) -> None:
"""Run when entity will be removed from hass."""
await super().async_will_remove_from_hass()
self.coordinator.remove_api_listener(self.unique_id)
@property
def available(self) -> bool:
"""Return if entity is available."""
return super().available and self.coordinator.data
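# Hedged illustration (editor addition, not part of the original component), assuming
# the integration DOMAIN constant is 'multimatic': for a coordinator whose api reports
# serial='1234' with fixed_serial=True, constructing a subclass with domain='climate'
# and device_id='Bathroom zone' yields
#   entity_id -> 'climate.bathroom_zone_1234'
#   unique_id -> 'multimatic_1234_bathroom_zone'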
| 31.092593
| 87
| 0.691483
|
794dedff7de40d3059d8aaca01a7149e012da0bd
| 3,473
|
py
|
Python
|
app/models.py
|
azanegin/miptcommuna
|
97ad8cbe161cbbd86494ef7530588492571024b7
|
[
"MIT"
] | null | null | null |
app/models.py
|
azanegin/miptcommuna
|
97ad8cbe161cbbd86494ef7530588492571024b7
|
[
"MIT"
] | null | null | null |
app/models.py
|
azanegin/miptcommuna
|
97ad8cbe161cbbd86494ef7530588492571024b7
|
[
"MIT"
] | null | null | null |
"""
Definition of models.
"""
from django.db import models
from django.contrib.auth.models import User
class Person(models.Model):
user = models.OneToOneField(User)
    department = models.TextField(default='')  # faculty / department
    group = models.TextField(default='')  # study group
    vkLink = models.TextField(default='')  # link to VK profile
    fblink = models.TextField(default='')  # link to Facebook profile
    skype = models.TextField(default='')  # Skype username
    physMail = models.TextField(default='')  # MIPT (phystech) e-mail
    mainMail = models.TextField(default='')  # main e-mail
    phone = models.TextField(default='')  # phone number
    # dormitory number
    dormNumber = models.DecimalField(max_digits=5, decimal_places=0)
    # room number
    roomNumber = models.DecimalField(max_digits=5, decimal_places=0)
class Shop(models.Model):
    shopName = models.TextField()  # shop name
class ShopTag(models.Model):
    shop = models.ForeignKey(Shop)
    tag = models.TextField()  # tag attached to a specific shop
class Discount(models.Model):
    owner = models.ForeignKey(Person)  # discount owner
    shop = models.ForeignKey(Shop)  # shop the discount applies to
    discountType = models.TextField()  # discount type
    discount = models.FloatField()  # discount amount
    expTime = models.DateField()  # expiration date
    description = models.TextField()  # discount description
class Meeting(models.Model):
    metType = models.TextField()  # meeting type
    time = models.TimeField()  # day
    dayPart = models.TextField()  # part of the day
    location = models.TextField(default='')  # event location
    creator = models.ForeignKey(Person)  # creator
    support = models.TextField()  # support from MKI, the dean's office, etc.
    # participation cost
    money = models.DecimalField(max_digits=5, decimal_places=0)
    description = models.TextField()  # event description
    link = models.TextField()  # link to the VK event
class Member(models.Model):
    meeting = models.ForeignKey(Meeting)  # meeting
    user = models.ForeignKey(Person)  # meeting participant
    donate = models.DecimalField(max_digits=5, decimal_places=2)  # participant's contribution
class Gallery(models.Model):
    meet = models.ForeignKey(Meeting)  # meeting
    uploader = models.ForeignKey(Person)  # who shared access to the gallery
    link = models.TextField()  # link to the gallery
class Item(models.Model):
    name = models.TextField()  # item name
    itemType = models.TextField()  # item classification
    owner = models.ForeignKey(Person)  # owner
    quality = models.TextField()  # item condition
    location = models.TextField()  # where it can be found
    status = models.BooleanField(default=False)  # whether it is currently in use
    isCommon = models.BooleanField(default=True)  # shared / communal item
    description = models.TextField()  # description
def __str__(self):
return self.name
class Query(models.Model):
    who = models.ForeignKey(Person)  # who is asking
    need = models.TextField()  # what they are looking for
    time = models.TimeField()  # for which day
    dayPart = models.TextField()  # time of day
    duration = models.TextField()  # duration
    description = models.TextField()  # description
    compelete = models.BooleanField(default=False)  # fulfilled
    cancel = models.BooleanField(default=False)  # cancelled
class Squery(models.Model):
    query = models.ForeignKey(Query)  # the query
    person = models.ForeignKey(Person)  # the user
| 36.177083
| 77
| 0.700835
|
794def6324eefcff7db3e56879f3d4273ff422b1
| 2,482
|
py
|
Python
|
docs/conf.py
|
mrob95/pycom-VirtualDesktopAccessor
|
fbd45f821c34f5961ec7b33a7e3045cc652dd862
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
mrob95/pycom-VirtualDesktopAccessor
|
fbd45f821c34f5961ec7b33a7e3045cc652dd862
|
[
"MIT"
] | null | null | null |
docs/conf.py
|
mrob95/pycom-VirtualDesktopAccessor
|
fbd45f821c34f5961ec7b33a7e3045cc652dd862
|
[
"MIT"
] | null | null | null |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
directory = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
sys.path.insert(0, directory)
# -- Project information -----------------------------------------------------
project = 'pyvda'
copyright = '2021, Mike Roberts'
author = 'Mike Roberts'
# The full version, including alpha/beta/rc tags
release = '0.2.2'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
]
autodoc_member_order = 'bysource'
autoclass_content = 'both'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# html_theme = 'classic'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
#---------------------------------------------------------------------------
# Mock libraries that are only available on some platforms or with optional
# dependencies installed.
from unittest.mock import Mock
mock_modules = {
"comtypes", "ctypes", "ctypes.wintypes", "pywin32"
}
for module_name in mock_modules:
sys.modules[module_name] = Mock()
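# Hedged illustration (editor addition, not part of the original configuration): after
# the loop above, importing any of the mocked names simply returns the Mock object, so
# autodoc can import the package on platforms without the Windows-only dependencies:
#
#   import comtypes   # -> bound to a Mock instance, no ImportError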
| 31.820513
| 79
| 0.656728
|
794def8bb79b695924270a9cf2b0aee42c7bf220
| 4,184
|
py
|
Python
|
src/Report/Report.py
|
kuefmz/software_classification
|
0dee3a046e59052ab272e4029195fb21f3d58c04
|
[
"Apache-2.0"
] | null | null | null |
src/Report/Report.py
|
kuefmz/software_classification
|
0dee3a046e59052ab272e4029195fb21f3d58c04
|
[
"Apache-2.0"
] | null | null | null |
src/Report/Report.py
|
kuefmz/software_classification
|
0dee3a046e59052ab272e4029195fb21f3d58c04
|
[
"Apache-2.0"
] | null | null | null |
from sklearn.metrics import make_scorer
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score,precision_recall_fscore_support
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import cross_validate
import pickle
import numpy as np
import pandas as pd
#def tn(y_true, y_pred): return confusion_matrix(y_true, y_pred)[0, 0]
#def fp(y_true, y_pred): return confusion_matrix(y_true, y_pred)[0, 1]
#def fn(y_true, y_pred): return confusion_matrix(y_true, y_pred)[1, 0]
#def tp(y_true, y_pred): return confusion_matrix(y_true, y_pred)[1, 1]
tn = lambda y_true, y_pred: confusion_matrix(y_true, y_pred)[0, 0]
fp = lambda y_true, y_pred: confusion_matrix(y_true, y_pred)[0, 1]
fn = lambda y_true, y_pred: confusion_matrix(y_true, y_pred)[1, 0]
tp = lambda y_true, y_pred: confusion_matrix(y_true, y_pred)[1, 1]
score_metrics = {'accuracy': accuracy_score,
'precision': precision_score,
'recall': recall_score,
'f1-score': f1_score,
#'p_r_f1_sup': precision_recall_fscore_support,
'tp': tp, 'tn': tn,
'fp': fp, 'fn': fn}
def report(clf, train_name, x_train, y_train, label, name='classifier', cv=5, dict_scoring=None, fit_params=None, save=False):
    '''
    Create a metric report automatically with the cross_validate function.
    @param clf: (model) classifier
    @param train_name: (string) name of the training set
    @param x_train: (list or matrix or tensor) training x data
    @param y_train: (list) label data
    @param label: positive class label, also used to tag the saved model
    @param name: (string) name of the model (default 'classifier')
    @param cv: (int) number of folds for cross-validation (default 5)
    @param dict_scoring: (dict) dictionary of metric names and functions
    @param fit_params: (dict) additional parameters for model fitting
    @param save: (bool) whether the fitted model should be saved
    @return: (pandas.DataFrame, model) dataframe with the value of each metric for each fold
             and its mean, plus the model fitted on the full training data
    '''
if dict_scoring!=None:
score = dict_scoring.copy() # save the original dictionary
for i in score.keys():
if len(set(y_train))>2:
if i in ["precision", "recall", "f1-score"]:
score[i] = make_scorer(score[i], average = 'weighted') # make each function scorer
elif i=="roc_auc":
score[i] = make_scorer(score[i], average = 'weighted', multi_class="ovo",needs_proba=True) # make each function scorer
else:
score[i] = make_scorer(score[i]) # make each function scorer
elif i in ['precision', 'recall', 'f1-score'] :
score[i] = make_scorer(score[i], pos_label=label) # make each function scorer
else:
score[i] = make_scorer(score[i])
try:
scores = cross_validate(clf, x_train, y_train, scoring=score,
cv=cv, return_train_score=True, n_jobs=-1, fit_params=fit_params)
except:
scores = cross_validate(clf, x_train, y_train, scoring=score,
cv=cv, return_train_score=True, fit_params=fit_params)
#print(scores)
# Train test on the overall data
model = clf
model.fit(x_train, y_train)
#features = model[:-1].get_feature_names_out()
#print(f'{label}: ', file=open("output.txt", "a"))
#for i in features:
# print(f'{i}', file=open("output.txt", "a"))
#y_pred = model.predict(X_test)#>0.5).astype(int)
if save:
filename= name+label+".sav"
pickle.dump(model, open('results/models/'+filename, 'wb'))
#csvFileName = f"{label.lower().replace(' ', '_')}.csv"
#with open('results/scoreboards/' + csvFileName, 'r') as csvfile:
# rownum = len(csvfile.readlines())
# initialisation
res = {'PipelineID' : label,
'Pipeline' : name ,
'train_set' : train_name}
for i in scores: # loop on each metric generate text and values
if i == "estimator": continue
for j in enumerate(scores[i]):
res[i+"_cv"+str(j[0]+1)] = j[1]
res[i+"_mean"] = np.mean(scores[i])
# add metrics averall dataset on the dictionary
#print(scores)
#print(score)
del scores['fit_time']
del scores['score_time']
#for i in scores: # compute metrics
# scores[i] = np.append(scores[i] ,score[i.split("test_")[-1]](model, X_test, y_test))
# res[i.split("test_")[-1]+'_overall'] = scores[i][-1]
return pd.DataFrame(data=res.values(), index=res.keys()).T, model
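# Hedged usage sketch (editor addition, not part of the original module). All names
# below (the classifier, X_train, y_train and the 'positive' label) are illustrative:
#
#   from sklearn.linear_model import LogisticRegression
#   scores_df, fitted = report(LogisticRegression(max_iter=1000), 'train_v1',
#                              X_train, y_train, label='positive', name='logreg',
#                              cv=5, dict_scoring=score_metrics, save=False)
#
# scores_df then holds one column per metric and fold plus the *_mean columns built above.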
| 40.230769
| 126
| 0.708174
|
794defa26f562e3224f4b44ef77744ce7bd5fd39
| 2,174
|
py
|
Python
|
optimus/outliers/mad.py
|
liRONCO11/optimus
|
0ca0567267300397c7ba711483c46f94ac265e55
|
[
"Apache-2.0"
] | 1,045
|
2017-07-17T17:59:46.000Z
|
2021-06-15T07:06:48.000Z
|
optimus/outliers/mad.py
|
liRONCO11/optimus
|
0ca0567267300397c7ba711483c46f94ac265e55
|
[
"Apache-2.0"
] | 955
|
2017-07-14T15:47:58.000Z
|
2021-05-27T14:16:24.000Z
|
optimus/outliers/mad.py
|
liRONCO11/optimus
|
0ca0567267300397c7ba711483c46f94ac265e55
|
[
"Apache-2.0"
] | 226
|
2017-08-04T20:41:33.000Z
|
2021-05-21T08:28:33.000Z
|
from optimus.helpers.constants import RELATIVE_ERROR
from optimus.helpers.filters import dict_filter
from optimus.helpers.json import dump_json
from optimus.outliers.abstract_outliers_bounds import AbstractOutlierBounds
class MAD(AbstractOutlierBounds):
"""
Handle outliers using mad http://eurekastatistics.com/using-the-median-absolute-deviation-to-find-outliers/
"""
def __init__(self, df, col_name, threshold: int, relative_error: int = RELATIVE_ERROR):
"""
:param df:
:param col_name:
:type threshold: object
:type relative_error: object
"""
self.df = df
self.col_name = col_name
self.threshold = threshold
self.relative_error = relative_error
self.upper_bound, self.lower_bound = dict_filter(self.whiskers(), ["upper_bound", "lower_bound"])
super().__init__(df, col_name, self.lower_bound, self.upper_bound)
def whiskers(self):
"""
        Get the whiskers used to define outliers
:return:
"""
mad_median_value = self.df.cols.mad(self.col_name, self.relative_error, more=True)
col_name = self.col_name
mad_value = mad_median_value[col_name]["mad"]
median_value = mad_median_value[col_name]["median"]
lower_bound = median_value - self.threshold * mad_value
upper_bound = median_value + self.threshold * mad_value
return {"lower_bound": lower_bound, "upper_bound": upper_bound}
def info(self, output: str = "dict"):
"""
Get whiskers, iqrs and outliers and non outliers count
:return:
"""
upper_bound, lower_bound, = dict_filter(self.whiskers(),
["upper_bound", "lower_bound"])
result = {"count_outliers": self.count(), "count_non_outliers": self.non_outliers_count(),
"lower_bound": lower_bound, "lower_bound_count": self.count_lower_bound(lower_bound),
"upper_bound": upper_bound, "upper_bound_count": self.count_upper_bound(upper_bound)}
if output == "json":
result = dump_json(result)
return result
| 38.821429
| 111
| 0.649954
|
794defe6025092790d7da33d724c0e26fe35caf6
| 64,512
|
py
|
Python
|
sympy/plotting/plot.py
|
shipci/sympy
|
4b59927bed992b980c9b3faac01becb36feef26b
|
[
"BSD-3-Clause"
] | 319
|
2016-09-22T15:54:48.000Z
|
2022-03-18T02:36:58.000Z
|
sympy/plotting/plot.py
|
shipci/sympy
|
4b59927bed992b980c9b3faac01becb36feef26b
|
[
"BSD-3-Clause"
] | 9
|
2016-11-03T21:56:41.000Z
|
2020-08-09T19:27:37.000Z
|
sympy/plotting/plot.py
|
shipci/sympy
|
4b59927bed992b980c9b3faac01becb36feef26b
|
[
"BSD-3-Clause"
] | 27
|
2016-10-06T16:05:32.000Z
|
2022-03-18T02:37:00.000Z
|
"""Plotting module for Sympy.
A plot is represented by the ``Plot`` class that contains a reference to the
backend and a list of the data series to be plotted. The data series are
instances of classes meant to simplify getting points and meshes from sympy
expressions. ``plot_backends`` is a dictionary with all the backends.
This module provides only the essentials. For anything fancier, use the backend
directly. You can get the backend wrapper for every plot from the
``_backend`` attribute. Moreover, the data series classes have various useful
methods like ``get_points``, ``get_segments``, ``get_meshes``, etc., that may
be useful if you wish to use another plotting library.
Especially if you need publication ready graphs and this module is not enough
for you - just get the ``_backend`` attribute and add whatever you want
directly to it. In the case of matplotlib (the common way to graph data in
python) just copy ``_backend.fig`` which is the figure and ``_backend.ax``
which is the axis and work on them as you would on any other matplotlib object.
Simplicity of code takes much greater importance than performance. Don't use this
module if you care at all about performance: a new backend instance is initialized
every time you call ``show()`` and the old one is left to the garbage collector.
"""
from __future__ import print_function, division
from inspect import getargspec
from itertools import chain
from collections import Callable
import warnings
from sympy import sympify, Expr, Tuple, Dummy, Symbol
from sympy.external import import_module
from sympy.utilities.decorator import doctest_depends_on
from sympy.utilities.iterables import is_sequence
from .experimental_lambdify import (vectorized_lambdify, lambdify)
# N.B.
# When changing the minimum module version for matplotlib, please change
# the same in the `SymPyDocTestFinder`` in `sympy/utilities/runtests.py`
# Backend specific imports - textplot
from sympy.plotting.textplot import textplot
# Global variable
# Set to False when running tests / doctests so that the plots don't show.
_show = True
def unset_show():
global _show
_show = False
##############################################################################
# The public interface
##############################################################################
class Plot(object):
"""The central class of the plotting module.
For interactive work the function ``plot`` is better suited.
This class permits the plotting of sympy expressions using numerous
backends (matplotlib, textplot, the old pyglet module for sympy, Google
charts api, etc).
The figure can contain an arbitrary number of plots of sympy expressions,
lists of coordinates of points, etc. Plot has a private attribute _series that
contains all data series to be plotted (expressions for lines or surfaces,
lists of points, etc (all subclasses of BaseSeries)). Those data series are
instances of classes not imported by ``from sympy import *``.
The customization of the figure is on two levels. Global options that
concern the figure as a whole (eg title, xlabel, scale, etc) and
per-data series options (eg name) and aesthetics (eg. color, point shape,
line type, etc.).
The difference between options and aesthetics is that an aesthetic can be
a function of the coordinates (or parameters in a parametric plot). The
supported values for an aesthetic are:
- None (the backend uses default values)
- a constant
- a function of one variable (the first coordinate or parameter)
- a function of two variables (the first and second coordinate or
parameters)
- a function of three variables (only in nonparametric 3D plots)
Their implementation depends on the backend so they may not work in some
backends.
If the plot is parametric and the arity of the aesthetic function permits
it the aesthetic is calculated over parameters and not over coordinates.
If the arity does not permit calculation over parameters the calculation is
done over coordinates.
Only cartesian coordinates are supported for the moment, but you can use
the parametric plots to plot in polar, spherical and cylindrical
coordinates.
The arguments for the constructor Plot must be subclasses of BaseSeries.
Any global option can be specified as a keyword argument.
The global options for a figure are:
- title : str
- xlabel : str
- ylabel : str
- legend : bool
- xscale : {'linear', 'log'}
- yscale : {'linear', 'log'}
- axis : bool
- axis_center : tuple of two floats or {'center', 'auto'}
- xlim : tuple of two floats
- ylim : tuple of two floats
- aspect_ratio : tuple of two floats or {'auto'}
- autoscale : bool
- margin : float in [0, 1]
The per data series options and aesthetics are:
There are none in the base series. See below for options for subclasses.
Some data series support additional aesthetics or options:
ListSeries, LineOver1DRangeSeries, Parametric2DLineSeries,
Parametric3DLineSeries support the following:
Aesthetics:
- line_color : function which returns a float.
options:
- label : str
- steps : bool
- integers_only : bool
SurfaceOver2DRangeSeries, ParametricSurfaceSeries support the following:
aesthetics:
- surface_color : function which returns a float.
"""
def __init__(self, *args, **kwargs):
super(Plot, self).__init__()
# Options for the graph as a whole.
# The possible values for each option are described in the docstring of
# Plot. They are based purely on convention, no checking is done.
self.title = None
self.xlabel = None
self.ylabel = None
self.aspect_ratio = 'auto'
self.xlim = None
self.ylim = None
self.axis_center = 'auto'
self.axis = True
self.xscale = 'linear'
self.yscale = 'linear'
self.legend = False
self.autoscale = True
self.margin = 0
# Contains the data objects to be plotted. The backend should be smart
# enough to iterate over this list.
self._series = []
self._series.extend(args)
# The backend type. On every show() a new backend instance is created
# in self._backend which is tightly coupled to the Plot instance
# (thanks to the parent attribute of the backend).
self.backend = DefaultBackend
# The keyword arguments should only contain options for the plot.
for key, val in kwargs.items():
if hasattr(self, key):
setattr(self, key, val)
def show(self):
# TODO move this to the backend (also for save)
if hasattr(self, '_backend'):
self._backend.close()
self._backend = self.backend(self)
self._backend.show()
def save(self, path):
if hasattr(self, '_backend'):
self._backend.close()
self._backend = self.backend(self)
self._backend.save(path)
def __str__(self):
series_strs = [('[%d]: ' % i) + str(s)
for i, s in enumerate(self._series)]
return 'Plot object containing:\n' + '\n'.join(series_strs)
def __getitem__(self, index):
return self._series[index]
def __setitem__(self, index, *args):
if len(args) == 1 and isinstance(args[0], BaseSeries):
self._series[index] = args
def __delitem__(self, index):
del self._series[index]
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def append(self, arg):
"""Adds an element from a plot's series to an existing plot.
Examples
========
Consider two ``Plot`` objects, ``p1`` and ``p2``. To add the
second plot's first series object to the first, use the
``append`` method, like so:
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
>>> p1 = plot(x*x)
>>> p2 = plot(x)
>>> p1.append(p2[0])
>>> p1
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
[1]: cartesian line: x for x over (-10.0, 10.0)
See Also
========
extend
"""
if isinstance(arg, BaseSeries):
self._series.append(arg)
else:
raise TypeError('Must specify element of plot to append.')
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def extend(self, arg):
"""Adds all series from another plot.
Examples
========
Consider two ``Plot`` objects, ``p1`` and ``p2``. To add the
second plot to the first, use the ``extend`` method, like so:
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
>>> p1 = plot(x*x)
>>> p2 = plot(x)
>>> p1.extend(p2)
>>> p1
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
[1]: cartesian line: x for x over (-10.0, 10.0)
"""
if isinstance(arg, Plot):
self._series.extend(arg._series)
elif is_sequence(arg):
self._series.extend(arg)
else:
raise TypeError('Expecting Plot or sequence of BaseSeries')
##############################################################################
# Data Series
##############################################################################
#TODO more general way to calculate aesthetics (see get_color_array)
### The base class for all series
class BaseSeries(object):
"""Base class for the data objects containing stuff to be plotted.
The backend should check if it supports the data series that it's given.
(eg TextBackend supports only LineOver1DRange).
It's the backend responsibility to know how to use the class of
data series that it's given.
Some data series classes are grouped (using a class attribute like is_2Dline)
according to the api they present (based only on convention). The backend is
not obliged to use that api (eg. The LineOver1DRange belongs to the
is_2Dline group and presents the get_points method, but the
TextBackend does not use the get_points method).
"""
# Some flags follow. The rationale for using flags instead of checking base
# classes is that setting multiple flags is simpler than multiple
# inheritance.
is_2Dline = False
# Some of the backends expect:
# - get_points returning 1D np.arrays list_x, list_y
# - get_segments returning np.array (done in Line2DBaseSeries)
# - get_color_array returning 1D np.array (done in Line2DBaseSeries)
# with the colors calculated at the points from get_points
is_3Dline = False
# Some of the backends expect:
# - get_points returning 1D np.arrays list_x, list_y, list_y
# - get_segments returning np.array (done in Line2DBaseSeries)
# - get_color_array returning 1D np.array (done in Line2DBaseSeries)
# with the colors calculated at the points from get_points
is_3Dsurface = False
# Some of the backends expect:
# - get_meshes returning mesh_x, mesh_y, mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
is_contour = False
# Some of the backends expect:
# - get_meshes returning mesh_x, mesh_y, mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
is_implicit = False
# Some of the backends expect:
# - get_meshes returning mesh_x (1D array), mesh_y(1D array,
# mesh_z (2D np.arrays)
# - get_points an alias for get_meshes
#Different from is_contour as the colormap in backend will be
#different
is_parametric = False
# The calculation of aesthetics expects:
# - get_parameter_points returning one or two np.arrays (1D or 2D)
# used for calculation aesthetics
def __init__(self):
super(BaseSeries, self).__init__()
@property
def is_3D(self):
flags3D = [
self.is_3Dline,
self.is_3Dsurface
]
return any(flags3D)
@property
def is_line(self):
flagslines = [
self.is_2Dline,
self.is_3Dline
]
return any(flagslines)
### 2D lines
class Line2DBaseSeries(BaseSeries):
"""A base class for 2D lines.
- adding the label, steps and only_integers options
- making is_2Dline true
- defining get_segments and get_color_array
"""
is_2Dline = True
_dim = 2
def __init__(self):
super(Line2DBaseSeries, self).__init__()
self.label = None
self.steps = False
self.only_integers = False
self.line_color = None
def get_segments(self):
np = import_module('numpy')
points = self.get_points()
if self.steps is True:
x = np.array((points[0], points[0])).T.flatten()[1:]
y = np.array((points[1], points[1])).T.flatten()[:-1]
points = (x, y)
points = np.ma.array(points).T.reshape(-1, 1, self._dim)
return np.ma.concatenate([points[:-1], points[1:]], axis=1)
def get_color_array(self):
np = import_module('numpy')
c = self.line_color
if hasattr(c, '__call__'):
f = np.vectorize(c)
arity = len(getargspec(c)[0])
if arity == 1 and self.is_parametric:
x = self.get_parameter_points()
return f(centers_of_segments(x))
else:
variables = list(map(centers_of_segments, self.get_points()))
if arity == 1:
return f(variables[0])
elif arity == 2:
return f(*variables[:2])
else: # only if the line is 3D (otherwise raises an error)
return f(*variables)
else:
return c*np.ones(self.nb_of_points)
class List2DSeries(Line2DBaseSeries):
"""Representation for a line consisting of list of points."""
def __init__(self, list_x, list_y):
np = import_module('numpy')
super(List2DSeries, self).__init__()
self.list_x = np.array(list_x)
self.list_y = np.array(list_y)
self.label = 'list'
def __str__(self):
return 'list plot'
def get_points(self):
return (self.list_x, self.list_y)
class LineOver1DRangeSeries(Line2DBaseSeries):
"""Representation for a line consisting of a SymPy expression over a range."""
def __init__(self, expr, var_start_end, **kwargs):
super(LineOver1DRangeSeries, self).__init__()
self.expr = sympify(expr)
self.label = str(self.expr)
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.adaptive = kwargs.get('adaptive', True)
self.depth = kwargs.get('depth', 12)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return 'cartesian line: %s for %s over %s' % (
str(self.expr), str(self.var), str((self.start, self.end)))
def get_segments(self):
"""
Adaptively gets segments for plotting.
The adaptive sampling is done by recursively checking if three
points are almost collinear. If they are not collinear, then more
points are added between those points.
References
==========
[1] Adaptive polygonal approximation of parametric curves,
Luiz Henrique de Figueiredo.
"""
if self.only_integers or not self.adaptive:
return super(LineOver1DRangeSeries, self).get_segments()
else:
f = lambdify([self.var], self.expr)
list_segments = []
def sample(p, q, depth):
""" Samples recursively if three points are almost collinear.
For depth < 6, points are added irrespective of whether they
satisfy the collinearity condition or not. The maximum depth
allowed is 12.
"""
np = import_module('numpy')
#Randomly sample to avoid aliasing.
random = 0.45 + np.random.rand() * 0.1
xnew = p[0] + random * (q[0] - p[0])
ynew = f(xnew)
new_point = np.array([xnew, ynew])
#Maximum depth
if depth > self.depth:
list_segments.append([p, q])
#Sample irrespective of whether the line is flat till the
#depth of 6. We are not using linspace to avoid aliasing.
elif depth < 6:
sample(p, new_point, depth + 1)
sample(new_point, q, depth + 1)
#Sample ten points if complex values are encountered
#at both ends. If there is a real value in between, then
#sample those points further.
elif p[1] is None and q[1] is None:
xarray = np.linspace(p[0], q[0], 10)
yarray = list(map(f, xarray))
if any(y is not None for y in yarray):
for i in range(len(yarray) - 1):
if yarray[i] is not None or yarray[i + 1] is not None:
sample([xarray[i], yarray[i]],
[xarray[i + 1], yarray[i + 1]], depth + 1)
                #Sample further if one of the end points is None (i.e. a complex
                #value) or the three points are not almost collinear.
elif (p[1] is None or q[1] is None or new_point[1] is None
or not flat(p, new_point, q)):
sample(p, new_point, depth + 1)
sample(new_point, q, depth + 1)
else:
list_segments.append([p, q])
f_start = f(self.start)
f_end = f(self.end)
sample([self.start, f_start], [self.end, f_end], 0)
return list_segments
def get_points(self):
np = import_module('numpy')
if self.only_integers is True:
list_x = np.linspace(int(self.start), int(self.end),
num=int(self.end) - int(self.start) + 1)
else:
list_x = np.linspace(self.start, self.end, num=self.nb_of_points)
f = vectorized_lambdify([self.var], self.expr)
list_y = f(list_x)
return (list_x, list_y)
class Parametric2DLineSeries(Line2DBaseSeries):
"""Representation for a line consisting of two parametric sympy expressions
over a range."""
is_parametric = True
def __init__(self, expr_x, expr_y, var_start_end, **kwargs):
super(Parametric2DLineSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.label = "(%s, %s)" % (str(self.expr_x), str(self.expr_y))
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.adaptive = kwargs.get('adaptive', True)
self.depth = kwargs.get('depth', 12)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return 'parametric cartesian line: (%s, %s) for %s over %s' % (
str(self.expr_x), str(self.expr_y), str(self.var),
str((self.start, self.end)))
def get_parameter_points(self):
np = import_module('numpy')
return np.linspace(self.start, self.end, num=self.nb_of_points)
def get_points(self):
param = self.get_parameter_points()
fx = vectorized_lambdify([self.var], self.expr_x)
fy = vectorized_lambdify([self.var], self.expr_y)
list_x = fx(param)
list_y = fy(param)
return (list_x, list_y)
def get_segments(self):
"""
Adaptively gets segments for plotting.
The adaptive sampling is done by recursively checking if three
points are almost collinear. If they are not collinear, then more
points are added between those points.
References
==========
[1] Adaptive polygonal approximation of parametric curves,
Luiz Henrique de Figueiredo.
"""
if not self.adaptive:
return super(Parametric2DLineSeries, self).get_segments()
f_x = lambdify([self.var], self.expr_x)
f_y = lambdify([self.var], self.expr_y)
list_segments = []
def sample(param_p, param_q, p, q, depth):
""" Samples recursively if three points are almost collinear.
For depth < 6, points are added irrespective of whether they
satisfy the collinearity condition or not. The maximum depth
allowed is 12.
"""
#Randomly sample to avoid aliasing.
np = import_module('numpy')
random = 0.45 + np.random.rand() * 0.1
param_new = param_p + random * (param_q - param_p)
xnew = f_x(param_new)
ynew = f_y(param_new)
new_point = np.array([xnew, ynew])
#Maximum depth
if depth > self.depth:
list_segments.append([p, q])
#Sample irrespective of whether the line is flat till the
#depth of 6. We are not using linspace to avoid aliasing.
elif depth < 6:
sample(param_p, param_new, p, new_point, depth + 1)
sample(param_new, param_q, new_point, q, depth + 1)
#Sample ten points if complex values are encountered
#at both ends. If there is a real value in between, then
#sample those points further.
elif ((p[0] is None and q[1] is None) or
(p[1] is None and q[1] is None)):
param_array = np.linspace(param_p, param_q, 10)
x_array = list(map(f_x, param_array))
y_array = list(map(f_y, param_array))
if any(x is not None and y is not None
for x, y in zip(x_array, y_array)):
for i in range(len(y_array) - 1):
if ((x_array[i] is not None and y_array[i] is not None) or
(x_array[i + 1] is not None and y_array[i + 1] is not None)):
point_a = [x_array[i], y_array[i]]
point_b = [x_array[i + 1], y_array[i + 1]]
                            sample(param_array[i], param_array[i + 1], point_a,
                                   point_b, depth + 1)
            #Sample further if one of the end points is None (i.e. a complex
            #value) or the three points are not almost collinear.
elif (p[0] is None or p[1] is None
or q[1] is None or q[0] is None
or not flat(p, new_point, q)):
sample(param_p, param_new, p, new_point, depth + 1)
sample(param_new, param_q, new_point, q, depth + 1)
else:
list_segments.append([p, q])
f_start_x = f_x(self.start)
f_start_y = f_y(self.start)
start = [f_start_x, f_start_y]
f_end_x = f_x(self.end)
f_end_y = f_y(self.end)
end = [f_end_x, f_end_y]
sample(self.start, self.end, start, end, 0)
return list_segments
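# Illustrative usage sketch of the adaptive sampler above (hedged example; the
# segments differ slightly between runs because a random point near each
# midpoint is used):
#
#     >>> from sympy import symbols, cos, sin
#     >>> u = symbols('u')
#     >>> s = Parametric2DLineSeries(cos(u), sin(u), (u, 0, 1), depth=8)
#     >>> segments = s.get_segments()   # list of [[x0, y0], [x1, y1]] pairs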
### 3D lines
class Line3DBaseSeries(Line2DBaseSeries):
"""A base class for 3D lines.
Most of the stuff is derived from Line2DBaseSeries."""
is_2Dline = False
is_3Dline = True
_dim = 3
def __init__(self):
super(Line3DBaseSeries, self).__init__()
class Parametric3DLineSeries(Line3DBaseSeries):
"""Representation for a 3D line consisting of two parametric sympy
expressions and a range."""
def __init__(self, expr_x, expr_y, expr_z, var_start_end, **kwargs):
super(Parametric3DLineSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.expr_z = sympify(expr_z)
self.label = "(%s, %s)" % (str(self.expr_x), str(self.expr_y))
self.var = sympify(var_start_end[0])
self.start = float(var_start_end[1])
self.end = float(var_start_end[2])
self.nb_of_points = kwargs.get('nb_of_points', 300)
self.line_color = kwargs.get('line_color', None)
def __str__(self):
return '3D parametric cartesian line: (%s, %s, %s) for %s over %s' % (
str(self.expr_x), str(self.expr_y), str(self.expr_z),
str(self.var), str((self.start, self.end)))
def get_parameter_points(self):
np = import_module('numpy')
return np.linspace(self.start, self.end, num=self.nb_of_points)
def get_points(self):
param = self.get_parameter_points()
fx = vectorized_lambdify([self.var], self.expr_x)
fy = vectorized_lambdify([self.var], self.expr_y)
fz = vectorized_lambdify([self.var], self.expr_z)
list_x = fx(param)
list_y = fy(param)
list_z = fz(param)
return (list_x, list_y, list_z)
### Surfaces
class SurfaceBaseSeries(BaseSeries):
"""A base class for 3D surfaces."""
is_3Dsurface = True
def __init__(self):
super(SurfaceBaseSeries, self).__init__()
self.surface_color = None
def get_color_array(self):
np = import_module('numpy')
c = self.surface_color
if isinstance(c, Callable):
f = np.vectorize(c)
arity = len(getargspec(c)[0])
if self.is_parametric:
variables = list(map(centers_of_faces, self.get_parameter_meshes()))
if arity == 1:
return f(variables[0])
elif arity == 2:
return f(*variables)
variables = list(map(centers_of_faces, self.get_meshes()))
if arity == 1:
return f(variables[0])
elif arity == 2:
return f(*variables[:2])
else:
return f(*variables)
else:
return c*np.ones(self.nb_of_points)
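# Illustrative sketch of the ``surface_color`` dispatch above (hedged example;
# ``surface_series`` is only a placeholder name): ``get_color_array`` inspects
# the arity of the callable and feeds it the centers of the mesh faces, while a
# plain number yields a constant color array.
#
#     surface_series.surface_color = 0.5                  # constant color array
#     surface_series.surface_color = lambda x: x          # arity 1: color by x
#     surface_series.surface_color = lambda x, y: x * y   # arity 2: color by x and y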
class SurfaceOver2DRangeSeries(SurfaceBaseSeries):
"""Representation for a 3D surface consisting of a sympy expression and 2D
range."""
def __init__(self, expr, var_start_end_x, var_start_end_y, **kwargs):
super(SurfaceOver2DRangeSeries, self).__init__()
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.nb_of_points_x = kwargs.get('nb_of_points_x', 50)
self.nb_of_points_y = kwargs.get('nb_of_points_y', 50)
self.surface_color = kwargs.get('surface_color', None)
def __str__(self):
return ('cartesian surface: %s for'
' %s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_meshes(self):
np = import_module('numpy')
mesh_x, mesh_y = np.meshgrid(np.linspace(self.start_x, self.end_x,
num=self.nb_of_points_x),
np.linspace(self.start_y, self.end_y,
num=self.nb_of_points_y))
f = vectorized_lambdify((self.var_x, self.var_y), self.expr)
return (mesh_x, mesh_y, f(mesh_x, mesh_y))
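# Illustrative sketch of the meshes produced above (hedged example):
#
#     >>> from sympy import symbols
#     >>> x, y = symbols('x y')
#     >>> s = SurfaceOver2DRangeSeries(x*y, (x, -1, 1), (y, -1, 1),
#     ...                              nb_of_points_x=10, nb_of_points_y=20)
#     >>> mesh_x, mesh_y, mesh_z = s.get_meshes()   # each array has shape (20, 10)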
class ParametricSurfaceSeries(SurfaceBaseSeries):
"""Representation for a 3D surface consisting of three parametric sympy
expressions and a range."""
is_parametric = True
def __init__(
self, expr_x, expr_y, expr_z, var_start_end_u, var_start_end_v,
**kwargs):
super(ParametricSurfaceSeries, self).__init__()
self.expr_x = sympify(expr_x)
self.expr_y = sympify(expr_y)
self.expr_z = sympify(expr_z)
self.var_u = sympify(var_start_end_u[0])
self.start_u = float(var_start_end_u[1])
self.end_u = float(var_start_end_u[2])
self.var_v = sympify(var_start_end_v[0])
self.start_v = float(var_start_end_v[1])
self.end_v = float(var_start_end_v[2])
self.nb_of_points_u = kwargs.get('nb_of_points_u', 50)
self.nb_of_points_v = kwargs.get('nb_of_points_v', 50)
self.surface_color = kwargs.get('surface_color', None)
def __str__(self):
return ('parametric cartesian surface: (%s, %s, %s) for'
' %s over %s and %s over %s') % (
str(self.expr_x),
str(self.expr_y),
str(self.expr_z),
str(self.var_u),
str((self.start_u, self.end_u)),
str(self.var_v),
str((self.start_v, self.end_v)))
def get_parameter_meshes(self):
np = import_module('numpy')
return np.meshgrid(np.linspace(self.start_u, self.end_u,
num=self.nb_of_points_u),
np.linspace(self.start_v, self.end_v,
num=self.nb_of_points_v))
def get_meshes(self):
mesh_u, mesh_v = self.get_parameter_meshes()
fx = vectorized_lambdify((self.var_u, self.var_v), self.expr_x)
fy = vectorized_lambdify((self.var_u, self.var_v), self.expr_y)
fz = vectorized_lambdify((self.var_u, self.var_v), self.expr_z)
return (fx(mesh_u, mesh_v), fy(mesh_u, mesh_v), fz(mesh_u, mesh_v))
### Contours
class ContourSeries(BaseSeries):
"""Representation for a contour plot."""
#The code is mostly repetition of SurfaceOver2DRange.
#XXX: Presently not used in any of those functions.
    #XXX: Add contour plot and use this series.
is_contour = True
def __init__(self, expr, var_start_end_x, var_start_end_y):
super(ContourSeries, self).__init__()
self.nb_of_points_x = 50
self.nb_of_points_y = 50
self.expr = sympify(expr)
self.var_x = sympify(var_start_end_x[0])
self.start_x = float(var_start_end_x[1])
self.end_x = float(var_start_end_x[2])
self.var_y = sympify(var_start_end_y[0])
self.start_y = float(var_start_end_y[1])
self.end_y = float(var_start_end_y[2])
self.get_points = self.get_meshes
def __str__(self):
return ('contour: %s for '
'%s over %s and %s over %s') % (
str(self.expr),
str(self.var_x),
str((self.start_x, self.end_x)),
str(self.var_y),
str((self.start_y, self.end_y)))
def get_meshes(self):
np = import_module('numpy')
mesh_x, mesh_y = np.meshgrid(np.linspace(self.start_x, self.end_x,
num=self.nb_of_points_x),
np.linspace(self.start_y, self.end_y,
num=self.nb_of_points_y))
f = vectorized_lambdify((self.var_x, self.var_y), self.expr)
return (mesh_x, mesh_y, f(mesh_x, mesh_y))
##############################################################################
# Backends
##############################################################################
class BaseBackend(object):
def __init__(self, parent):
super(BaseBackend, self).__init__()
self.parent = parent
## don't have to check for the success of importing matplotlib in each case;
## we will only be using this backend if we can successfully import matplotlib
class MatplotlibBackend(BaseBackend):
def __init__(self, parent):
super(MatplotlibBackend, self).__init__(parent)
are_3D = [s.is_3D for s in self.parent._series]
self.matplotlib = import_module('matplotlib',
__import__kwargs={'fromlist': ['pyplot', 'cm', 'collections']},
min_module_version='1.1.0', catch=(RuntimeError,))
self.plt = self.matplotlib.pyplot
self.cm = self.matplotlib.cm
self.LineCollection = self.matplotlib.collections.LineCollection
if any(are_3D) and not all(are_3D):
raise ValueError('The matplotlib backend can not mix 2D and 3D.')
elif not any(are_3D):
self.fig = self.plt.figure()
self.ax = self.fig.add_subplot(111)
self.ax.spines['left'].set_position('zero')
self.ax.spines['right'].set_color('none')
self.ax.spines['bottom'].set_position('zero')
self.ax.spines['top'].set_color('none')
self.ax.spines['left'].set_smart_bounds(True)
self.ax.spines['bottom'].set_smart_bounds(False)
self.ax.xaxis.set_ticks_position('bottom')
self.ax.yaxis.set_ticks_position('left')
elif all(are_3D):
## mpl_toolkits.mplot3d is necessary for
## projection='3d'
mpl_toolkits = import_module('mpl_toolkits',
__import__kwargs={'fromlist': ['mplot3d']})
self.fig = self.plt.figure()
self.ax = self.fig.add_subplot(111, projection='3d')
def process_series(self):
parent = self.parent
for s in self.parent._series:
# Create the collections
if s.is_2Dline:
collection = self.LineCollection(s.get_segments())
self.ax.add_collection(collection)
elif s.is_contour:
self.ax.contour(*s.get_meshes())
elif s.is_3Dline:
# TODO too complicated, I blame matplotlib
mpl_toolkits = import_module('mpl_toolkits',
__import__kwargs={'fromlist': ['mplot3d']})
art3d = mpl_toolkits.mplot3d.art3d
collection = art3d.Line3DCollection(s.get_segments())
self.ax.add_collection(collection)
x, y, z = s.get_points()
self.ax.set_xlim((min(x), max(x)))
self.ax.set_ylim((min(y), max(y)))
self.ax.set_zlim((min(z), max(z)))
elif s.is_3Dsurface:
x, y, z = s.get_meshes()
collection = self.ax.plot_surface(x, y, z, cmap=self.cm.jet,
rstride=1, cstride=1,
linewidth=0.1)
elif s.is_implicit:
#Smart bounds have to be set to False for implicit plots.
self.ax.spines['left'].set_smart_bounds(False)
self.ax.spines['bottom'].set_smart_bounds(False)
points = s.get_raster()
if len(points) == 2:
#interval math plotting
x, y = _matplotlib_list(points[0])
self.ax.fill(x, y, facecolor='b', edgecolor='None' )
else:
# use contourf or contour depending on whether it is
# an inequality or equality.
#XXX: ``contour`` plots multiple lines. Should be fixed.
ListedColormap = self.matplotlib.colors.ListedColormap
colormap = ListedColormap(["white", "blue"])
xarray, yarray, zarray, plot_type = points
if plot_type == 'contour':
self.ax.contour(xarray, yarray, zarray,
contours=(0, 0), fill=False, cmap=colormap)
else:
self.ax.contourf(xarray, yarray, zarray, cmap=colormap)
else:
raise ValueError('The matplotlib backend supports only '
'is_2Dline, is_3Dline, is_3Dsurface and '
'is_contour objects.')
# Customise the collections with the corresponding per-series
# options.
if hasattr(s, 'label'):
collection.set_label(s.label)
if s.is_line and s.line_color:
if isinstance(s.line_color, (float, int)) or isinstance(s.line_color, Callable):
color_array = s.get_color_array()
collection.set_array(color_array)
else:
collection.set_color(s.line_color)
if s.is_3Dsurface and s.surface_color:
if self.matplotlib.__version__ < "1.2.0": # TODO in the distant future remove this check
warnings.warn('The version of matplotlib is too old to use surface coloring.')
elif isinstance(s.surface_color, (float, int)) or isinstance(s.surface_color, Callable):
color_array = s.get_color_array()
color_array = color_array.reshape(color_array.size)
collection.set_array(color_array)
else:
collection.set_color(s.surface_color)
# Set global options.
# TODO The 3D stuff
# XXX The order of those is important.
mpl_toolkits = import_module('mpl_toolkits',
__import__kwargs={'fromlist': ['mplot3d']})
Axes3D = mpl_toolkits.mplot3d.Axes3D
if parent.xscale and not isinstance(self.ax, Axes3D):
self.ax.set_xscale(parent.xscale)
if parent.yscale and not isinstance(self.ax, Axes3D):
self.ax.set_yscale(parent.yscale)
if parent.xlim:
self.ax.set_xlim(parent.xlim)
else:
if all(isinstance(s, LineOver1DRangeSeries) for s in parent._series):
starts = [s.start for s in parent._series]
ends = [s.end for s in parent._series]
self.ax.set_xlim(min(starts), max(ends))
if parent.ylim:
self.ax.set_ylim(parent.ylim)
if not isinstance(self.ax, Axes3D) or self.matplotlib.__version__ >= '1.2.0': # XXX in the distant future remove this check
self.ax.set_autoscale_on(parent.autoscale)
if parent.axis_center:
val = parent.axis_center
if isinstance(self.ax, Axes3D):
pass
elif val == 'center':
self.ax.spines['left'].set_position('center')
self.ax.spines['bottom'].set_position('center')
elif val == 'auto':
xl, xh = self.ax.get_xlim()
yl, yh = self.ax.get_ylim()
pos_left = ('data', 0) if xl*xh <= 0 else 'center'
pos_bottom = ('data', 0) if yl*yh <= 0 else 'center'
self.ax.spines['left'].set_position(pos_left)
self.ax.spines['bottom'].set_position(pos_bottom)
else:
self.ax.spines['left'].set_position(('data', val[0]))
self.ax.spines['bottom'].set_position(('data', val[1]))
if not parent.axis:
self.ax.set_axis_off()
if parent.legend:
if self.ax.legend():
self.ax.legend_.set_visible(parent.legend)
if parent.margin:
self.ax.set_xmargin(parent.margin)
self.ax.set_ymargin(parent.margin)
if parent.title:
self.ax.set_title(parent.title)
if parent.xlabel:
self.ax.set_xlabel(parent.xlabel, position=(1, 0))
if parent.ylabel:
self.ax.set_ylabel(parent.ylabel, position=(0, 1))
def show(self):
self.process_series()
#TODO after fixing https://github.com/ipython/ipython/issues/1255
# you can uncomment the next line and remove the pyplot.show() call
#self.fig.show()
if _show:
self.plt.show()
def save(self, path):
self.process_series()
self.fig.savefig(path)
def close(self):
self.plt.close(self.fig)
class TextBackend(BaseBackend):
def __init__(self, parent):
super(TextBackend, self).__init__(parent)
def show(self):
if len(self.parent._series) != 1:
raise ValueError(
'The TextBackend supports only one graph per Plot.')
elif not isinstance(self.parent._series[0], LineOver1DRangeSeries):
raise ValueError(
'The TextBackend supports only expressions over a 1D range')
else:
ser = self.parent._series[0]
textplot(ser.expr, ser.start, ser.end)
def close(self):
pass
class DefaultBackend(BaseBackend):
def __new__(cls, parent):
matplotlib = import_module('matplotlib', min_module_version='1.1.0', catch=(RuntimeError,))
if matplotlib:
return MatplotlibBackend(parent)
else:
return TextBackend(parent)
plot_backends = {
'matplotlib': MatplotlibBackend,
'text': TextBackend,
'default': DefaultBackend
}
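# Illustrative sketch of backend selection (hedged note): ``DefaultBackend``
# resolves to ``MatplotlibBackend`` when matplotlib >= 1.1.0 can be imported
# and falls back to ``TextBackend`` otherwise, so on a machine with matplotlib
# installed the two lookups below behave the same.
#
#     backend_cls = plot_backends['default']
#     backend_cls = plot_backends['matplotlib']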
##############################################################################
# Finding the centers of line segments or mesh faces
##############################################################################
def centers_of_segments(array):
np = import_module('numpy')
return np.average(np.vstack((array[:-1], array[1:])), 0)
def centers_of_faces(array):
np = import_module('numpy')
return np.average(np.dstack((array[:-1, :-1],
array[1:, :-1],
array[:-1, 1: ],
array[:-1, :-1],
)), 2)
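# Illustrative check of the two helpers above (hedged example): both compute
# midpoints, of consecutive 1D samples and of 2D mesh cells respectively.
#
#     import numpy as np
#     centers_of_segments(np.array([0.0, 1.0, 3.0]))   # -> array([0.5, 2.0])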
def flat(x, y, z, eps=1e-3):
"""Checks whether three points are almost collinear"""
np = import_module('numpy')
vector_a = x - y
vector_b = z - y
dot_product = np.dot(vector_a, vector_b)
vector_a_norm = np.linalg.norm(vector_a)
vector_b_norm = np.linalg.norm(vector_b)
cos_theta = dot_product / (vector_a_norm * vector_b_norm)
return abs(cos_theta + 1) < eps
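# Illustrative check of the collinearity test above (hedged example): for
# collinear points the angle at the middle point is ~180 degrees, so
# cos(theta) is close to -1 and ``flat`` returns True.
#
#     import numpy as np
#     flat(np.array([0.0, 0.0]), np.array([1.0, 1.0]), np.array([2.0, 2.0]))   # True
#     flat(np.array([0.0, 0.0]), np.array([1.0, 1.0]), np.array([2.0, 0.0]))   # False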
def _matplotlib_list(interval_list):
"""
Returns lists for matplotlib ``fill`` command from a list of bounding
rectangular intervals
"""
xlist = []
ylist = []
if len(interval_list):
for intervals in interval_list:
intervalx = intervals[0]
intervaly = intervals[1]
xlist.extend([intervalx.start, intervalx.start,
intervalx.end, intervalx.end, None])
ylist.extend([intervaly.start, intervaly.end,
intervaly.end, intervaly.start, None])
else:
#XXX Ugly hack. Matplotlib does not accept empty lists for ``fill``
xlist.extend([None, None, None, None])
ylist.extend([None, None, None, None])
return xlist, ylist
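# Illustrative sketch of the fill-list layout produced above (hedged example;
# ``intervalx``/``intervaly`` stand for interval objects with ``start`` and
# ``end`` attributes): every bounding rectangle contributes its four corners
# followed by ``None`` so matplotlib's ``fill`` treats each rectangle as a
# separate polygon.
#
#     xlist, ylist = _matplotlib_list([(intervalx, intervaly)])
#     # xlist == [intervalx.start, intervalx.start, intervalx.end, intervalx.end, None]
#     # ylist == [intervaly.start, intervaly.end, intervaly.end, intervaly.start, None]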
####New API for plotting module ####
# TODO: Add color arrays for plots.
# TODO: Add more plotting options for 3d plots.
# TODO: Adaptive sampling for 3D plots.
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot(*args, **kwargs):
"""
Plots a function of a single variable and returns an instance of
the ``Plot`` class (also, see the description of the
``show`` keyword argument below).
    The plotting uses an adaptive algorithm which samples recursively to
    produce an accurate plot. The adaptive algorithm uses a random point near
    the midpoint of the two points that have to be further sampled. Hence the
    same plot can appear slightly different from run to run.
Usage
=====
Single Plot
``plot(expr, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with same range.
``plot(expr1, expr2, ..., range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot((expr1, range), (expr2, range), ..., **kwargs)``
Range has to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr`` : Expression representing the function of single variable
``range``: (x, 0, 5), A 3-tuple denoting the range of the free variable.
Keyword Arguments
=================
Arguments for ``plot`` function:
``show``: Boolean. The default value is set to ``True``. Set show to
``False`` and the function will not display the plot. The returned
instance of the ``Plot`` class can then be used to save or display
the plot by calling the ``save()`` and ``show()`` methods
respectively.
Arguments for ``LineOver1DRangeSeries`` class:
``adaptive``: Boolean. The default value is set to True. Set adaptive to False and
specify ``nb_of_points`` if uniform sampling is required.
``depth``: int Recursion depth of the adaptive algorithm. A depth of value ``n``
samples a maximum of `2^{n}` points.
``nb_of_points``: int. Used when the ``adaptive`` is set to False. The function
is uniformly sampled at ``nb_of_points`` number of points.
Aesthetics options:
``line_color``: float. Specifies the color for the plot.
See ``Plot`` to see how to set color for the plots.
    If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the ``Plot`` object returned and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot. It is set to the latex representation of
the expression, if the plot has only one expression.
``xlabel`` : str. Label for the x-axis.
``ylabel`` : str. Label for the y-axis.
``xscale``: {'linear', 'log'} Sets the scaling of the x-axis.
    ``yscale``: {'linear', 'log'} Sets the scaling of the y-axis.
``axis_center``: tuple of two floats denoting the coordinates of the center or
{'center', 'auto'}
``xlim`` : tuple of two floats, denoting the x-axis limits.
``ylim`` : tuple of two floats, denoting the y-axis limits.
Examples
========
>>> from sympy import symbols
>>> from sympy.plotting import plot
>>> x = symbols('x')
Single Plot
>>> plot(x**2, (x, -5, 5))
Plot object containing:
[0]: cartesian line: x**2 for x over (-5.0, 5.0)
Multiple plots with single range.
>>> plot(x, x**2, x**3, (x, -5, 5))
Plot object containing:
[0]: cartesian line: x for x over (-5.0, 5.0)
[1]: cartesian line: x**2 for x over (-5.0, 5.0)
[2]: cartesian line: x**3 for x over (-5.0, 5.0)
Multiple plots with different ranges.
>>> plot((x**2, (x, -6, 6)), (x, (x, -5, 5)))
Plot object containing:
[0]: cartesian line: x**2 for x over (-6.0, 6.0)
[1]: cartesian line: x for x over (-5.0, 5.0)
No adaptive sampling.
>>> plot(x**2, adaptive=False, nb_of_points=400)
Plot object containing:
[0]: cartesian line: x**2 for x over (-10.0, 10.0)
See Also
========
Plot, LineOver1DRangeSeries.
"""
args = list(map(sympify, args))
free = set()
for a in args:
if isinstance(a, Expr):
free |= a.free_symbols
if len(free) > 1:
raise ValueError(
'The same variable should be used in all '
'univariate expressions being plotted.')
x = free.pop() if free else Symbol('x')
kwargs.setdefault('xlabel', x.name)
kwargs.setdefault('ylabel', 'f(%s)' % x.name)
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 1, 1)
series = [LineOver1DRangeSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot_parametric(*args, **kwargs):
"""
Plots a 2D parametric plot.
    The plotting uses an adaptive algorithm which samples recursively to
    produce an accurate plot. The adaptive algorithm uses a random point near
    the midpoint of the two points that have to be further sampled. Hence the
    same plot can appear slightly different from run to run.
Usage
=====
Single plot.
``plot_parametric(expr_x, expr_y, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with same range.
``plot_parametric((expr1_x, expr1_y), (expr2_x, expr2_y), range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot_parametric((expr_x, expr_y, range), ..., **kwargs)``
Range has to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x`` : Expression representing the function along x.
``expr_y`` : Expression representing the function along y.
``range``: (u, 0, 5), A 3-tuple denoting the range of the parameter
variable.
Keyword Arguments
=================
Arguments for ``Parametric2DLineSeries`` class:
``adaptive``: Boolean. The default value is set to True. Set adaptive to
False and specify ``nb_of_points`` if uniform sampling is required.
``depth``: int Recursion depth of the adaptive algorithm. A depth of
value ``n`` samples a maximum of `2^{n}` points.
``nb_of_points``: int. Used when the ``adaptive`` is set to False. The
function is uniformly sampled at ``nb_of_points`` number of points.
Aesthetics
----------
``line_color``: function which returns a float. Specifies the color for the
plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same Series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``xlabel`` : str. Label for the x-axis.
``ylabel`` : str. Label for the y-axis.
``xscale``: {'linear', 'log'} Sets the scaling of the x-axis.
    ``yscale``: {'linear', 'log'} Sets the scaling of the y-axis.
``axis_center``: tuple of two floats denoting the coordinates of the center
or {'center', 'auto'}
``xlim`` : tuple of two floats, denoting the x-axis limits.
``ylim`` : tuple of two floats, denoting the y-axis limits.
Examples
========
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot_parametric
>>> u = symbols('u')
Single Parametric plot
>>> plot_parametric(cos(u), sin(u), (u, -5, 5))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-5.0, 5.0)
    Multiple parametric plots with single range.
>>> plot_parametric((cos(u), sin(u)), (u, cos(u)))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-10.0, 10.0)
[1]: parametric cartesian line: (u, cos(u)) for u over (-10.0, 10.0)
Multiple parametric plots.
>>> plot_parametric((cos(u), sin(u), (u, -5, 5)),
... (cos(u), u, (u, -5, 5)))
Plot object containing:
[0]: parametric cartesian line: (cos(u), sin(u)) for u over (-5.0, 5.0)
[1]: parametric cartesian line: (cos(u), u) for u over (-5.0, 5.0)
See Also
========
Plot, Parametric2DLineSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 2, 1)
series = [Parametric2DLineSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d_parametric_line(*args, **kwargs):
"""
Plots a 3D parametric line plot.
Usage
=====
Single plot:
``plot3d_parametric_line(expr_x, expr_y, expr_z, range, **kwargs)``
If the range is not specified, then a default range of (-10, 10) is used.
Multiple plots.
``plot3d_parametric_line((expr_x, expr_y, expr_z, range), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x`` : Expression representing the function along x.
``expr_y`` : Expression representing the function along y.
``expr_z`` : Expression representing the function along z.
``range``: ``(u, 0, 5)``, A 3-tuple denoting the range of the parameter
variable.
Keyword Arguments
=================
Arguments for ``Parametric3DLineSeries`` class.
``nb_of_points``: The range is uniformly sampled at ``nb_of_points``
number of points.
Aesthetics:
``line_color``: function which returns a float. Specifies the color for the
plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class.
``title`` : str. Title of the plot.
Examples
========
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot3d_parametric_line
>>> u = symbols('u')
Single plot.
>>> plot3d_parametric_line(cos(u), sin(u), u, (u, -5, 5))
Plot object containing:
[0]: 3D parametric cartesian line: (cos(u), sin(u), u) for u over (-5.0, 5.0)
Multiple plots.
>>> plot3d_parametric_line((cos(u), sin(u), u, (u, -5, 5)),
... (sin(u), u**2, u, (u, -5, 5)))
Plot object containing:
[0]: 3D parametric cartesian line: (cos(u), sin(u), u) for u over (-5.0, 5.0)
[1]: 3D parametric cartesian line: (sin(u), u**2, u) for u over (-5.0, 5.0)
See Also
========
Plot, Parametric3DLineSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 3, 1)
series = [Parametric3DLineSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d(*args, **kwargs):
"""
Plots a 3D surface plot.
Usage
=====
Single plot
``plot3d(expr, range_x, range_y, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
    Multiple plots with the same range.
``plot3d(expr1, expr2, range_x, range_y, **kwargs)``
If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plots with different ranges.
``plot3d((expr1, range_x, range_y), (expr2, range_x, range_y), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr`` : Expression representing the function along x.
``range_x``: (x, 0, 5), A 3-tuple denoting the range of the x
variable.
``range_y``: (y, 0, 5), A 3-tuple denoting the range of the y
variable.
Keyword Arguments
=================
Arguments for ``SurfaceOver2DRangeSeries`` class:
    ``nb_of_points_x``: int. The x range is sampled uniformly at
    ``nb_of_points_x`` number of points.
    ``nb_of_points_y``: int. The y range is sampled uniformly at
    ``nb_of_points_y`` number of points.
Aesthetics:
``surface_color``: Function which returns a float. Specifies the color for
the surface of the plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied to
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot.
Examples
========
>>> from sympy import symbols
>>> from sympy.plotting import plot3d
>>> x, y = symbols('x y')
Single plot
>>> plot3d(x*y, (x, -5, 5), (y, -5, 5))
Plot object containing:
[0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
Multiple plots with same range
>>> plot3d(x*y, -x*y, (x, -5, 5), (y, -5, 5))
Plot object containing:
[0]: cartesian surface: x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
[1]: cartesian surface: -x*y for x over (-5.0, 5.0) and y over (-5.0, 5.0)
Multiple plots with different ranges.
>>> plot3d((x**2 + y**2, (x, -5, 5), (y, -5, 5)),
... (x*y, (x, -3, 3), (y, -3, 3)))
Plot object containing:
[0]: cartesian surface: x**2 + y**2 for x over (-5.0, 5.0) and y over (-5.0, 5.0)
[1]: cartesian surface: x*y for x over (-3.0, 3.0) and y over (-3.0, 3.0)
See Also
========
Plot, SurfaceOver2DRangeSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 1, 2)
series = [SurfaceOver2DRangeSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
@doctest_depends_on(modules=('numpy', 'matplotlib',))
def plot3d_parametric_surface(*args, **kwargs):
"""
Plots a 3D parametric surface plot.
Usage
=====
Single plot.
``plot3d_parametric_surface(expr_x, expr_y, expr_z, range_u, range_v, **kwargs)``
    If the ranges are not specified, then a default range of (-10, 10) is used.
Multiple plots.
``plot3d_parametric_surface((expr_x, expr_y, expr_z, range_u, range_v), ..., **kwargs)``
Ranges have to be specified for every expression.
Default range may change in the future if a more advanced default range
detection algorithm is implemented.
Arguments
=========
``expr_x``: Expression representing the function along ``x``.
``expr_y``: Expression representing the function along ``y``.
``expr_z``: Expression representing the function along ``z``.
``range_u``: ``(u, 0, 5)``, A 3-tuple denoting the range of the ``u``
variable.
``range_v``: ``(v, 0, 5)``, A 3-tuple denoting the range of the v
variable.
Keyword Arguments
=================
Arguments for ``ParametricSurfaceSeries`` class:
    ``nb_of_points_u``: int. The ``u`` range is sampled uniformly at
    ``nb_of_points_u`` number of points.
    ``nb_of_points_v``: int. The ``v`` range is sampled uniformly at
    ``nb_of_points_v`` number of points.
Aesthetics:
``surface_color``: Function which returns a float. Specifies the color for
the surface of the plot. See ``sympy.plotting.Plot`` for more details.
If there are multiple plots, then the same series arguments are applied for
all the plots. If you want to set these options separately, you can index
the returned ``Plot`` object and set it.
Arguments for ``Plot`` class:
``title`` : str. Title of the plot.
Examples
========
>>> from sympy import symbols, cos, sin
>>> from sympy.plotting import plot3d_parametric_surface
>>> u, v = symbols('u v')
Single plot.
>>> plot3d_parametric_surface(cos(u + v), sin(u - v), u - v,
... (u, -5, 5), (v, -5, 5))
Plot object containing:
[0]: parametric cartesian surface: (cos(u + v), sin(u - v), u - v) for u over (-5.0, 5.0) and v over (-5.0, 5.0)
See Also
========
Plot, ParametricSurfaceSeries
"""
args = list(map(sympify, args))
show = kwargs.pop('show', True)
series = []
plot_expr = check_arguments(args, 3, 2)
series = [ParametricSurfaceSeries(*arg, **kwargs) for arg in plot_expr]
plots = Plot(*series, **kwargs)
if show:
plots.show()
return plots
def check_arguments(args, expr_len, nb_of_free_symbols):
"""
Checks the arguments and converts into tuples of the
form (exprs, ranges)
Examples
========
>>> from sympy import plot, cos, sin, symbols
>>> from sympy.plotting.plot import check_arguments
>>> x = symbols('x')
>>> check_arguments([cos(x), sin(x)], 2, 1)
[(cos(x), sin(x), (x, -10, 10))]
>>> check_arguments([x, x**2], 1, 1)
[(x, (x, -10, 10)), (x**2, (x, -10, 10))]
"""
if expr_len > 1 and isinstance(args[0], Expr):
# Multiple expressions same range.
# The arguments are tuples when the expression length is
# greater than 1.
if len(args) < expr_len:
raise ValueError("len(args) should not be less than expr_len")
for i in range(len(args)):
if isinstance(args[i], Tuple):
break
else:
i = len(args) + 1
exprs = Tuple(*args[:i])
free_symbols = list(set.union(*[e.free_symbols for e in exprs]))
if len(args) == expr_len + nb_of_free_symbols:
#Ranges given
plots = [exprs + Tuple(*args[expr_len:])]
else:
default_range = Tuple(-10, 10)
ranges = []
for symbol in free_symbols:
ranges.append(Tuple(symbol) + default_range)
for i in range(len(free_symbols) - nb_of_free_symbols):
ranges.append(Tuple(Dummy()) + default_range)
plots = [exprs + Tuple(*ranges)]
return plots
if isinstance(args[0], Expr) or (isinstance(args[0], Tuple) and
len(args[0]) == expr_len and
expr_len != 3):
        # Cannot handle expressions with number of expressions = 3. It is
# not possible to differentiate between expressions and ranges.
#Series of plots with same range
for i in range(len(args)):
if isinstance(args[i], Tuple) and len(args[i]) != expr_len:
break
if not isinstance(args[i], Tuple):
args[i] = Tuple(args[i])
else:
i = len(args) + 1
exprs = args[:i]
assert all(isinstance(e, Expr) for expr in exprs for e in expr)
free_symbols = list(set.union(*[e.free_symbols for expr in exprs
for e in expr]))
if len(free_symbols) > nb_of_free_symbols:
raise ValueError("The number of free_symbols in the expression "
"is greater than %d" % nb_of_free_symbols)
if len(args) == i + nb_of_free_symbols and isinstance(args[i], Tuple):
ranges = Tuple(*[range_expr for range_expr in args[
i:i + nb_of_free_symbols]])
plots = [expr + ranges for expr in exprs]
return plots
else:
#Use default ranges.
default_range = Tuple(-10, 10)
ranges = []
for symbol in free_symbols:
ranges.append(Tuple(symbol) + default_range)
for i in range(len(free_symbols) - nb_of_free_symbols):
ranges.append(Tuple(Dummy()) + default_range)
ranges = Tuple(*ranges)
plots = [expr + ranges for expr in exprs]
return plots
elif isinstance(args[0], Tuple) and len(args[0]) == expr_len + nb_of_free_symbols:
#Multiple plots with different ranges.
for arg in args:
for i in range(expr_len):
if not isinstance(arg[i], Expr):
raise ValueError("Expected an expression, given %s" %
str(arg[i]))
for i in range(nb_of_free_symbols):
if not len(arg[i + expr_len]) == 3:
raise ValueError("The ranges should be a tuple of "
"length 3, got %s" % str(arg[i + expr_len]))
return args
| 35.543802
| 132
| 0.592448
|
794df05c2792f68168f217f9c84708db640b884d
| 5,474
|
py
|
Python
|
test/test_od.py
|
mlederhi/canopen
|
dd2d1a73f75844b66b5fa4a26b01cb8cebe72beb
|
[
"MIT"
] | 301
|
2016-10-19T03:21:10.000Z
|
2022-03-31T11:00:26.000Z
|
test/test_od.py
|
mlederhi/canopen
|
dd2d1a73f75844b66b5fa4a26b01cb8cebe72beb
|
[
"MIT"
] | 236
|
2016-10-27T15:33:51.000Z
|
2022-03-22T19:57:58.000Z
|
test/test_od.py
|
mlederhi/canopen
|
dd2d1a73f75844b66b5fa4a26b01cb8cebe72beb
|
[
"MIT"
] | 143
|
2016-10-22T06:48:33.000Z
|
2022-03-22T09:49:20.000Z
|
import unittest
from canopen import objectdictionary as od
class TestDataConversions(unittest.TestCase):
def test_boolean(self):
var = od.Variable("Test BOOLEAN", 0x1000)
var.data_type = od.BOOLEAN
self.assertEqual(var.decode_raw(b"\x01"), True)
self.assertEqual(var.decode_raw(b"\x00"), False)
self.assertEqual(var.encode_raw(True), b"\x01")
self.assertEqual(var.encode_raw(False), b"\x00")
def test_unsigned8(self):
var = od.Variable("Test UNSIGNED8", 0x1000)
var.data_type = od.UNSIGNED8
self.assertEqual(var.decode_raw(b"\xff"), 255)
self.assertEqual(var.encode_raw(254), b"\xfe")
def test_unsigned16(self):
var = od.Variable("Test UNSIGNED16", 0x1000)
var.data_type = od.UNSIGNED16
self.assertEqual(var.decode_raw(b"\xfe\xff"), 65534)
self.assertEqual(var.encode_raw(65534), b"\xfe\xff")
def test_unsigned32(self):
var = od.Variable("Test UNSIGNED32", 0x1000)
var.data_type = od.UNSIGNED32
self.assertEqual(var.decode_raw(b"\xfc\xfd\xfe\xff"), 4294901244)
self.assertEqual(var.encode_raw(4294901244), b"\xfc\xfd\xfe\xff")
def test_integer8(self):
var = od.Variable("Test INTEGER8", 0x1000)
var.data_type = od.INTEGER8
self.assertEqual(var.decode_raw(b"\xff"), -1)
self.assertEqual(var.decode_raw(b"\x7f"), 127)
self.assertEqual(var.encode_raw(-2), b"\xfe")
self.assertEqual(var.encode_raw(127), b"\x7f")
def test_integer16(self):
var = od.Variable("Test INTEGER16", 0x1000)
var.data_type = od.INTEGER16
self.assertEqual(var.decode_raw(b"\xfe\xff"), -2)
self.assertEqual(var.decode_raw(b"\x01\x00"), 1)
self.assertEqual(var.encode_raw(-2), b"\xfe\xff")
self.assertEqual(var.encode_raw(1), b"\x01\x00")
def test_integer32(self):
var = od.Variable("Test INTEGER32", 0x1000)
var.data_type = od.INTEGER32
self.assertEqual(var.decode_raw(b"\xfe\xff\xff\xff"), -2)
self.assertEqual(var.encode_raw(-2), b"\xfe\xff\xff\xff")
def test_visible_string(self):
var = od.Variable("Test VISIBLE_STRING", 0x1000)
var.data_type = od.VISIBLE_STRING
self.assertEqual(var.decode_raw(b"abcdefg"), "abcdefg")
self.assertEqual(var.decode_raw(b"zero terminated\x00"), "zero terminated")
self.assertEqual(var.encode_raw("testing"), b"testing")
class TestAlternativeRepresentations(unittest.TestCase):
def test_phys(self):
var = od.Variable("Test INTEGER16", 0x1000)
var.data_type = od.INTEGER16
var.factor = 0.1
self.assertAlmostEqual(var.decode_phys(128), 12.8)
self.assertEqual(var.encode_phys(-0.1), -1)
def test_desc(self):
var = od.Variable("Test UNSIGNED8", 0x1000)
var.data_type = od.UNSIGNED8
var.add_value_description(0, "Value 0")
var.add_value_description(1, "Value 1")
var.add_value_description(3, "Value 3")
self.assertEqual(var.decode_desc(0), "Value 0")
self.assertEqual(var.decode_desc(3), "Value 3")
self.assertEqual(var.encode_desc("Value 1"), 1)
def test_bits(self):
var = od.Variable("Test UNSIGNED8", 0x1000)
var.data_type = od.UNSIGNED8
var.add_bit_definition("BIT 0", [0])
var.add_bit_definition("BIT 2 and 3", [2, 3])
self.assertEqual(var.decode_bits(1, "BIT 0"), 1)
self.assertEqual(var.decode_bits(1, [1]), 0)
self.assertEqual(var.decode_bits(0xf, [0, 1, 2, 3]), 15)
self.assertEqual(var.decode_bits(8, "BIT 2 and 3"), 2)
self.assertEqual(var.encode_bits(0xf, [1], 0), 0xd)
self.assertEqual(var.encode_bits(0, "BIT 0", 1), 1)
class TestObjectDictionary(unittest.TestCase):
def test_add_variable(self):
test_od = od.ObjectDictionary()
var = od.Variable("Test Variable", 0x1000)
test_od.add_object(var)
self.assertEqual(test_od["Test Variable"], var)
self.assertEqual(test_od[0x1000], var)
def test_add_record(self):
test_od = od.ObjectDictionary()
record = od.Record("Test Record", 0x1001)
var = od.Variable("Test Subindex", 0x1001, 1)
record.add_member(var)
test_od.add_object(record)
self.assertEqual(test_od["Test Record"], record)
self.assertEqual(test_od[0x1001], record)
self.assertEqual(test_od["Test Record"]["Test Subindex"], var)
def test_add_array(self):
test_od = od.ObjectDictionary()
array = od.Array("Test Array", 0x1002)
array.add_member(od.Variable("Last subindex", 0x1002, 0))
test_od.add_object(array)
self.assertEqual(test_od["Test Array"], array)
self.assertEqual(test_od[0x1002], array)
class TestArray(unittest.TestCase):
def test_subindexes(self):
array = od.Array("Test Array", 0x1000)
last_subindex = od.Variable("Last subindex", 0x1000, 0)
last_subindex.data_type = od.UNSIGNED8
array.add_member(last_subindex)
array.add_member(od.Variable("Test Variable", 0x1000, 1))
array.add_member(od.Variable("Test Variable 2", 0x1000, 2))
self.assertEqual(array[0].name, "Last subindex")
self.assertEqual(array[1].name, "Test Variable")
self.assertEqual(array[2].name, "Test Variable 2")
self.assertEqual(array[3].name, "Test Variable_3")
| 39.381295
| 83
| 0.648703
|
794df136f39b9e2cfa450f252b3c4e9dff9be930
| 1,139
|
py
|
Python
|
scripts/fp_quan/run_fixedp_quan.py
|
deep-fry/mayo
|
7211a11fdb9bb0a036d496a3eba16c96db122f89
|
[
"MIT"
] | 110
|
2018-06-07T17:52:29.000Z
|
2022-03-28T08:04:02.000Z
|
scripts/fp_quan/run_fixedp_quan.py
|
kypomon/mayo
|
7211a11fdb9bb0a036d496a3eba16c96db122f89
|
[
"MIT"
] | 6
|
2019-10-17T12:00:29.000Z
|
2021-10-21T13:41:22.000Z
|
scripts/fp_quan/run_fixedp_quan.py
|
kypomon/mayo
|
7211a11fdb9bb0a036d496a3eba16c96db122f89
|
[
"MIT"
] | 22
|
2018-07-05T01:30:49.000Z
|
2021-10-19T06:15:40.000Z
|
import yaml
import os
import subprocess
mayo_dir = "../../"
gpus = [0, 1]
model='mobilenet_v1'
cmd_formatter = './my datasets/imagenet.yaml models/override/{}.yaml models/override/quantize/fixed_incremental.yaml trainers/cifarnet.yaml system.checkpoint.load=pretrained train.learning_rate._initial=0.01 train.learning_rate.decay_steps=10 system.max_epochs=30 system.checkpoint.save.interval=1 system.num_gpus=2 system.visible_gpus=[{},{}] train.learning_rate._default_batch_size=256 system.batch_size_per_gpu=128 reset-num-epochs ttrain'
eval_cmd_formatter = './my datasets/imagenet.yaml models/override/{}.yaml models/override/quantize/fixed_incremental.yaml trainers/cifarnet.yaml system.checkpoint.load=pretrained train.learning_rate._initial=0.01 train.learning_rate.decay_steps=10 system.max_epochs=30 system.checkpoint.save.interval=1 system.num_gpus=2 system.visible_gpus=[{},{}] eval-all'
cmd = cmd_formatter.format(model, gpus[0], gpus[1])
eval_cmd = eval_cmd_formatter.format(model, gpus[0], gpus[1])
subprocess.call(cmd, cwd=mayo_dir, shell=True)
subprocess.call(eval_cmd, cwd=mayo_dir, shell=True)
subprocess.call("cp eval_all.csv eval_all{}.csv".format(model), cwd=mayo_dir, shell=True)
| 81.357143
| 442
| 0.808604
|
794df13dbe715c7dd455dee29df378ded8d6cde0
| 462
|
py
|
Python
|
seimas/migrations/0007_auto_20180813_1328.py
|
zinaukarenku/zkr-platform
|
8daf7d1206c482f1f8e0bcd54d4fde783e568774
|
[
"Apache-2.0"
] | 2
|
2018-11-16T21:45:17.000Z
|
2019-02-03T19:55:46.000Z
|
seimas/migrations/0007_auto_20180813_1328.py
|
zinaukarenku/zkr-platform
|
8daf7d1206c482f1f8e0bcd54d4fde783e568774
|
[
"Apache-2.0"
] | 13
|
2018-08-17T19:12:11.000Z
|
2022-03-11T23:27:41.000Z
|
seimas/migrations/0007_auto_20180813_1328.py
|
zinaukarenku/zkr-platform
|
8daf7d1206c482f1f8e0bcd54d4fde783e568774
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.1 on 2018-08-13 13:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('seimas', '0006_auto_20180813_1327'),
]
operations = [
migrations.AlterField(
model_name='politicianparliamentgroup',
name='group_id',
field=models.IntegerField(help_text='parlamentinės_grupės_id is used internally in seimas web'),
),
]
| 24.315789
| 108
| 0.649351
|
794df26f790dfdf227ae70b59882f486b92822bb
| 37,448
|
py
|
Python
|
eraserbenchmark/rationale_benchmark/metrics.py
|
go2chayan/HateXplain
|
c5f173d39dca348ec6481fca08a17bc80616651a
|
[
"MIT"
] | 69
|
2021-03-05T20:50:39.000Z
|
2022-03-29T17:45:55.000Z
|
eraserbenchmark/rationale_benchmark/metrics.py
|
go2chayan/HateXplain
|
c5f173d39dca348ec6481fca08a17bc80616651a
|
[
"MIT"
] | 10
|
2021-03-05T07:38:06.000Z
|
2022-03-31T22:27:39.000Z
|
eraserbenchmark/rationale_benchmark/metrics.py
|
go2chayan/HateXplain
|
c5f173d39dca348ec6481fca08a17bc80616651a
|
[
"MIT"
] | 28
|
2021-03-22T03:46:43.000Z
|
2022-03-08T18:34:38.000Z
|
import argparse
import json
import logging
import os
import pprint
from collections import Counter, defaultdict, namedtuple
from dataclasses import dataclass
from itertools import chain
from typing import Any, Callable, Dict, List, Set, Tuple
import numpy as np
import torch
from scipy.stats import entropy
from sklearn.metrics import accuracy_score, auc, average_precision_score, classification_report, precision_recall_curve, roc_auc_score
from rationale_benchmark.utils import (
Annotation,
Evidence,
annotations_from_jsonl,
load_jsonl,
load_documents,
load_flattened_documents
)
logging.basicConfig(level=logging.DEBUG, format='%(relativeCreated)6d %(threadName)s %(message)s')
# start_token is inclusive, end_token is exclusive
@dataclass(eq=True, frozen=True)
class Rationale:
ann_id: str
docid: str
start_token: int
end_token: int
def to_token_level(self) -> List['Rationale']:
ret = []
for t in range(self.start_token, self.end_token):
ret.append(Rationale(self.ann_id, self.docid, t, t+1))
return ret
@classmethod
def from_annotation(cls, ann: Annotation) -> List['Rationale']:
ret = []
for ev_group in ann.evidences:
for ev in ev_group:
ret.append(Rationale(ann.annotation_id, ev.docid, ev.start_token, ev.end_token))
return ret
@classmethod
def from_instance(cls, inst: dict) -> List['Rationale']:
ret = []
for rat in inst['rationales']:
for pred in rat.get('hard_rationale_predictions', []):
ret.append(Rationale(inst['annotation_id'], rat['docid'], pred['start_token'], pred['end_token']))
return ret
@dataclass(eq=True, frozen=True)
class PositionScoredDocument:
ann_id: str
docid: str
scores: Tuple[float]
truths: Tuple[bool]
@classmethod
def from_results(cls, instances: List[dict], annotations: List[Annotation], docs: Dict[str, List[Any]], use_tokens: bool=True) -> List['PositionScoredDocument']:
"""Creates a paired list of annotation ids/docids/predictions/truth values"""
key_to_annotation = dict()
for ann in annotations:
for ev in chain.from_iterable(ann.evidences):
key = (ann.annotation_id, ev.docid)
if key not in key_to_annotation:
key_to_annotation[key] = [False for _ in docs[ev.docid]]
if use_tokens:
start, end = ev.start_token, ev.end_token
else:
start, end = ev.start_sentence, ev.end_sentence
for t in range(start, end):
key_to_annotation[key][t] = True
ret = []
if use_tokens:
field = 'soft_rationale_predictions'
else:
field = 'soft_sentence_predictions'
for inst in instances:
for rat in inst['rationales']:
docid = rat['docid']
scores = rat[field]
key = (inst['annotation_id'], docid)
assert len(scores) == len(docs[docid])
                if key in key_to_annotation:
                    assert len(scores) == len(key_to_annotation[key])
                else:
                    #In case the model makes a prediction on document(s) for which ground truth evidence is not present
key_to_annotation[key] = [False for _ in docs[docid]]
ret.append(PositionScoredDocument(inst['annotation_id'], docid, tuple(scores), tuple(key_to_annotation[key])))
return ret
def _f1(_p, _r):
if _p == 0 or _r == 0:
return 0
return 2 * _p * _r / (_p + _r)
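# Illustrative check of the harmonic-mean helper above (hedged example):
#
#     _f1(0.5, 0.5)   # -> 0.5
#     _f1(1.0, 0.0)   # -> 0, the zero guard avoids division by zero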
def _keyed_rationale_from_list(rats: List[Rationale]) -> Dict[Tuple[str, str], Rationale]:
ret = defaultdict(set)
for r in rats:
ret[(r.ann_id, r.docid)].add(r)
return ret
def partial_match_score(truth: List[Rationale], pred: List[Rationale], thresholds: List[float]) -> List[Dict[str, Any]]:
"""Computes a partial match F1
Computes an instance-level (annotation) micro- and macro-averaged F1 score.
True Positives are computed by using intersection-over-union and
thresholding the resulting intersection-over-union fraction.
Micro-average results are computed by ignoring instance level distinctions
in the TP calculation (and recall, and precision, and finally the F1 of
those numbers). Macro-average results are computed first by measuring
instance (annotation + document) precisions and recalls, averaging those,
and finally computing an F1 of the resulting average.
"""
ann_to_rat = _keyed_rationale_from_list(truth)
pred_to_rat = _keyed_rationale_from_list(pred)
num_classifications = {k:len(v) for k,v in pred_to_rat.items()}
num_truth = {k:len(v) for k,v in ann_to_rat.items()}
ious = defaultdict(dict)
for k in set(ann_to_rat.keys()) | set(pred_to_rat.keys()):
for p in pred_to_rat.get(k, []):
best_iou = 0.0
for t in ann_to_rat.get(k, []):
num = len(set(range(p.start_token, p.end_token)) & set(range(t.start_token, t.end_token)))
denom = len(set(range(p.start_token, p.end_token)) | set(range(t.start_token, t.end_token)))
iou = 0 if denom == 0 else num / denom
if iou > best_iou:
best_iou = iou
ious[k][p] = best_iou
scores = []
for threshold in thresholds:
threshold_tps = dict()
for k, vs in ious.items():
threshold_tps[k] = sum(int(x >= threshold) for x in vs.values())
micro_r = sum(threshold_tps.values()) / sum(num_truth.values()) if sum(num_truth.values()) > 0 else 0
micro_p = sum(threshold_tps.values()) / sum(num_classifications.values()) if sum(num_classifications.values()) > 0 else 0
micro_f1 = _f1(micro_r, micro_p)
macro_rs = list(threshold_tps.get(k, 0.0) / n if n > 0 else 0 for k, n in num_truth.items())
macro_ps = list(threshold_tps.get(k, 0.0) / n if n > 0 else 0 for k, n in num_classifications.items())
macro_r = sum(macro_rs) / len(macro_rs) if len(macro_rs) > 0 else 0
macro_p = sum(macro_ps) / len(macro_ps) if len(macro_ps) > 0 else 0
macro_f1 = _f1(macro_r, macro_p)
scores.append({'threshold': threshold,
'micro': {
'p': micro_p,
'r': micro_r,
'f1': micro_f1
},
'macro': {
'p': macro_p,
'r': macro_r,
'f1': macro_f1
},
})
return scores
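# Illustrative sketch of the IOU thresholding above (hedged example with
# made-up spans): a predicted span [0, 4) against a gold span [0, 5) has
# IOU = 4/5 = 0.8, so it counts as a true positive at threshold 0.5 but not
# at threshold 0.9.
#
#     truth = [Rationale('ann1', 'doc1', 0, 5)]
#     pred = [Rationale('ann1', 'doc1', 0, 4)]
#     partial_match_score(truth, pred, thresholds=[0.5, 0.9])
#     # -> micro/macro F1 of 1.0 at threshold 0.5 and 0 at threshold 0.9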
def score_hard_rationale_predictions(truth: List[Rationale], pred: List[Rationale]) -> Dict[str, Dict[str, float]]:
"""Computes instance (annotation)-level micro/macro averaged F1s"""
scores = dict()
truth = set(truth)
pred = set(pred)
micro_prec = len(truth & pred) / len(pred)
micro_rec = len(truth & pred) / len(truth)
micro_f1 = _f1(micro_prec, micro_rec)
scores['instance_micro'] = {
'p': micro_prec,
'r': micro_rec,
'f1': micro_f1,
}
ann_to_rat = _keyed_rationale_from_list(truth)
pred_to_rat = _keyed_rationale_from_list(pred)
instances_to_scores = dict()
for k in set(ann_to_rat.keys()) | (pred_to_rat.keys()):
if len(pred_to_rat.get(k, set())) > 0:
instance_prec = len(ann_to_rat.get(k, set()) & pred_to_rat.get(k, set())) / len(pred_to_rat[k])
else:
instance_prec = 0
if len(ann_to_rat.get(k, set())) > 0:
instance_rec = len(ann_to_rat.get(k, set()) & pred_to_rat.get(k, set())) / len(ann_to_rat[k])
else:
instance_rec = 0
instance_f1 = _f1(instance_prec, instance_rec)
instances_to_scores[k] = {
'p': instance_prec,
'r': instance_rec,
'f1': instance_f1,
}
# these are calculated as sklearn would
macro_prec = sum(instance['p'] for instance in instances_to_scores.values()) / len(instances_to_scores)
macro_rec = sum(instance['r'] for instance in instances_to_scores.values()) / len(instances_to_scores)
macro_f1 = sum(instance['f1'] for instance in instances_to_scores.values()) / len(instances_to_scores)
scores['instance_macro'] = {
'p': macro_prec,
'r': macro_rec,
'f1': macro_f1,
}
return scores
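# Illustrative note on the exact-match scoring above (hedged example): spans
# must match token-for-token, so the prediction from the partial-match sketch
# would score zero here, while an exact copy of the gold span scores 1.0.
#
#     score_hard_rationale_predictions([Rationale('ann1', 'doc1', 0, 5)],
#                                      [Rationale('ann1', 'doc1', 0, 5)])
#     # -> {'instance_micro': {'p': 1.0, 'r': 1.0, 'f1': 1.0}, 'instance_macro': {...}}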
def _auprc(truth: Dict[Any, List[bool]], preds: Dict[Any, List[float]]) -> float:
if len(preds) == 0:
return 0.0
    assert len(truth.keys() & preds.keys()) == len(truth.keys())
aucs = []
for k, true in truth.items():
pred = preds[k]
true = [int(t) for t in true]
precision, recall, _ = precision_recall_curve(true, pred)
aucs.append(auc(recall, precision))
return np.average(aucs)
def _score_aggregator(truth: Dict[Any, List[bool]], preds: Dict[Any, List[float]], score_function: Callable[[List[float], List[float]], float ], discard_single_class_answers: bool) -> float:
if len(preds) == 0:
return 0.0
    assert len(truth.keys() & preds.keys()) == len(truth.keys())
scores = []
for k, true in truth.items():
pred = preds[k]
if (all(true) or all(not x for x in true)) and discard_single_class_answers:
continue
true = [int(t) for t in true]
scores.append(score_function(true, pred))
return np.average(scores)
def score_soft_tokens(paired_scores: List[PositionScoredDocument]) -> Dict[str, float]:
truth = {(ps.ann_id, ps.docid): ps.truths for ps in paired_scores}
pred = {(ps.ann_id, ps.docid): ps.scores for ps in paired_scores}
auprc_score = _auprc(truth, pred)
ap = _score_aggregator(truth, pred, average_precision_score, True)
roc_auc = _score_aggregator(truth, pred, roc_auc_score, True)
return {
'auprc': auprc_score,
'average_precision': ap,
'roc_auc_score': roc_auc,
}
def _instances_aopc(instances: List[dict], thresholds: List[float], key: str) -> Tuple[float, List[float]]:
dataset_scores = []
for inst in instances:
kls = inst['classification']
beta_0 = inst['classification_scores'][kls]
instance_scores = []
for score in filter(lambda x : x['threshold'] in thresholds, sorted(inst['thresholded_scores'], key=lambda x: x['threshold'])):
beta_k = score[key][kls]
delta = beta_0 - beta_k
instance_scores.append(delta)
assert len(instance_scores) == len(thresholds)
dataset_scores.append(instance_scores)
dataset_scores = np.array(dataset_scores)
# a careful reading of Samek, et al. "Evaluating the Visualization of What a Deep Neural Network Has Learned"
# and some algebra will show the reader that we can average in any of several ways and get the same result:
    # over a flattened array, within an instance and then between instances, or over instances (by position) and
    # then across them.
final_score = np.average(dataset_scores)
position_scores = np.average(dataset_scores, axis=0).tolist()
return final_score, position_scores
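# Illustrative sketch of the AOPC deltas above (hedged example with made-up
# scores): for an instance classified as 'POS' with a full-input probability
# of 0.9 and thresholded probabilities of 0.7 and 0.4, the per-threshold
# deltas are [0.2, 0.5]; the final score is the average of these deltas over
# all instances, and ``position_scores`` averages them per threshold position.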
def compute_aopc_scores(instances: List[dict], aopc_thresholds: List[float]):
if aopc_thresholds is None :
aopc_thresholds = sorted(set(chain.from_iterable([x['threshold'] for x in y['thresholded_scores']] for y in instances)))
aopc_comprehensiveness_score, aopc_comprehensiveness_points = _instances_aopc(instances, aopc_thresholds, 'comprehensiveness_classification_scores')
aopc_sufficiency_score, aopc_sufficiency_points = _instances_aopc(instances, aopc_thresholds, 'sufficiency_classification_scores')
return aopc_thresholds, aopc_comprehensiveness_score, aopc_comprehensiveness_points, aopc_sufficiency_score, aopc_sufficiency_points
def score_classifications(instances: List[dict], annotations: List[Annotation], docs: Dict[str, List[str]], aopc_thresholds: List[float]) -> Dict[str, float]:
def compute_kl(cls_scores_, faith_scores_):
keys = list(cls_scores_.keys())
cls_scores_ = [cls_scores_[k] for k in keys]
faith_scores_ = [faith_scores_[k] for k in keys]
return entropy(faith_scores_, cls_scores_)
labels = list(set(x.classification for x in annotations))
    labels += ['normal']
label_to_int = {l:i for i,l in enumerate(labels)}
key_to_instances = {inst['annotation_id']:inst for inst in instances}
truth = []
predicted = []
for ann in annotations:
truth.append(label_to_int[ann.classification])
inst = key_to_instances[ann.annotation_id]
predicted.append(label_to_int[inst['classification']])
classification_scores = classification_report(truth, predicted, output_dict=True, target_names=labels, digits=3)
accuracy = accuracy_score(truth, predicted)
if 'comprehensiveness_classification_scores' in instances[0]:
comprehensiveness_scores = [x['classification_scores'][x['classification']] - x['comprehensiveness_classification_scores'][x['classification']] for x in instances]
comprehensiveness_score = np.average(comprehensiveness_scores)
else :
comprehensiveness_score = None
comprehensiveness_scores = None
if 'sufficiency_classification_scores' in instances[0]:
sufficiency_scores = [x['classification_scores'][x['classification']] - x['sufficiency_classification_scores'][x['classification']] for x in instances]
sufficiency_score = np.average(sufficiency_scores)
else :
sufficiency_score = None
sufficiency_scores = None
if 'comprehensiveness_classification_scores' in instances[0]:
comprehensiveness_entropies = [entropy(list(x['classification_scores'].values())) - entropy(list(x['comprehensiveness_classification_scores'].values())) for x in instances]
comprehensiveness_entropy = np.average(comprehensiveness_entropies)
comprehensiveness_kl = np.average(list(compute_kl(x['classification_scores'], x['comprehensiveness_classification_scores']) for x in instances))
else:
comprehensiveness_entropies = None
comprehensiveness_kl = None
comprehensiveness_entropy = None
if 'sufficiency_classification_scores' in instances[0]:
sufficiency_entropies = [entropy(list(x['classification_scores'].values())) - entropy(list(x['sufficiency_classification_scores'].values())) for x in instances]
sufficiency_entropy = np.average(sufficiency_entropies)
sufficiency_kl = np.average(list(compute_kl(x['classification_scores'], x['sufficiency_classification_scores']) for x in instances))
else:
sufficiency_entropies = None
sufficiency_kl = None
sufficiency_entropy = None
if 'thresholded_scores' in instances[0]:
aopc_thresholds, aopc_comprehensiveness_score, aopc_comprehensiveness_points, aopc_sufficiency_score, aopc_sufficiency_points = compute_aopc_scores(instances, aopc_thresholds)
else:
aopc_thresholds, aopc_comprehensiveness_score, aopc_comprehensiveness_points, aopc_sufficiency_score, aopc_sufficiency_points = None, None, None, None, None
if 'tokens_to_flip' in instances[0]:
token_percentages = []
for ann in annotations:
# in practice, this is of size 1 for everything except e-snli
docids = set(ev.docid for ev in chain.from_iterable(ann.evidences))
inst = key_to_instances[ann.annotation_id]
tokens = inst['tokens_to_flip']
doc_lengths = sum(len(docs[d]) for d in docids)
token_percentages.append(tokens / doc_lengths)
token_percentages = np.average(token_percentages)
else:
token_percentages = None
return {
'accuracy': accuracy,
'prf': classification_scores,
'comprehensiveness': comprehensiveness_score,
'sufficiency': sufficiency_score,
'comprehensiveness_entropy': comprehensiveness_entropy,
'comprehensiveness_kl': comprehensiveness_kl,
'sufficiency_entropy': sufficiency_entropy,
'sufficiency_kl': sufficiency_kl,
'aopc_thresholds': aopc_thresholds,
'comprehensiveness_aopc': aopc_comprehensiveness_score,
'comprehensiveness_aopc_points': aopc_comprehensiveness_points,
'sufficiency_aopc': aopc_sufficiency_score,
'sufficiency_aopc_points': aopc_sufficiency_points,
}
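# Illustrative sketch, not part of the original scoring pipeline: how the
# comprehensiveness and sufficiency deltas computed above behave for one
# hypothetical instance (all probabilities below are made up).
def _example_faithfulness_deltas():
    instance = {
        'classification': 'POS',
        'classification_scores': {'POS': 0.90, 'NEG': 0.10},
        # model re-run with the predicted rationale tokens removed
        'comprehensiveness_classification_scores': {'POS': 0.40, 'NEG': 0.60},
        # model re-run with only the predicted rationale tokens kept
        'sufficiency_classification_scores': {'POS': 0.85, 'NEG': 0.15},
    }
    label = instance['classification']
    comprehensiveness = (instance['classification_scores'][label]
                         - instance['comprehensiveness_classification_scores'][label])  # 0.90 - 0.40 = 0.50, higher is better
    sufficiency = (instance['classification_scores'][label]
                   - instance['sufficiency_classification_scores'][label])  # 0.90 - 0.85 = 0.05, lower is better
    return comprehensiveness, sufficiency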
def verify_instance(instance: dict, docs: Dict[str, list], thresholds: Set[float]):
error = False
docids = []
# verify the internal structure of these instances is correct:
# * hard predictions are present
# * start and end tokens are valid
# * soft rationale predictions, if present, must have the same document length
for rat in instance['rationales']:
docid = rat['docid']
        if docid not in docs:
error = True
logging.info(f'Error! For instance annotation={instance["annotation_id"]}, docid={docid} could not be found as a preprocessed document! Gave up on additional processing.')
continue
        docids.append(docid)
        doc_length = len(docs[docid])
for h1 in rat.get('hard_rationale_predictions', []):
# verify that each token is valid
# verify that no annotations overlap
for h2 in rat.get('hard_rationale_predictions', []):
if h1 == h2:
continue
if len(set(range(h1['start_token'], h1['end_token'])) & set(range(h2['start_token'], h2['end_token']))) > 0:
logging.info(f'Error! For instance annotation={instance["annotation_id"]}, docid={docid} {h1} and {h2} overlap!')
error = True
if h1['start_token'] > doc_length:
logging.info(f'Error! For instance annotation={instance["annotation_id"]}, docid={docid} received an impossible tokenspan: {h1} for a document of length {doc_length}')
error = True
if h1['end_token'] > doc_length:
logging.info(f'Error! For instance annotation={instance["annotation_id"]}, docid={docid} received an impossible tokenspan: {h1} for a document of length {doc_length}')
error = True
# length check for soft rationale
# note that either flattened_documents or sentence-broken documents must be passed in depending on result
soft_rationale_predictions = rat.get('soft_rationale_predictions', [])
if len(soft_rationale_predictions) > 0 and len(soft_rationale_predictions) != doc_length:
logging.info(f'Error! For instance annotation={instance["annotation_id"]}, docid={docid} expected classifications for {doc_length} tokens but have them for {len(soft_rationale_predictions)} tokens instead!')
error = True
# count that one appears per-document
docids = Counter(docids)
for docid, count in docids.items():
if count > 1:
error = True
            logging.info(f'Error! For instance annotation={instance["annotation_id"]}, docid={docid} appears {count} times; it may only appear once!')
classification = instance.get('classification', '')
if not isinstance(classification, str):
logging.info(f'Error! For instance annotation={instance["annotation_id"]}, classification field {classification} is not a string!')
error = True
classification_scores = instance.get('classification_scores', dict())
if not isinstance(classification_scores, dict):
logging.info(f'Error! For instance annotation={instance["annotation_id"]}, classification_scores field {classification_scores} is not a dict!')
error = True
comprehensiveness_classification_scores = instance.get('comprehensiveness_classification_scores', dict())
if not isinstance(comprehensiveness_classification_scores, dict):
logging.info(f'Error! For instance annotation={instance["annotation_id"]}, comprehensiveness_classification_scores field {comprehensiveness_classification_scores} is not a dict!')
error = True
sufficiency_classification_scores = instance.get('sufficiency_classification_scores', dict())
if not isinstance(sufficiency_classification_scores, dict):
logging.info(f'Error! For instance annotation={instance["annotation_id"]}, sufficiency_classification_scores field {sufficiency_classification_scores} is not a dict!')
error = True
if ('classification' in instance) != ('classification_scores' in instance):
logging.info(f'Error! For instance annotation={instance["annotation_id"]}, when providing a classification, you must also provide classification scores!')
error = True
if ('comprehensiveness_classification_scores' in instance) and not ('classification' in instance):
        logging.info(f'Error! For instance annotation={instance["annotation_id"]}, when providing a comprehensiveness_classification_score, you must also provide a classification!')
error = True
if ('sufficiency_classification_scores' in instance) and not ('classification_scores' in instance):
logging.info(f'Error! For instance annotation={instance["annotation_id"]}, when providing a sufficiency_classification_score, you must also provide a classification score!')
error = True
if 'thresholded_scores' in instance:
instance_thresholds = set(x['threshold'] for x in instance['thresholded_scores'])
if instance_thresholds != thresholds:
error = True
            logging.info(f'Error: {instance["thresholded_scores"]} has thresholds that differ from previous thresholds: {thresholds}')
if 'comprehensiveness_classification_scores' not in instance\
or 'sufficiency_classification_scores' not in instance\
or 'classification' not in instance\
or 'classification_scores' not in instance:
error = True
            logging.info(f'Error: {instance} must have comprehensiveness_classification_scores, sufficiency_classification_scores, classification, and classification_scores defined when including thresholded scores')
if not all('sufficiency_classification_scores' in x for x in instance['thresholded_scores']):
error = True
            logging.info(f'Error: {instance} must have sufficiency_classification_scores for every threshold')
if not all('comprehensiveness_classification_scores' in x for x in instance['thresholded_scores']):
error = True
            logging.info(f'Error: {instance} must have comprehensiveness_classification_scores for every threshold')
return error
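# Small illustration of the span-overlap test used in verify_instance above;
# a sketch with hypothetical spans, not part of the original validation code.
# Two hard rationale spans overlap iff their half-open token ranges intersect.
def _example_spans_overlap():
    h1 = {'start_token': 2, 'end_token': 5}   # covers tokens 2, 3, 4
    h2 = {'start_token': 4, 'end_token': 7}   # covers tokens 4, 5, 6
    overlap = len(set(range(h1['start_token'], h1['end_token']))
                  & set(range(h2['start_token'], h2['end_token']))) > 0
    return overlap  # True, because token 4 lies in both spans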
def verify_instances(instances: List[dict], docs: Dict[str, list]):
annotation_ids = list(x['annotation_id'] for x in instances)
key_counter = Counter(annotation_ids)
multi_occurrence_annotation_ids = list(filter(lambda kv: kv[1] > 1, key_counter.items()))
error = False
if len(multi_occurrence_annotation_ids) > 0:
error = True
logging.info(f'Error in instances: {len(multi_occurrence_annotation_ids)} appear multiple times in the annotations file: {multi_occurrence_annotation_ids}')
failed_validation = set()
instances_with_classification = list()
instances_with_soft_rationale_predictions = list()
instances_with_soft_sentence_predictions = list()
instances_with_comprehensiveness_classifications = list()
instances_with_sufficiency_classifications = list()
instances_with_thresholded_scores = list()
if 'thresholded_scores' in instances[0]:
thresholds = set(x['threshold'] for x in instances[0]['thresholded_scores'])
else:
thresholds = None
for instance in instances:
instance_error = verify_instance(instance, docs, thresholds)
if instance_error:
error = True
failed_validation.add(instance['annotation_id'])
        if instance.get('classification', None) is not None:
            instances_with_classification.append(instance)
        if instance.get('comprehensiveness_classification_scores', None) is not None:
            instances_with_comprehensiveness_classifications.append(instance)
        if instance.get('sufficiency_classification_scores', None) is not None:
            instances_with_sufficiency_classifications.append(instance)
        has_soft_rationales = []
        has_soft_sentences = []
        for rat in instance['rationales']:
            if rat.get('soft_rationale_predictions', None) is not None:
                has_soft_rationales.append(rat)
            if rat.get('soft_sentence_predictions', None) is not None:
                has_soft_sentences.append(rat)
if len(has_soft_rationales) > 0:
instances_with_soft_rationale_predictions.append(instance)
if len(has_soft_rationales) != len(instance['rationales']):
error = True
                logging.info(f'Error: instance {instance["annotation_id"]} has soft rationales for some but not all reported documents!')
if len(has_soft_sentences) > 0:
instances_with_soft_sentence_predictions.append(instance)
if len(has_soft_sentences) != len(instance['rationales']):
error = True
                logging.info(f'Error: instance {instance["annotation_id"]} has soft sentences for some but not all reported documents!')
if 'thresholded_scores' in instance:
instances_with_thresholded_scores.append(instance)
logging.info(f'Error in instances: {len(failed_validation)} instances fail validation: {failed_validation}')
if len(instances_with_classification) != 0 and len(instances_with_classification) != len(instances):
logging.info(f'Either all {len(instances)} must have a classification or none may, instead {len(instances_with_classification)} do!')
error = True
if len(instances_with_soft_sentence_predictions) != 0 and len(instances_with_soft_sentence_predictions) != len(instances):
logging.info(f'Either all {len(instances)} must have a sentence prediction or none may, instead {len(instances_with_soft_sentence_predictions)} do!')
error = True
if len(instances_with_soft_rationale_predictions) != 0 and len(instances_with_soft_rationale_predictions) != len(instances):
logging.info(f'Either all {len(instances)} must have a soft rationale prediction or none may, instead {len(instances_with_soft_rationale_predictions)} do!')
error = True
if len(instances_with_comprehensiveness_classifications) != 0 and len(instances_with_comprehensiveness_classifications) != len(instances):
error = True
logging.info(f'Either all {len(instances)} must have a comprehensiveness classification or none may, instead {len(instances_with_comprehensiveness_classifications)} do!')
if len(instances_with_sufficiency_classifications) != 0 and len(instances_with_sufficiency_classifications) != len(instances):
error = True
logging.info(f'Either all {len(instances)} must have a sufficiency classification or none may, instead {len(instances_with_sufficiency_classifications)} do!')
if len(instances_with_thresholded_scores) != 0 and len(instances_with_thresholded_scores) != len(instances):
error = True
logging.info(f'Either all {len(instances)} must have thresholded scores or none may, instead {len(instances_with_thresholded_scores)} do!')
if error:
raise ValueError('Some instances are invalid, please fix your formatting and try again')
def _has_hard_predictions(results: List[dict]) -> bool:
# assumes that we have run "verification" over the inputs
return 'rationales' in results[0]\
and len(results[0]['rationales']) > 0\
and 'hard_rationale_predictions' in results[0]['rationales'][0]\
and results[0]['rationales'][0]['hard_rationale_predictions'] is not None\
and len(results[0]['rationales'][0]['hard_rationale_predictions']) > 0
def _has_soft_predictions(results: List[dict]) -> bool:
# assumes that we have run "verification" over the inputs
return 'rationales' in results[0] and len(results[0]['rationales']) > 0 and 'soft_rationale_predictions' in results[0]['rationales'][0] and results[0]['rationales'][0]['soft_rationale_predictions'] is not None
def _has_soft_sentence_predictions(results: List[dict]) -> bool:
# assumes that we have run "verification" over the inputs
return 'rationales' in results[0] and len(results[0]['rationales']) > 0 and 'soft_sentence_predictions' in results[0]['rationales'][0] and results[0]['rationales'][0]['soft_sentence_predictions'] is not None
def _has_classifications(results: List[dict]) -> bool:
# assumes that we have run "verification" over the inputs
return 'classification' in results[0] and results[0]['classification'] is not None
def main():
parser = argparse.ArgumentParser(description="""Computes rationale and final class classification scores""", formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('--data_dir', dest='data_dir', required=True, help='Which directory contains a {train,val,test}.jsonl file?')
parser.add_argument('--split', dest='split', required=True, help='Which of {train,val,test} are we scoring on?')
parser.add_argument('--strict', dest='strict', required=False, action='store_true', default=False, help='Do we perform strict scoring?')
parser.add_argument('--results', dest='results', required=True, help="""Results File
Contents are expected to be jsonl of:
{
"annotation_id": str, required
# these classifications *must not* overlap
"rationales": List[
{
"docid": str, required
"hard_rationale_predictions": List[{
"start_token": int, inclusive, required
"end_token": int, exclusive, required
}], optional,
# token level classifications, a value must be provided per-token
# in an ideal world, these correspond to the hard-decoding above.
"soft_rationale_predictions": List[float], optional.
# sentence level classifications, a value must be provided for every
# sentence in each document, or not at all
"soft_sentence_predictions": List[float], optional.
}
],
# the classification the model made for the overall classification task
"classification": str, optional
# A probability distribution output by the model. We require this to be normalized.
"classification_scores": Dict[str, float], optional
# The next two fields are measures for how faithful your model is (the
# rationales it predicts are in some sense causal of the prediction), and
# how sufficient they are. We approximate a measure for comprehensiveness by
# asking that you remove the top k%% of tokens from your documents,
# running your models again, and reporting the score distribution in the
# "comprehensiveness_classification_scores" field.
# We approximate a measure of sufficiency by asking exactly the converse
# - that you provide model distributions on the removed k%% tokens.
# 'k' is determined by human rationales, and is documented in our paper.
# You should determine which of these tokens to remove based on some kind
# of information about your model: gradient based, attention based, other
# interpretability measures, etc.
# scores per class having removed k%% of the data, where k is determined by human comprehensive rationales
"comprehensiveness_classification_scores": Dict[str, float], optional
# scores per class having access to only k%% of the data, where k is determined by human comprehensive rationales
"sufficiency_classification_scores": Dict[str, float], optional
# the number of tokens required to flip the prediction - see "Is Attention Interpretable" by Serrano and Smith.
"tokens_to_flip": int, optional
"thresholded_scores": List[{
"threshold": float, required,
"comprehensiveness_classification_scores": like "classification_scores"
"sufficiency_classification_scores": like "classification_scores"
}], optional. if present, then "classification" and "classification_scores" must be present
}
When providing one of the optional fields, it must be provided for *every* instance.
The classification, classification_score, and comprehensiveness_classification_scores
must together be present for every instance or absent for every instance.
""")
parser.add_argument('--iou_thresholds', dest='iou_thresholds', required=False, nargs='+', type=float, default=[0.5], help='''Thresholds for IOU scoring.
These are used for "soft" or partial match scoring of rationale spans.
A span is considered a match if the size of the intersection of the prediction
and the annotation, divided by the union of the two spans, is larger than
the IOU threshold. This score can be computed for arbitrary thresholds.
''')
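    # Worked sketch of the IOU matching described above (token spans are
    # hypothetical; this mirrors the help text, it is not the scoring code itself):
    #   truth      = set(range(10, 20))   # tokens 10..19
    #   prediction = set(range(15, 25))   # tokens 15..24
    #   iou = len(truth & prediction) / len(truth | prediction)   # 5 / 15 = 0.33...
    #   so this pair would not count as a match at the default 0.5 threshold.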
parser.add_argument('--score_file', dest='score_file', required=False, default=None, help='Where to write results?')
parser.add_argument('--aopc_thresholds', nargs='+', required=False, type=float, default=[0.01, 0.05, 0.1, 0.2, 0.5], help='Thresholds for AOPC Thresholds')
args = parser.parse_args()
results = load_jsonl(args.results)
docids = set(chain.from_iterable([rat['docid'] for rat in res['rationales']] for res in results))
docs = load_flattened_documents(args.data_dir, docids)
verify_instances(results, docs)
# load truth
annotations = annotations_from_jsonl(os.path.join(args.data_dir, args.split + '.jsonl'))
docids |= set(chain.from_iterable((ev.docid for ev in chain.from_iterable(ann.evidences)) for ann in annotations))
has_final_predictions = _has_classifications(results)
scores = dict()
if args.strict:
if not args.iou_thresholds:
raise ValueError("--iou_thresholds must be provided when running strict scoring")
if not has_final_predictions:
raise ValueError("We must have a 'classification', 'classification_score', and 'comprehensiveness_classification_score' field in order to perform scoring!")
# TODO think about offering a sentence level version of these scores.
if _has_hard_predictions(results):
truth = list(chain.from_iterable(Rationale.from_annotation(ann) for ann in annotations))
pred = list(chain.from_iterable(Rationale.from_instance(inst) for inst in results))
if args.iou_thresholds is not None:
iou_scores = partial_match_score(truth, pred, args.iou_thresholds)
scores['iou_scores'] = iou_scores
# NER style scoring
rationale_level_prf = score_hard_rationale_predictions(truth, pred)
scores['rationale_prf'] = rationale_level_prf
token_level_truth = list(chain.from_iterable(rat.to_token_level() for rat in truth))
token_level_pred = list(chain.from_iterable(rat.to_token_level() for rat in pred))
token_level_prf = score_hard_rationale_predictions(token_level_truth, token_level_pred)
scores['token_prf'] = token_level_prf
else:
logging.info("No hard predictions detected, skipping rationale scoring")
if _has_soft_predictions(results):
flattened_documents = load_flattened_documents(args.data_dir, docids)
paired_scoring = PositionScoredDocument.from_results(results, annotations, flattened_documents, use_tokens=True)
token_scores = score_soft_tokens(paired_scoring)
scores['token_soft_metrics'] = token_scores
else:
logging.info("No soft predictions detected, skipping rationale scoring")
if _has_soft_sentence_predictions(results):
documents = load_documents(args.data_dir, docids)
paired_scoring = PositionScoredDocument.from_results(results, annotations, documents, use_tokens=False)
sentence_scores = score_soft_tokens(paired_scoring)
scores['sentence_soft_metrics'] = sentence_scores
else:
logging.info("No sentence level predictions detected, skipping sentence-level diagnostic")
if has_final_predictions:
flattened_documents = load_flattened_documents(args.data_dir, docids)
class_results = score_classifications(results, annotations, flattened_documents, args.aopc_thresholds)
scores['classification_scores'] = class_results
else:
logging.info("No classification scores detected, skipping classification")
pprint.pprint(scores)
if args.score_file:
with open(args.score_file, 'w') as of:
json.dump(scores, of, indent=4, sort_keys=True)
if __name__ == '__main__':
main()
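# Example invocation; the script name and paths are placeholders, adjust to your
# own layout (only the flags are taken from the argument parser above):
#   python <this_script>.py --data_dir data/movies --split test \
#       --results predictions/test_results.jsonl --score_file scores/test_scores.json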
| 55.80924
| 219
| 0.679636
|
794df327324d9342d659f77fb9d087e5e364f0df
| 8,848
|
py
|
Python
|
peepo/autopoiesis/particle.py
|
hayoc/peepo
|
b15fd18d0f618e1e24eadc97c72fde62039ddafb
|
[
"MIT"
] | 2
|
2019-03-04T21:12:21.000Z
|
2021-03-30T00:35:50.000Z
|
peepo/autopoiesis/particle.py
|
hayoc/peepo
|
b15fd18d0f618e1e24eadc97c72fde62039ddafb
|
[
"MIT"
] | null | null | null |
peepo/autopoiesis/particle.py
|
hayoc/peepo
|
b15fd18d0f618e1e24eadc97c72fde62039ddafb
|
[
"MIT"
] | null | null | null |
import pygame as pg
import math
import random
vec = pg.math.Vector2
SCREEN_SIZE = (800, 800)
class Particle:
SIZE = (4, 4)
SPEED = 2
KIND_COLOR = {
"S": "yellow",
"K": "green",
"L": "blue"
}
KIND_SIZE = {
"S": (4, 4),
"K": (6, 6),
"L": (6, 6)
}
def __init__(self, kind, others, pos=None):
self.kind = kind
self.others = others
if pos is None:
pos = (random.randint(0, 800), random.randint(0, 800))
self.rect = pg.Rect(pos, Particle.KIND_SIZE[self.kind])
self.rect.center = pos
self.rotation = random.randint(0, 360)
self.image = self.make_image()
self.image_original = self.image.copy()
self.timestep = 0
        self.disintegration_chance = 1  # weighed against 1,000,000,000 in disintegration(), so roughly a 1-in-a-billion chance per step at first
self.bond_left = None
self.bond_right = None
self.clamp = False
if self.kind == "L":
self.edge_left = end_line(5, self.rotation - 90, self.rect.center)
self.edge_right = end_line(5, self.rotation + 90, self.rect.center)
self.edge_center = end_line(10, self.rotation, self.rect.center)
def update(self):
if not self.bond_left and not self.bond_right:
self.rotation += random.randint(-5, 5)
if self.rotation < 0:
self.rotation = 360
if self.rotation > 360:
self.rotation = 0
self.timestep += 1
if self.timestep > 4:
self.rect.x += Particle.SPEED * math.cos(math.radians(self.rotation))
self.rect.y += Particle.SPEED * math.sin(math.radians(self.rotation))
self.image = pg.transform.rotate(self.image_original, -self.rotation)
self.rect = self.image.get_rect(center=self.rect.center)
self.timestep = 0
if self.rect.x < 0:
self.rect.x = 800
if self.rect.y < 0:
self.rect.y = 800
if self.rect.x > 800:
self.rect.x = 0
if self.rect.y > 800:
self.rect.y = 0
if self.kind == "L":
self.edge_center = end_line(10, self.rotation, self.rect.center)
self.edge_left = end_line(10, self.rotation - 90, self.rect.center)
self.edge_right = end_line(10, self.rotation + 90, self.rect.center)
if self.kind == "K":
self.production()
if self.kind == "L":
self.bonding()
self.disintegration()
def draw(self, surface):
surface.blit(self.image, self.rect)
if self.kind == "L":
# myfont = pg.font.SysFont("Comic Sans MS", 8)
# label = myfont.render("{}r - {}p".format(self.rotation, (self.rect.x, self.rect.y)), True, pg.Color("red"))
# surface.blit(label, self.rect)
pg.draw.line(surface, pg.Color("pink"), self.rect.center, self.edge_right, 2)
pg.draw.line(surface, pg.Color("purple"), self.rect.center, self.edge_left, 2)
pg.draw.line(surface, pg.Color("red"), self.rect.center, self.edge_center, 2)
def production(self):
collided = []
for particle in list(self.others):
if particle.kind == "S":
collide = self.rect.colliderect(particle.rect)
if collide:
collided.append(particle)
if len(collided) >= 2:
sub0 = collided[0]
sub1 = collided[1]
dist_x = abs(sub0.rect.x - sub1.rect.x)
dist_y = abs(sub0.rect.y - sub1.rect.y)
start_x = min(sub0.rect.x, sub1.rect.x)
start_y = min(sub0.rect.y, sub1.rect.y)
new_x = start_x + dist_x / 2
new_y = start_y + dist_y / 2
self.others.remove(sub0)
self.others.remove(sub1)
self.others.append(Particle("L", self.others, (new_x, new_y)))
collided.clear()
def bonding(self):
if self.clamp:
return
for particle in list(self.others):
if particle.kind == "L" and particle is not self:
if not self.bond_left and not particle.bond_right:
src_rect = pg.Rect(self.edge_left, (10, 10))
tgt_rect = pg.Rect(particle.edge_right, (10, 10))
collide = src_rect.colliderect(tgt_rect)
if collide:
self.clamp = True
particle.clamp = True
self.bond_left = particle
particle.bond_right = self
angle = 25 # +25 for a left attach, -25 for a right attach
radius = 25 # radius of the theoretical circle of cell
origin_x = self.rect.centerx - radius * math.cos(math.radians(self.rotation))
origin_y = self.rect.centery - radius * math.sin(math.radians(self.rotation))
particle.rotation = modify_degrees(self.rotation, angle)
particle.rect.centerx = origin_x + radius * math.cos(math.radians(particle.rotation))
particle.rect.centery = origin_y + radius * math.sin(math.radians(particle.rotation))
particle.edge_left = end_line(10, particle.rotation - 90, particle.rect.center)
particle.edge_right = end_line(10, particle.rotation + 90, particle.rect.center)
particle.edge_center = end_line(10, particle.rotation, particle.rect.center)
if not self.bond_right and not particle.bond_left:
src_rect = pg.Rect(self.edge_right, (10, 10))
tgt_rect = pg.Rect(particle.edge_left, (10, 10))
collide = src_rect.colliderect(tgt_rect)
if collide:
self.clamp = True
particle.clamp = True
self.bond_right = particle
particle.bond_left = self
angle = -25 # +25 for a left attach, -25 for a right attach
radius = 25 # radius of the theoretical circle of cell
origin_x = self.rect.centerx - radius * math.cos(math.radians(self.rotation))
origin_y = self.rect.centery - radius * math.sin(math.radians(self.rotation))
particle.rotation = modify_degrees(self.rotation, angle)
particle.rect.centerx = origin_x + radius * math.cos(math.radians(particle.rotation))
particle.rect.centery = origin_y + radius * math.sin(math.radians(particle.rotation))
particle.edge_left = end_line(10, particle.rotation - 90, particle.rect.center)
particle.edge_right = end_line(10, particle.rotation + 90, particle.rect.center)
particle.edge_center = end_line(10, particle.rotation, particle.rect.center)
def disintegration(self):
self.disintegration_chance += 1
disintegrate = random.choices([True, False], weights=[self.disintegration_chance, 1000000000], k=1)[0]
if disintegrate:
            if self.bond_left:
                # the left-bonded neighbour points back at us via bond_right; clear that reference
                self.bond_left.bond_right = None
            if self.bond_right:
                # likewise clear the right-bonded neighbour's bond_left back-reference
                self.bond_right.bond_left = None
self.others.remove(self)
self.others.append(Particle("S", self.others, (self.rect.x, self.rect.y)))
self.others.append(Particle("S", self.others, (self.rect.x, self.rect.y)))
def make_image(self):
image = pg.Surface(self.rect.size).convert_alpha()
image.fill((0, 0, 0, 0))
image_rect = image.get_rect()
pg.draw.rect(image, pg.Color("black"), image_rect)
pg.draw.rect(image, pg.Color(Particle.KIND_COLOR[self.kind]), image_rect.inflate(-2, -2))
return image
def end_line(radius, rotation, center):
center_rotate = vec(radius, 0).rotate(rotation)
return center_rotate + center
def identify_lone_link(one: Particle, two: Particle):
if not one.bond_left and not one.bond_right:
return two, one
if not two.bond_left and not two.bond_right:
return one, two
return None, None
def modify_degrees(start, add):
if add > 0:
if start + add > 360:
return start + add - 360
return start + add
elif add < 0:
if start + add < 0:
return start + add + 360
return start + add
else:
return start
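# A few worked examples of modify_degrees (values chosen for illustration only):
#   modify_degrees(350, 25) -> 15    (wraps past 360)
#   modify_degrees(10, -25) -> 345   (wraps below 0)
#   modify_degrees(90, 0)   -> 90    (unchanged)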
| 38.469565
| 121
| 0.539331
|
794df3604fe86f5abe00916172d46b23cc76faf0
| 9,606
|
py
|
Python
|
test/integration/test_case_management_v1.py
|
zachsirotto/platform-services-python-sdk
|
32a080b7a93567f9528867a31bd0b47423297bab
|
[
"Apache-2.0"
] | null | null | null |
test/integration/test_case_management_v1.py
|
zachsirotto/platform-services-python-sdk
|
32a080b7a93567f9528867a31bd0b47423297bab
|
[
"Apache-2.0"
] | null | null | null |
test/integration/test_case_management_v1.py
|
zachsirotto/platform-services-python-sdk
|
32a080b7a93567f9528867a31bd0b47423297bab
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
# Copyright 2020 IBM All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
This class contains an integration test for the Case Management service.
'''
import pytest
import unittest
import os
import os.path
import io
from ibm_cloud_sdk_core import *
from ibm_platform_services.case_management_v1 import *
# Read config file
configFile = 'case_management.env'
class TestCaseManagementV1(unittest.TestCase):
"""
Integration Test Class for CaseManagementV1
"""
# Used to store newly created case number
new_case_number = ''
# Used to store newly uploaded file id
file_attachment_id = ''
@classmethod
def setUpClass(cls):
if os.path.exists(configFile):
os.environ['IBM_CREDENTIALS_FILE'] = configFile
else:
raise unittest.SkipTest('External configuration not available, skipping...')
cls.service = CaseManagementV1.new_instance()
assert cls.service is not None
print('Setup complete.')
def test_01_create_case(self):
# Offering info can be retrieved via /case-management/utilities/v1/offerings/technical
offering_payload_type_model = {}
offering_payload_type_model['group'] = 'crn_service_name'
offering_payload_type_model['key'] = 'cloud-object-storage'
offering_payload_type_model['id'] = 'dff97f5c-bc5e-4455-b470-411c3edbe49c'
offering_payload_model = {}
offering_payload_model['name'] = 'Cloud Object Storage'
offering_payload_model['type'] = offering_payload_type_model
type = 'technical'
subject = 'Python - Integration test'
        description = 'Please ignore - this is a test case.'
severity = 4
offering = offering_payload_model
response = self.service.create_case(
type,
subject,
description,
severity=severity,
offering=offering,
headers={}
)
# Storing the new case number for subsequent test cases
TestCaseManagementV1.new_case_number = response.result['number']
assert response.get_status_code() == 200
assert subject == response.result['short_description']
assert description == response.result['description']
def test_02_create_case_with_empty_offering(self):
type = 'technical'
subject = 'Python - Integration test'
        description = 'Please ignore - this is a test case.'
severity = 4
with pytest.raises(ApiException) as e:
self.service.create_case(
type,
subject,
description,
severity=severity,
headers={}
)
assert e.value.code == 400
def test_03_create_case_with_empty_subject_and_description(self):
# Offering info can be retrieved via /case-management/utilities/v1/offerings/technical
offering_payload_type_model = {}
offering_payload_type_model['group'] = 'crn_service_name'
offering_payload_type_model['key'] = 'cloud-object-storage'
offering_payload_type_model['id'] = 'dff97f5c-bc5e-4455-b470-411c3edbe49c'
offering_payload_model = {}
offering_payload_model['name'] = 'Cloud Object Storage'
offering_payload_model['type'] = offering_payload_type_model
type = 'technical'
subject = ''
description = ''
severity = 4
offering = offering_payload_model
# Subject and description are required
with pytest.raises(ApiException) as e:
self.service.create_case(
type,
subject,
description,
severity=severity,
offering=offering,
headers={}
)
assert e.value.code == 400
def test_04_get_cases(self):
offset = 0
limit = 2
sort = 'number'
fields = ['number']
response = self.service.get_cases(
offset=offset,
limit=limit,
sort=sort,
fields=fields,
headers={}
)
assert response.status_code == 200
assert response.result['total_count'] > 0
def test_05_get_case(self):
fields = ['number', 'short_description']
case_number = TestCaseManagementV1.new_case_number
response = self.service.get_case(
self.new_case_number,
fields=fields,
headers={}
)
assert TestCaseManagementV1.new_case_number == response.result['number']
assert response.result['short_description'] != ''
def test_06_get_case_with_invalid_field(self):
fields = ['number', 'short_description', 'invalid_field']
case_number = TestCaseManagementV1.new_case_number
with pytest.raises(ApiException) as e:
self.service.get_case(
self.new_case_number,
fields=fields,
headers={}
)
assert e.value.code == 400
def test_07_add_comment(self):
case_number = TestCaseManagementV1.new_case_number
comment = 'This is a test comment!'
response = self.service.add_comment(
case_number,
comment,
headers={}
)
assert response.status_code == 200
assert comment == response.result["value"]
def test_08_add_comment_to_nonexisting_case(self):
case_number = 'fake-case-number'
comment = 'This is a test comment!'
with pytest.raises(ApiException) as e:
self.service.add_comment(
case_number,
comment,
headers={}
)
assert e.value.code == 404
def test_09_add_watch_list_member(self):
# Users can be retrieved via the User Management API.
user_id_and_realm_model = {}
user_id_and_realm_model['realm'] = 'IBMid'
user_id_and_realm_model['user_id'] = 'abc@ibm.com'
watchlist = [user_id_and_realm_model]
response = self.service.add_watchlist(
TestCaseManagementV1.new_case_number,
watchlist=watchlist,
headers={}
)
# Non-account member cannot be added to the watch-list,
# therefore the response will include a "failed" list
assert response.status_code == 200
# Loop over all returned users and find the matching one by user id
found_users = [user for user in response.result['failed']
if user['user_id'] == user_id_and_realm_model['user_id']]
assert len(found_users) == 1
def test_10_file_upload(self):
fileName = "test_file.txt"
file_with_metadata_model = {}
file_with_metadata_model['data'] = io.BytesIO(b'This is a mock file.').getvalue()
file_with_metadata_model['filename'] = fileName
file = [file_with_metadata_model]
response = self.service.upload_file(
TestCaseManagementV1.new_case_number,
file,
headers={}
)
TestCaseManagementV1.file_attachment_id = response.result['id']
assert response.status_code == 200
assert response.result['filename'] == fileName
def test_11_download_file(self):
response = self.service.download_file(
TestCaseManagementV1.new_case_number,
TestCaseManagementV1.file_attachment_id,
headers={}
)
assert response.status_code == 200
assert 'content-type' in response.headers
def test_12_delete_file(self):
response = self.service.delete_file(
TestCaseManagementV1.new_case_number,
TestCaseManagementV1.file_attachment_id,
headers={}
)
assert response.status_code == 200
# Assert the file attachment list is empty
assert len(response.result['attachments']) == 0
def test_13_add_resource(self):
# Adding a resource requires a valid CRN (Cloud Resource Name)
# CRN's can be retrieved via the Search and Tagging API
crn = 'invalid:crn'
type = 'testString'
id = 36.0
note = 'testString'
with pytest.raises(ApiException) as e:
response = self.service.add_resource(
TestCaseManagementV1.new_case_number,
crn=crn,
type=type,
id=id,
note=note,
headers={}
)
assert e.value.code == 500
def test_14_resolve_case(self):
status_payload = {}
status_payload['action'] = 'resolve'
status_payload['comment'] = 'testString'
status_payload['resolution_code'] = 1
response = self.service.update_case_status(
TestCaseManagementV1.new_case_number,
status_payload,
headers={}
)
assert response.status_code == 200
assert TestCaseManagementV1.new_case_number == response.result["number"]
| 30.987097
| 94
| 0.622423
|
794df5a0f81828572b9168a65ac6ee07e507721a
| 28,678
|
py
|
Python
|
podium/datasets/iterator.py
|
TakeLab/podium
|
11ef32d889e483d4d77a44b61e0b5da956ee3a54
|
[
"BSD-3-Clause"
] | 51
|
2021-03-19T14:14:31.000Z
|
2022-02-18T00:42:51.000Z
|
podium/datasets/iterator.py
|
TakeLab/podium
|
11ef32d889e483d4d77a44b61e0b5da956ee3a54
|
[
"BSD-3-Clause"
] | 9
|
2021-03-31T15:39:28.000Z
|
2021-04-16T13:28:15.000Z
|
podium/datasets/iterator.py
|
TakeLab/podium
|
11ef32d889e483d4d77a44b61e0b5da956ee3a54
|
[
"BSD-3-Clause"
] | 1
|
2021-07-26T04:54:18.000Z
|
2021-07-26T04:54:18.000Z
|
"""
Module contains classes for iterating over datasets.
"""
import math
import warnings
from abc import ABC, abstractmethod
from collections import defaultdict
from random import Random
from typing import Callable
from typing import Iterator as PythonIterator
from typing import List, NamedTuple, Tuple
import numpy as np
from podium.datasets.dataset import Dataset, DatasetBase
from podium.datasets.hierarhical_dataset import HierarchicalDataset
from podium.utils.general_utils import repr_type_and_attrs
class Batch(dict):
def __iter__(self):
yield from self.values()
def __getattr__(self, name):
if name in self:
return self[name]
raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")
def __repr__(self):
return repr_type_and_attrs(self, self, with_newlines=True, repr_values=False)
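# A minimal usage sketch of Batch (values are hypothetical): keys behave like
# attributes, and iterating/unpacking yields the *values* in insertion order.
#   batch = Batch({'text': np.array([[1, 2], [3, 4]]), 'label': np.array([0, 1])})
#   batch.text            # same array as batch['text']
#   text, label = batch   # unpacks values, not keys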
class IteratorBase(ABC):
"""
Abstract base class for all Iterators in Podium.
"""
def __call__(
self, dataset: DatasetBase
) -> PythonIterator[Tuple[NamedTuple, NamedTuple]]:
"""
Sets the dataset for this Iterator and returns an iterable over the
batches of that dataset. Same as calling iterator.set_dataset() followed
by iter(iterator)
Parameters
----------
dataset: Dataset
Dataset to iterate over.
Returns
-------
Iterable over batches in the Dataset.
"""
self.set_dataset(dataset)
return iter(self)
@abstractmethod
def set_dataset(self, dataset: DatasetBase) -> None:
"""
Sets the dataset for this Iterator to iterate over. Resets the epoch
count.
Parameters
----------
dataset: DatasetBase
Dataset to iterate over.
"""
pass
@abstractmethod
def __iter__(self) -> PythonIterator[Tuple[NamedTuple, NamedTuple]]:
"""
Returns an iterator object that knows how to iterate over the given
        dataset. The iterator yields a Batch instance: a dictionary subclass
which contains batched data for every field stored under the name of
that Field. The Batch object unpacks over values (instead of keys) in
the same order as the Fields in the dataset.
Returns
-------
iter
Iterator that iterates over batches of examples in the dataset.
"""
pass
@abstractmethod
def __len__(self) -> int:
"""
Returns the number of batches this iterator provides in one epoch.
Returns
-------
int
            Number of batches provided in one epoch.
"""
pass
class Iterator(IteratorBase):
"""
An iterator that batches data from a dataset after numericalization.
"""
def __init__(
self,
dataset=None,
batch_size=32,
sort_key=None,
shuffle=True,
seed=1,
matrix_class=np.array,
disable_batch_matrix=False,
internal_random_state=None,
):
"""
Creates an iterator for the given dataset and batch size.
Parameters
----------
dataset : DatasetBase
The dataset to iterate over.
batch_size : int
Batch size for batched iteration. If the dataset size is
not a multiple of batch_size the last returned batch
will be smaller (``len(dataset) % batch_size``).
sort_key : callable
A ``callable`` used to sort instances within a batch.
If ``None``, batch instances won't be sorted.
Default is ``None``.
shuffle : bool
Flag denoting whether examples should be shuffled prior
to each epoch.
            Default is ``True``.
seed : int
The initial random seed.
Only used if ``shuffle=True``. Raises ``ValueError`` if
``shuffle=True``, ``internal_random_state=None`` and
``seed=None``.
Default is ``1``.
matrix_class: callable
The constructor for the return batch datatype. Defaults to
``np.array``.
When working with deep learning frameworks such
as `tensorflow <https://www.tensorflow.org/>`_ and
`pytorch <https://pytorch.org/>`_, setting this argument
allows customization of the batch datatype.
internal_random_state : tuple
The random state that the iterator will be initialized with.
Obtained by calling ``.getstate`` on an instance of the Random
object, exposed through the ``Iterator.get_internal_random_state``
method.
For most use-cases, setting the random seed will suffice.
This argument is useful when we want to stop iteration at a certain
batch of the dataset and later continue exactly where we left off.
If ``None``, the Iterator will create its own random state from the
given seed.
Only relevant if ``shuffle=True``. A ``ValueError`` is raised if
``shuffle=True``, ``internal_random_state=None`` and
``seed=None``.
Default is ``None``.
Raises
------
ValueError
If ``shuffle=True`` and both ``seed`` and ``internal_random_state`` are
``None``.
"""
self._batch_size = batch_size
self._shuffle = shuffle
self._sort_key = sort_key
self._epoch = 0
self._iterations = 0
self._matrix_class = matrix_class
self._disable_batch_matrix = disable_batch_matrix
# set of fieldnames for which numericalization format warnings were issued
# used to avoid spamming warnings between iterations
self._numericalization_format_warned_fieldnames = set()
if dataset is not None:
self.set_dataset(dataset)
else:
self._dataset = None
if self._shuffle:
if seed is None and internal_random_state is None:
raise ValueError(
"If shuffle==True, either seed or "
"internal_random_state have to be != None."
)
self._shuffler = Random(seed)
if internal_random_state is not None:
self._shuffler.setstate(internal_random_state)
else:
self._shuffler = None
@property
def epoch(self) -> int:
"""
The current epoch of the Iterator.
"""
return self._epoch
@property
def iterations(self) -> int:
"""
The number of batches returned so far in the current epoch.
"""
return self._iterations
@property
def matrix_class(self):
"""
The class constructor of the batch matrix.
"""
return self._matrix_class
@property
def batch_size(self):
"""
The batch size of the iterator.
"""
return self._batch_size
@property
def sort_key(self):
return self._sort_key
def reset(self):
"""
Reset the epoch and iteration counter of the Iterator.
"""
self._epoch = 0
self._iterations = 0
def set_dataset(self, dataset: DatasetBase) -> None:
"""
Sets the dataset for this Iterator to iterate over. Resets the epoch
count.
Parameters
----------
dataset: DatasetBase
Dataset to iterate over.
"""
self.reset()
self._dataset = dataset
def __setstate__(self, state):
self.__dict__ = state
if self._shuffle:
# Restore the random state to the one prior to start
# of last epoch so we can rewind to the correct batch
self.set_internal_random_state(self._shuffler_state)
def __len__(self) -> int:
"""
Returns the number of batches this iterator provides in one epoch.
Returns
-------
int
            Number of batches provided in one epoch.
"""
return math.ceil(len(self._dataset) / self.batch_size)
def __iter__(self) -> PythonIterator[Batch]:
"""
        Returns an iterator over the given dataset. The iterator yields
        ``Batch`` instances: dict subclasses which unpack to values instead of
        keys::
>>> batch = Batch({
... 'a': np.array([0]),
... 'b': np.array([1])
... })
>>> a, b = batch
>>> a
array([0])
>>> b
array([1])
Batch keys correspond to dataset Field names. Batch values are
by default numpy ndarrays, although the data type can be changed
through the ``matrix_class`` argument. Rows correspond to dataset
instances, while each element is a numericalized value of the input.
Returns
-------
iter
Iterator over batches of examples in the dataset.
"""
indices = list(range(len(self._dataset)))
if self._shuffle:
# Cache state prior to shuffle so we can use it when unpickling
self._shuffler_state = self.get_internal_random_state()
self._shuffler.shuffle(indices)
# If iteration was stopped, continue where we left off
start = self.iterations * self.batch_size
for i in range(start, len(self._dataset), self.batch_size):
batch_indices = indices[i : i + self.batch_size]
batch_instances = self._dataset[batch_indices]
if self._sort_key is not None:
batch_instances = batch_instances.sorted(key=self._sort_key)
self._iterations += 1
yield self._create_batch(batch_instances)
# prepare for new epoch
self._iterations = 0
self._epoch += 1
def _create_batch(self, dataset: DatasetBase) -> Tuple[NamedTuple, NamedTuple]:
examples = dataset.examples
full_batch = Batch()
for field in dataset.fields:
numericalizations = []
for example in examples:
numericalization = field.get_numericalization_for_example(example)
numericalizations.append(numericalization)
            # casting to matrix can only be attempted if all values are either
            # None, scalars (int/float) or np.ndarray
possible_cast_to_matrix = all(
x is None or isinstance(x, (np.ndarray, int, float))
for x in numericalizations
)
if (
not possible_cast_to_matrix
and not field._disable_batch_matrix
and not self._disable_batch_matrix
and field.name not in self._numericalization_format_warned_fieldnames
):
warnings.warn(
f"The batch for Field '{field.name}' can't be cast to "
f"matrix but `disable_batch_matrix` is set to False."
)
self._numericalization_format_warned_fieldnames.add(field.name)
if (
len(numericalizations) > 0
and not field._disable_batch_matrix
and not self._disable_batch_matrix
and possible_cast_to_matrix
):
batch = Iterator._arrays_to_matrix(
field, numericalizations, self.matrix_class
)
else:
batch = numericalizations
if field.include_lengths:
# Include the length of each instance in the Field
# along with the numericalization
batch_lengths = self.matrix_class(
[len(instance) for instance in numericalizations]
)
batch = (batch, batch_lengths)
full_batch[field.name] = batch
return full_batch
def get_internal_random_state(self):
"""
Returns the internal random state of the iterator.
Useful if we want to stop iteration at a certain batch, and later
        continue exactly at that batch.
Only used if ``shuffle=True``.
Returns
-------
tuple
The internal random state of the iterator.
Raises
------
RuntimeError
If ``shuffle=False``.
"""
if not self._shuffle:
raise RuntimeError(
"Iterator with `shuffle=False` does not have an internal random state."
)
return self._shuffler.getstate()
def set_internal_random_state(self, state):
"""
Sets the internal random state of the iterator.
Useful if we want to stop iteration at a certain batch, and later
        continue exactly at that batch.
Only used if ``shuffle=True``.
Raises
------
RuntimeError
If ``shuffle=False``.
"""
if not self._shuffle:
raise RuntimeError(
"Iterator with `shuffle=False` does not have an internal random state."
)
self._shuffler.setstate(state)
@staticmethod
def _arrays_to_matrix(
field, arrays: List[np.ndarray], matrix_class: Callable
) -> np.ndarray:
pad_length = Iterator._get_pad_length(field, arrays)
padded_arrays = [field._pad_to_length(a, pad_length) for a in arrays]
return matrix_class(padded_arrays)
@staticmethod
def _get_pad_length(field, numericalizations) -> int:
# the fixed_length attribute of Field has priority over the max length
# of all the examples in the batch
if field._fixed_length is not None:
return field._fixed_length
# if fixed_length is None, then return the maximum length of all the
# examples in the batch
def numericalization_length(n):
if n is None or isinstance(n, (int, float)):
return 1
else:
return len(n)
return max(map(numericalization_length, numericalizations))
def __repr__(self) -> str:
attrs = {
"batch_size": self._batch_size,
"epoch": self._epoch,
"iteration": self._iterations,
"shuffle": self._shuffle,
}
return repr_type_and_attrs(self, attrs, with_newlines=True)
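# A short usage sketch for Iterator; `dataset` stands for any DatasetBase with a
# Field named "text" (the names are illustrative, not part of this module):
#   iterator = Iterator(dataset, batch_size=32, shuffle=True, seed=1)
#   for batch in iterator:
#       text_matrix = batch.text   # matrix_class instance (np.ndarray by default), one row per example
# Calling `iterator(dataset)` is shorthand for `iterator.set_dataset(dataset)`
# followed by `iter(iterator)`.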
class SingleBatchIterator(Iterator):
"""
Iterator that creates one batch per epoch containing all examples in the
dataset.
"""
def __init__(self, dataset: DatasetBase = None, shuffle=True, add_padding=True):
"""
Creates an Iterator that creates one batch per epoch containing all
examples in the dataset.
Parameters
----------
dataset : DatasetBase
The dataset to iterate over.
shuffle : bool
Flag denoting whether examples should be shuffled before
each epoch.
            Default is ``True``.
add_padding : bool
Flag denoting whether to add padding to batches yielded by the
iterator. If set to ``False``, numericalized Fields will be
returned as python lists of ``matrix_class`` instances.
"""
batch_size = len(dataset) if dataset else None
super().__init__(
dataset=dataset,
batch_size=batch_size,
shuffle=shuffle,
disable_batch_matrix=not add_padding,
)
def set_dataset(self, dataset: DatasetBase) -> None:
super().set_dataset(dataset)
self._batch_size = len(dataset)
def __len__(self) -> int:
return 1
class BucketIterator(Iterator):
"""
Creates a bucket iterator which uses a look-ahead heuristic to batch
examples in a way that minimizes the amount of necessary padding.
Uses a bucket of size N x batch_size, and sorts instances within the bucket
before splitting into batches, minimizing necessary padding.
"""
def __init__(
self,
dataset=None,
batch_size=32,
sort_key=None,
shuffle=True,
seed=1,
matrix_class=np.array,
internal_random_state=None,
look_ahead_multiplier=100,
bucket_sort_key=None,
):
"""
Creates a BucketIterator with the given bucket sort key and look-ahead
multiplier (how many batch_sizes to look ahead when sorting examples for
batches).
Parameters
----------
look_ahead_multiplier : int
Multiplier of ``batch_size`` which determines the size of the
look-ahead bucket.
If ``look_ahead_multiplier == 1``, then the BucketIterator behaves
like a normal Iterator.
If ``look_ahead_multiplier >= (num_examples / batch_size)``, then
the BucketIterator behaves like a normal iterator that sorts the
whole dataset.
Default is ``100``.
bucket_sort_key : callable
The callable object used to sort examples in the bucket.
If ``bucket_sort_key=None``, then the ``sort_key`` must not be ``None``,
otherwise a ``ValueError`` is raised.
Default is ``None``.
Raises
------
ValueError
If sort_key and bucket_sort_key are both None.
"""
if sort_key is None and bucket_sort_key is None:
raise ValueError(
"For BucketIterator to work, either sort_key or "
"bucket_sort_key must be != None."
)
super().__init__(
dataset,
batch_size,
sort_key=sort_key,
shuffle=shuffle,
seed=seed,
matrix_class=matrix_class,
internal_random_state=internal_random_state,
)
self.bucket_sort_key = bucket_sort_key
self.look_ahead_multiplier = look_ahead_multiplier
def __iter__(self) -> PythonIterator[Tuple[NamedTuple, NamedTuple]]:
step = self.batch_size * self.look_ahead_multiplier
dataset = self._dataset
# Determine the step where iteration was stopped for lookahead & within bucket
lookahead_start = (
self.iterations // self.look_ahead_multiplier * self.look_ahead_multiplier
)
batch_start = self.iterations % self.look_ahead_multiplier
if self._sort_key is not None:
dataset = dataset.sorted(key=self._sort_key)
for i in range(lookahead_start, len(dataset), step):
bucket = dataset[i : i + step]
if self.bucket_sort_key is not None:
bucket = bucket.sorted(key=self.bucket_sort_key)
for j in range(batch_start, len(bucket), self.batch_size):
batch_dataset = bucket[j : j + self.batch_size]
batch = self._create_batch(batch_dataset)
yield batch
self._iterations += 1
# prepare for new epoch
self._iterations = 0
self._epoch += 1
def __repr__(self) -> str:
attrs = {
"batch_size": self._batch_size,
"epoch": self._epoch,
"iteration": self._iterations,
"shuffle": self._shuffle,
"look_ahead_multiplier": self.look_ahead_multiplier,
}
return repr_type_and_attrs(self, attrs, with_newlines=True)
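# Usage sketch for BucketIterator (dataset and field names are hypothetical):
# sorting each look-ahead bucket by instance length groups similarly sized
# examples together, so batches need less padding.
#   bucket_iter = BucketIterator(
#       dataset,
#       batch_size=32,
#       bucket_sort_key=lambda example: len(example['text'][1]),  # assumes ('raw', 'tokenized') Field storage
#       look_ahead_multiplier=100,
#   )
#   for batch in bucket_iter:
#       ...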
class HierarchicalIterator(Iterator):
"""
Iterator used to create batches for Hierarchical Datasets.
Creates batches as lists of matrices. In the returned batch, every attribute
corresponds to a field in the dataset. For every field in the dataset, the
batch contains a list of matrices, where every matrix represents the context
of an example in the batch. The rows of a matrix contain numericalized
representations of the examples that make up the context of an example in
the batch with the representation of the example itself being in the last
row of its own context matrix.
"""
def __init__(
self,
dataset=None,
batch_size=32,
sort_key=None,
shuffle=False,
seed=1,
matrix_class=np.array,
internal_random_state=None,
context_max_length=None,
context_max_depth=None,
):
"""
Creates an iterator for the given dataset and batch size.
Parameters
----------
dataset : DatasetBase
The dataset to iterate over.
batch_size : int
Batch size for batched iteration. If the dataset size is
not a multiple of batch_size the last returned batch
will be smaller (``len(dataset) % batch_size``).
sort_key : callable
A ``callable`` used to sort instances within a batch.
If ``None``, batch instances won't be sorted.
Default is ``None``.
shuffle : bool
Flag denoting whether examples should be shuffled prior
to each epoch.
Default is ``False``.
seed : int
The initial random seed.
Only used if ``shuffle=True``. Raises ``ValueError`` if
``shuffle=True``, ``internal_random_state=None`` and
``seed=None``.
Default is ``1``.
matrix_class: callable
The constructor for the return batch datatype. Defaults to
``np.array``.
When working with deep learning frameworks such
as `tensorflow <https://www.tensorflow.org/>`_ and
`pytorch <https://pytorch.org/>`_, setting this argument
allows customization of the batch datatype.
internal_random_state : tuple
The random state that the iterator will be initialized with.
Obtained by calling ``.getstate`` on an instance of the Random
object, exposed through the ``Iterator.get_internal_random_state``
method.
For most use-cases, setting the random seed will suffice.
This argument is useful when we want to stop iteration at a certain
batch of the dataset and later continue exactly where we left off.
If ``None``, the Iterator will create its own random state from the
given seed.
Only relevant if ``shuffle=True``. A ``ValueError`` is raised if
``shuffle=True``, ``internal_random_state=None`` and
``seed=None``.
Default is ``None``.
context_max_depth: int
The maximum depth of the context retrieved for an example in the batch.
While generating the context, the iterator will take 'context_max_depth'
levels above the example and the root node of the last level, e.g. if 0 is
passed, the context generated for an example will contain all examples in the
level of the example in the batch and the root example of that level.
If None, this depth limit will be ignored.
context_max_length: int
The maximum length of the context retrieved for an example in the batch. The
number of rows in the generated context matrix will be (if needed) truncated
to `context_max_length` - 1.
If None, this length limit will be ignored.
Raises
------
ValueError
If shuffle is True and both seed and internal_random_state are
None.
"""
if context_max_length is not None and context_max_length < 1:
raise ValueError(
"'context_max_length' must not be less than 1. "
"If you don't want context, try flattening the dataset. "
f"'context_max_length' : {context_max_length})"
)
if context_max_depth is not None and context_max_depth < 0:
raise ValueError(
"'context_max_depth' must not be negative. "
f"'context_max_depth' : {context_max_length}"
)
self._context_max_depth = context_max_depth
self._context_max_length = context_max_length
super().__init__(
dataset,
batch_size,
sort_key=sort_key,
shuffle=shuffle,
seed=seed,
matrix_class=matrix_class,
internal_random_state=internal_random_state,
)
def set_dataset(self, dataset: HierarchicalDataset) -> None:
if not isinstance(dataset, HierarchicalDataset):
err_msg = (
f"HierarchicalIterator can only iterate over "
f"HierarchicalDatasets. Passed dataset type: "
f"{type(dataset).__name__}"
)
raise ValueError(err_msg)
super().set_dataset(dataset)
def _get_node_context(self, node):
"""
Generates a list of examples that make up the context of the provided
node, truncated to adhere to 'context_max_depth' and
'context_max_length' limitations.
Parameters
----------
node : Node
The Hierarchical dataset node the context should be retrieved for.
Returns
-------
list(Example)
A list of examples that make up the context of the provided node,
truncated to adhere to 'context_max_depth' and 'context_max_length'
limitations.
"""
context_iterator = HierarchicalDataset._get_node_context(
node, self._context_max_depth
)
context = list(context_iterator)
if self._context_max_length is not None:
# if context max size is defined, truncate it
context = context[-self._context_max_length :]
# add the example to the end of its own context
context.append(node.example)
return context
def _nodes_to_batch(self, nodes):
"""
Creates a batch from the passed nodes.
Parameters
----------
nodes : list(Node)
Nodes that should be contained in the batch
Returns
-------
(Batch)
a Batch instance containing numericalized Field data.
"""
batch_dict = defaultdict(list)
for node in nodes:
# all examples that make up the current node's context
node_context_examples = self._get_node_context(node)
node_context_dataset = Dataset(node_context_examples, self._dataset.fields)
sub_batch = super()._create_batch(node_context_dataset)
for key in sub_batch.keys():
value = getattr(sub_batch, key)
batch_dict[key].append(value)
batch = Batch(batch_dict)
return batch
def _data(self):
"""Generates a list of Nodes to be used in batch iteration.
Returns
-------
list(Node)
a list of Nodes
"""
dataset_nodes = list(self._dataset._node_iterator())
if self._shuffle:
# shuffle the indices
indices = list(range(len(self._dataset)))
self._shuffler.shuffle(indices)
# creates a new list of nodes
dataset_nodes = [dataset_nodes[i] for i in indices]
return dataset_nodes
def __iter__(self) -> PythonIterator[Tuple[NamedTuple, NamedTuple]]:
dataset_nodes = self._data()
# If iteration was stopped, continue where we left off
start = self.iterations * self.batch_size
for i in range(start, len(dataset_nodes), self.batch_size):
batch_nodes = dataset_nodes[i : i + self.batch_size]
if self._sort_key is not None:
                batch_nodes = sorted(
                    batch_nodes, key=lambda node: self._sort_key(node.example)
                )
yield self._nodes_to_batch(batch_nodes)
self._iterations += 1
# prepare for new epoch
self._iterations = 0
self._epoch += 1
def __repr__(self) -> str:
attrs = {
"batch_size": self._batch_size,
"epoch": self._epoch,
"iteration": self._iterations,
"shuffle": self._shuffle,
"context_max_length": self._context_max_length,
"context_max_depth": self._context_max_depth,
}
return repr_type_and_attrs(self, attrs, with_newlines=True)
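# Usage sketch for HierarchicalIterator over a HierarchicalDataset (names are
# hypothetical): each value in a yielded batch is a *list* of context matrices,
# one per example, with the example itself in the last row of its own matrix.
#   hier_iter = HierarchicalIterator(hier_dataset, batch_size=16,
#                                    context_max_length=8, context_max_depth=0)
#   for batch in hier_iter:
#       contexts = batch.text   # list of per-example context matrices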
| 33.19213
| 89
| 0.593974
|
794df662e3e9087dd9fdc5e2156151f3b512d201
| 9,627
|
py
|
Python
|
doc/source/conf.py
|
RobbiNespu/scikit-image
|
a65b8af4bf0f5f71624a91ea5ce7812e80cae4cb
|
[
"BSD-3-Clause"
] | 8
|
2016-03-11T13:23:51.000Z
|
2021-12-19T10:43:26.000Z
|
doc/source/conf.py
|
RobbiNespu/scikit-image
|
a65b8af4bf0f5f71624a91ea5ce7812e80cae4cb
|
[
"BSD-3-Clause"
] | null | null | null |
doc/source/conf.py
|
RobbiNespu/scikit-image
|
a65b8af4bf0f5f71624a91ea5ce7812e80cae4cb
|
[
"BSD-3-Clause"
] | 4
|
2020-06-19T00:04:34.000Z
|
2021-02-23T07:24:00.000Z
|
# -*- coding: utf-8 -*-
#
# skimage documentation build configuration file, created by
# sphinx-quickstart on Sat Aug 22 13:00:30 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
curpath = os.path.dirname(__file__)
sys.path.append(os.path.join(curpath, '..', 'ext'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.pngmath', 'numpydoc',
'sphinx.ext.autosummary', 'plot2rst',
'sphinx.ext.intersphinx']
# Determine if the matplotlib has a recent enough version of the
# plot_directive, otherwise use the local fork.
try:
from matplotlib.sphinxext import plot_directive
except ImportError:
use_matplotlib_plot_directive = False
else:
try:
use_matplotlib_plot_directive = (plot_directive.__version__ >= 2)
except AttributeError:
use_matplotlib_plot_directive = False
if use_matplotlib_plot_directive:
extensions.append('matplotlib.sphinxext.plot_directive')
else:
extensions.append('plot_directive')
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'skimage'
copyright = '2013, the scikit-image team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
setup_lines = open('../../skimage/__init__.py').readlines()
version = 'vUndefined'
for l in setup_lines:
if l.startswith('__version__'):
version = l.split("'")[1]
break
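# Note added for clarity: the loop above expects skimage/__init__.py to contain a
# line of the form
#   __version__ = '0.9.3'
# (the exact number is illustrative); splitting on the single quote yields the
# version string, and 'vUndefined' is kept if no such line is found.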
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-image'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'skimage v%s docs' % version
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': ['navigation.html',
'localtoc.html',
'versions.html'],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikitimagedoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('contents', 'scikit-image.tex', u'The scikit-image Documentation',
u'scikit-image development team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r'''
\usepackage{enumitem}
\setlistdepth{100}
\usepackage{amsmath}
\DeclareUnicodeCharacter{00A0}{\nobreakspace}
% In the parameters section, place a newline after the Parameters header
\usepackage{expdlist}
\let\latexdescription=\description
\def\description{\latexdescription{}{} \breaklabel}
% Make Examples/etc section headers smaller and more compact
\makeatletter
\titleformat{\paragraph}{\normalsize\py@HeaderFamily}%
{\py@TitleColor}{0em}{\py@TitleColor}{\py@NormalColor}
\titlespacing*{\paragraph}{0pt}{1ex}{0pt}
\makeatother
'''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = False
# -----------------------------------------------------------------------------
# Numpy extensions
# -----------------------------------------------------------------------------
numpydoc_show_class_members = False
numpydoc_class_members_toctree = False
# -----------------------------------------------------------------------------
# Plots
# -----------------------------------------------------------------------------
plot_basedir = os.path.join(curpath, "plots")
plot_pre_code = """
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(0)
import matplotlib
matplotlib.rcParams.update({
'font.size': 14,
'axes.titlesize': 12,
'axes.labelsize': 10,
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'legend.fontsize': 10,
'figure.subplot.bottom': 0.2,
'figure.subplot.left': 0.2,
'figure.subplot.right': 0.9,
'figure.subplot.top': 0.85,
'figure.subplot.wspace': 0.4,
'text.usetex': False,
})
"""
plot_include_source = True
plot_formats = [('png', 100), ('pdf', 100)]
plot2rst_index_name = 'README'
plot2rst_rcparams = {'image.cmap' : 'gray',
'image.interpolation' : 'none'}
# -----------------------------------------------------------------------------
# intersphinx
# -----------------------------------------------------------------------------
_python_doc_base = 'http://docs.python.org/2.7'
intersphinx_mapping = {
_python_doc_base: None,
'http://docs.scipy.org/doc/numpy': None,
'http://docs.scipy.org/doc/scipy/reference': None,
'http://scikit-learn.org/stable': None
}
| 32.197324
| 80
| 0.68069
|
794df6b8422d9b046381612add9644760a6c97b9
| 1,529
|
py
|
Python
|
runtime/pylibs/normpatch.py
|
bingusware/client
|
c822e49ec2f1f8eaa8963194f562afa6d4cff594
|
[
"MIT"
] | 5
|
2021-06-01T14:25:01.000Z
|
2022-02-11T10:15:10.000Z
|
runtime/pylibs/normpatch.py
|
bingusware/client
|
c822e49ec2f1f8eaa8963194f562afa6d4cff594
|
[
"MIT"
] | 3
|
2021-02-23T08:47:27.000Z
|
2021-02-26T11:58:18.000Z
|
runtime/pylibs/normpatch.py
|
bingusware/client
|
c822e49ec2f1f8eaa8963194f562afa6d4cff594
|
[
"MIT"
] | null | null | null |
import sys
import os
import shutil
from optparse import OptionParser
def normalisepatch(in_filename, out_filename=None):
in_filename = os.path.normpath(in_filename)
if out_filename is None:
tmp_filename = in_filename + '.tmp'
else:
out_filename = os.path.normpath(out_filename)
tmp_filename = out_filename
dir_name = os.path.dirname(out_filename)
if dir_name:
if not os.path.exists(dir_name):
os.makedirs(dir_name)
with open(in_filename, 'rb') as inpatch:
with open(tmp_filename, 'wb') as outpatch:
for line in inpatch:
line = line.rstrip('\r\n')
if line[:3] in ['+++', '---', 'Onl', 'dif']:
outpatch.write(line.replace('\\', os.sep).replace('/', os.sep) + os.linesep)
else:
outpatch.write(line + os.linesep)
if out_filename is None:
shutil.move(tmp_filename, in_filename)
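# Hedged usage sketch (added; the paths are hypothetical):
#   normalisepatch('patches/fix.patch')                     # normalise in place
#   normalisepatch('patches/fix.patch', 'out/fix.patch')    # write a normalised copy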
def main():
usage = 'usage: %prog [options] src_file [dest_file]'
version = '%prog 6.0'
parser = OptionParser(version=version, usage=usage)
options, args = parser.parse_args()
if len(args) == 1:
in_file = args[0]
out_file = None
elif len(args) == 2:
in_file = args[0]
out_file = args[1]
else:
print >> sys.stderr, 'src_file required'
sys.exit(1)
normalisepatch(in_file, out_file)
if __name__ == '__main__':
main()
| 31.204082
| 97
| 0.568999
|
794df72d630acfdfb201ad1b487ef4d0a99599a6
| 1,021
|
py
|
Python
|
wargame/designpatterns/pythonic_woodelf.py
|
jeantardelli/wargameRepo
|
1e11ae40281f7eafa65ea6e40e045304b20e3824
|
[
"MIT"
] | 1
|
2020-12-01T20:30:27.000Z
|
2020-12-01T20:30:27.000Z
|
wargame/designpatterns/pythonic_woodelf.py
|
jeantardelli/wargameRepo
|
1e11ae40281f7eafa65ea6e40e045304b20e3824
|
[
"MIT"
] | null | null | null |
wargame/designpatterns/pythonic_woodelf.py
|
jeantardelli/wargameRepo
|
1e11ae40281f7eafa65ea6e40e045304b20e3824
|
[
"MIT"
] | null | null | null |
"""pythonic_woodelf
This is one of the different GameUnits used in the design patterns
examples. Unlike the others, this unit represents a third-party class
with an incompatible interface.
:copyright: 2020, Jean Tardelli
:license: The MIT license (MIT). See LICENSE file for further details.
"""
class WoodElf:
"""Create a WoodRider instance"""
def info(self):
"""Print info about this unit, overrides superclass method."""
print("I am an Elf from the woods! Nice to meet you!")
def leap(self):
"""leap method is equivalent to the 'jump' method client expects.
The adapter should have a jump method which in turn calls this
method.
"""
print("Inside WoodElf.leap")
def climb(self):
"""Some other (dummy) method of the class.
Adapter shouldn't do anything with this method. It should just
delegate the call from the client to this method.
"""
print("Inside WoodElf.climb")
| 30.939394
| 76
| 0.669931
|
794df73ed992ef7e37f6266a267b1ced3601a859
| 7,159
|
py
|
Python
|
test_conebound.py
|
VictorDavis/conebound
|
2f0600d946d27abfec10d19a840044c2ec9c18f1
|
[
"MIT"
] | 2
|
2021-04-24T22:35:51.000Z
|
2021-11-15T09:45:17.000Z
|
test_conebound.py
|
VictorDavis/conebound
|
2f0600d946d27abfec10d19a840044c2ec9c18f1
|
[
"MIT"
] | null | null | null |
test_conebound.py
|
VictorDavis/conebound
|
2f0600d946d27abfec10d19a840044c2ec9c18f1
|
[
"MIT"
] | 1
|
2021-04-24T22:35:56.000Z
|
2021-04-24T22:35:56.000Z
|
# bloody dependencies
import numpy as np
from numpy.linalg import norm
import unittest
# things to test
from conebound import bounding_cone
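# Hedged usage sketch (added): bounding_cone is exercised below with an
# (n_points, ndim) array of directions and is expected to return an
# (axis, angle) pair, e.g.
#   axis, angle = bounding_cone(np.random.normal(size=(100, 3)))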
# unit tests
class ConeBoundTest(unittest.TestCase):
# bounding cone aperture < 90
def test_2d_acute(self):
# hyperparameters
ndim = 2
axis = np.random.normal(size=ndim)
axis /= norm(axis)
angle = np.random.uniform(np.pi / 12, np.pi / 4) # [15, 45]
cos = np.cos(angle)
# monte carlo construction of a cone
points = np.random.normal(size=(256, ndim))
dot_products = np.dot(points, axis) / norm(points, axis=1)
points = points[dot_products > cos, :]
# get cone
axis_, angle_ = bounding_cone(points)
# bounding cone should be slightly narrower than monte carlo construction
assert angle_ < angle
# calculated axis should be "close" to construction axis
assert sum(axis_ * axis) > 0.9
# bounding cone aperture > 90
def test_2d_obtuse(self):
# hyperparameters
ndim = 2
axis = np.random.normal(size=ndim)
axis /= norm(axis)
angle = np.random.uniform(np.pi / 4, np.pi / 2) # [45, 90]
cos = np.cos(angle)
# monte carlo construction of a cone
points = np.random.normal(size=(256, ndim))
dot_products = np.dot(points, axis) / norm(points, axis=1)
points = points[dot_products > cos, :]
# get cone
axis_, angle_ = bounding_cone(points)
# bounding cone should be slightly narrower than monte carlo construction
assert angle_ < angle
# calculated axis should be "close" to construction axis
assert sum(axis_ * axis) > 0.9
# bounding cone aperture > 180
def test_2d_reflex(self):
# hyperparameters
ndim = 2
axis = np.random.normal(size=ndim)
axis /= norm(axis)
angle = np.random.uniform(np.pi / 2, 3 * np.pi / 4) # [90, 135]
cos = np.cos(angle)
# monte carlo construction of a cone
points = np.random.normal(size=(256, ndim))
dot_products = np.dot(points, axis) / norm(points, axis=1)
points = points[dot_products > cos, :]
# get cone
axis_, angle_ = bounding_cone(points)
# bounding cone should be slightly narrower than monte carlo construction
assert angle_ < angle
# calculated axis should be "close" to construction axis
assert sum(axis_ * axis) > 0.9
# bounding cone aperture < 90
def test_3d_acute(self):
# hyperparameters
ndim = 3
axis = np.random.normal(size=ndim)
axis /= norm(axis)
angle = np.random.uniform(np.pi / 12, np.pi / 4) # [15, 45]
cos = np.cos(angle)
# monte carlo construction of a cone
points = np.random.normal(size=(1024, ndim))
dot_products = np.dot(points, axis) / norm(points, axis=1)
points = points[dot_products > cos, :]
# get cone
axis_, angle_ = bounding_cone(points)
# bounding cone should be slightly narrower than monte carlo construction
assert angle_ < angle
# calculated axis should be "close" to construction axis
assert sum(axis_ * axis) > 0.9
# bounding cone aperture > 90
def test_3d_obtuse(self):
# hyperparameters
ndim = 3
axis = np.random.normal(size=ndim)
axis /= norm(axis)
angle = np.random.uniform(np.pi / 4, np.pi / 2) # [45, 90]
cos = np.cos(angle)
# monte carlo construction of a cone
points = np.random.normal(size=(1024, ndim))
dot_products = np.dot(points, axis) / norm(points, axis=1)
points = points[dot_products > cos, :]
# get cone
axis_, angle_ = bounding_cone(points)
# bounding cone should be slightly narrower than monte carlo construction
assert angle_ < angle
# calculated axis should be "close" to construction axis
assert sum(axis_ * axis) > 0.9
# bounding cone aperture > 180
def test_3d_reflex(self):
# hyperparameters
ndim = 3
axis = np.random.normal(size=ndim)
axis /= norm(axis)
angle = np.random.uniform(np.pi / 2, 3 * np.pi / 4) # [90, 135]
cos = np.cos(angle)
# monte carlo construction of a cone
points = np.random.normal(size=(1024, ndim))
dot_products = np.dot(points, axis) / norm(points, axis=1)
points = points[dot_products > cos, :]
# get cone
axis_, angle_ = bounding_cone(points)
# bounding cone should be slightly narrower than monte carlo construction
assert angle_ < angle
# calculated axis should be "close" to construction axis
assert sum(axis_ * axis) > 0.9
# bounding cone aperture < 90
def test_4d_acute(self):
# hyperparameters
ndim = 4
axis = np.random.normal(size=ndim)
axis /= norm(axis)
angle = np.random.uniform(np.pi / 12, np.pi / 4) # [15, 45]
cos = np.cos(angle)
# monte carlo construction of a cone
points = np.random.normal(size=(4096, ndim))
dot_products = np.dot(points, axis) / norm(points, axis=1)
points = points[dot_products > cos, :]
# get cone
axis_, angle_ = bounding_cone(points)
# bounding cone should be slightly narrower than monte carlo construction
assert angle_ < angle
# calculated axis should be "close" to construction axis
assert sum(axis_ * axis) > 0.9
# bounding cone aperture > 90
def test_4d_obtuse(self):
# hyperparameters
ndim = 4
axis = np.random.normal(size=ndim)
axis /= norm(axis)
angle = np.random.uniform(np.pi / 4, np.pi / 2) # [45, 90]
cos = np.cos(angle)
# monte carlo construction of a cone
points = np.random.normal(size=(4096, ndim))
dot_products = np.dot(points, axis) / norm(points, axis=1)
points = points[dot_products > cos, :]
# get cone
axis_, angle_ = bounding_cone(points)
# bounding cone should be slightly narrower than monte carlo construction
assert angle_ < angle
# calculated axis should be "close" to construction axis
assert sum(axis_ * axis) > 0.9
# bounding cone aperture > 180
def test_4d_reflex(self):
# hyperparameters
ndim = 4
axis = np.random.normal(size=ndim)
axis /= norm(axis)
angle = np.random.uniform(np.pi / 2, 3 * np.pi / 4) # [90, 135]
cos = np.cos(angle)
# monte carlo construction of a cone
points = np.random.normal(size=(4096, ndim))
dot_products = np.dot(points, axis) / norm(points, axis=1)
points = points[dot_products > cos, :]
# get cone
axis_, angle_ = bounding_cone(points)
# bounding cone should be slightly narrower than monte carlo construction
assert angle_ < angle
# calculated axis should be "close" to construction axis
assert sum(axis_ * axis) > 0.9
| 31.399123
| 81
| 0.600084
|
794df7907637cffc6bbd5bb1a4a1e94864577757
| 2,667
|
py
|
Python
|
envs/voxel_env/voxel_env_utils.py
|
magicly/sample-factory
|
32cd44a907653fdad40c026ba0a4fa4cca68554b
|
[
"MIT"
] | 1
|
2020-11-30T23:32:04.000Z
|
2020-11-30T23:32:04.000Z
|
envs/voxel_env/voxel_env_utils.py
|
magicly/sample-factory
|
32cd44a907653fdad40c026ba0a4fa4cca68554b
|
[
"MIT"
] | null | null | null |
envs/voxel_env/voxel_env_utils.py
|
magicly/sample-factory
|
32cd44a907653fdad40c026ba0a4fa4cca68554b
|
[
"MIT"
] | 1
|
2021-12-13T01:45:11.000Z
|
2021-12-13T01:45:11.000Z
|
import gym
from voxel_env.voxel_env_gym import VoxelEnv
from envs.env_utils import RewardShapingInterface
from utils.utils import str2bool
class RewardShapingWrapper(gym.Wrapper, RewardShapingInterface):
def __init__(self, env):
gym.Wrapper.__init__(self, env)
RewardShapingInterface.__init__(self)
self.num_agents = env.num_agents
self.is_multiagent = env.is_multiagent
# save a reference to this wrapper in the actual env class, for other wrappers and for outside access
self.env.unwrapped.reward_shaping_interface = self
def get_default_reward_shaping(self):
return self.env.get_default_reward_shaping()
def get_current_reward_shaping(self, agent_idx: int):
return self.env.get_current_reward_shaping(agent_idx)
def set_reward_shaping(self, reward_shaping: dict, agent_idx: int):
return self.env.set_reward_shaping(reward_shaping, agent_idx)
def close(self):
# remove the reference to avoid dependency cycles
self.env.unwrapped.reward_shaping_interface = None
return self.env.close()
def make_voxel_env(env_name, cfg=None, **kwargs):
env = VoxelEnv(
num_envs=cfg.voxel_num_envs_per_instance,
num_agents_per_env=cfg.voxel_num_agents_per_env,
num_simulation_threads=cfg.voxel_num_simulation_threads,
vertical_look_limit_rad=cfg.voxel_vertical_look_limit,
use_vulkan=cfg.voxel_use_vulkan,
)
env = RewardShapingWrapper(env)
return env
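# Hedged usage sketch (added): `cfg` is assumed to be an argparse-style namespace
# carrying the voxel_* options registered in add_voxel_env_args below; the env
# name is illustrative and is not actually used by make_voxel_env.
#   env = make_voxel_env('voxel_env_v1', cfg=cfg)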
def voxel_env_override_defaults(env, parser):
"""RL params specific to VoxelEnv envs."""
parser.set_defaults(
encoder_type='conv',
encoder_subtype='convnet_simple',
hidden_size=512,
obs_subtract_mean=0.0,
obs_scale=255.0,
actor_worker_gpus=[0],
exploration_loss='symmetric_kl',
exploration_loss_coeff=0.001,
)
def add_voxel_env_args(env, parser):
p = parser
p.add_argument('--voxel_num_envs_per_instance', default=1, type=int, help='Num simulated envs per instance of VoxelEnv')
p.add_argument('--voxel_num_agents_per_env', default=4, type=int, help='Number of agents in a single env within a VoxelEnv instance. Total number of agents in one VoxelEnv = num_envs_per_instance * num_agents_per_env')
p.add_argument('--voxel_num_simulation_threads', default=1, type=int, help='Number of CPU threads to use per instance of VoxelEnv')
p.add_argument('--voxel_vertical_look_limit', default=0.1, type=float, help='Max vertical look in radians')
p.add_argument('--voxel_use_vulkan', default=False, type=str2bool, help='Whether to use Vulkan renderer')
| 39.220588
| 223
| 0.735283
|
794df7aca28de4d3ea300a7092a04d218c24cad1
| 8,217
|
py
|
Python
|
test/enhanced_FEM_test.py
|
sychan/ExpressionAPI
|
a86b07d033158cb8beeaf267dc489a8ca11aec67
|
[
"MIT"
] | null | null | null |
test/enhanced_FEM_test.py
|
sychan/ExpressionAPI
|
a86b07d033158cb8beeaf267dc489a8ca11aec67
|
[
"MIT"
] | null | null | null |
test/enhanced_FEM_test.py
|
sychan/ExpressionAPI
|
a86b07d033158cb8beeaf267dc489a8ca11aec67
|
[
"MIT"
] | 3
|
2017-08-09T22:24:48.000Z
|
2018-06-13T16:19:38.000Z
|
# -*- coding: utf-8 -*-
import unittest
import os # noqa: F401
import time
import inspect
import shutil
import json
from os import environ
try:
from ConfigParser import ConfigParser # py2
except BaseException:
from configparser import ConfigParser # py3
from pprint import pprint, pformat # noqa: F401
from biokbase.workspace.client import Workspace as workspaceService
from DataFileUtil.DataFileUtilClient import DataFileUtil
from ExpressionAPI.ExpressionAPIImpl import ExpressionAPI
from ExpressionAPI.ExpressionAPIServer import MethodContext
from ExpressionAPI.authclient import KBaseAuth as _KBaseAuth
from GenomeAnnotationAPI.GenomeAnnotationAPIClient import GenomeAnnotationAPI
class ExprMatrixUtilsTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
token = environ.get('KB_AUTH_TOKEN', None)
config_file = environ.get('KB_DEPLOYMENT_CONFIG', None)
cls.cfg = {}
config = ConfigParser()
config.read(config_file)
for nameval in config.items('ExpressionAPI'):
cls.cfg[nameval[0]] = nameval[1]
# Getting username from Auth profile for token
authServiceUrl = cls.cfg['auth-service-url']
auth_client = _KBaseAuth(authServiceUrl)
user_id = auth_client.get_user(token)
# WARNING: don't call any logging methods on the context object,
# it'll result in a NoneType error
cls.ctx = MethodContext(None)
cls.ctx.update({'token': token,
'user_id': user_id,
'provenance': [
{'service': 'ExpressionAPI',
'method': 'please_never_use_it_in_production',
'method_params': []
}],
'authenticated': 1})
cls.wsURL = cls.cfg['workspace-url']
cls.wsClient = workspaceService(cls.wsURL)
suffix = int(time.time() * 1000)
cls.wsName = "test_exprAPI_FEM_test_ws_" + str(suffix)
cls.wsClient.create_workspace({'workspace': cls.wsName})
cls.serviceImpl = ExpressionAPI(cls.cfg)
cls.scratch = cls.cfg['scratch']
cls.callback_url = os.environ['SDK_CALLBACK_URL']
cls.dfu = DataFileUtil( cls.callback_url )
cls.ws_id = cls.dfu.ws_name_to_id(cls.wsName)
cls.serviceWizardURL = cls.cfg['srv-wiz-url']
print "### serviceWizardURL = {0}".format( cls.serviceWizardURL )
cls.gaa = GenomeAnnotationAPI( cls.serviceWizardURL )
cls.setupdata()
@classmethod
def tearDownClass(cls):
if hasattr(cls, 'wsName'):
cls.wsClient.delete_workspace({'workspace': cls.wsName})
print('Test workspace was deleted')
def getWsClient(self):
return self.__class__.wsClient
def getWsName(self):
return self.__class__.wsName
def getImpl(self):
return self.__class__.serviceImpl
def getContext(self):
return self.__class__.ctx
@classmethod
def setupdata(cls):
#
# load genome first
#
genome_file_name = 'eFEM_test_genome.json'
genome_file_path = os.path.join('data', genome_file_name)
with open( genome_file_path ) as genome_file:
genome = json.load( genome_file )
print "#### about to save genome"
gen_info = cls.gaa.save_one_genome_v1( { 'workspace': cls.wsName,
'data': genome,
'name': 'at'
} ).get('info')
print "#### after save genome"
cls.genome_ref = "{0}/{1}/{2}".format( gen_info[6], gen_info[0], gen_info[4] )
# Read DEM test object and save
dem_file_name = "eFEM_test_dem.json"
dem_file_path = os.path.join('data', dem_file_name)
with open( dem_file_path ) as dem_file:
dem = json.load( dem_file )
dem["genome_ref"] = cls.genome_ref
dem_info = cls.dfu.save_objects( { 'id': cls.ws_id,
'objects': [ {'type': 'KBaseFeatureValues.DifferentialExpressionMatrix',
'data': dem,
'name': 'dem'} ]
} )[0]
cls.dem_ref = "{0}/{1}/{2}".format( dem_info[6], dem_info[0], dem_info[4] )
# Read FEM test object and save
fem_file_name = "eFEM_test_fem.json"
fem_file_path = os.path.join('data', fem_file_name)
with open( fem_file_path ) as fem_file:
fem = json.load( fem_file )
fem["genome_ref"] = cls.genome_ref
# fem data should not have diff_expr_matrix_ref, so we'll first save it without one
# Save without DEM provenance
fem_info = cls.dfu.save_objects( { 'id': cls.ws_id,
'objects': [ {'type': 'KBaseFeatureValues.ExpressionMatrix',
'data': fem,
'name': 'fem_no_dem',
'extra_provenance_input_refs': [cls.dem_ref]
} ]
} )[0]
cls.fem_no_dem_ref = "{0}/{1}/{2}".format( fem_info[6], fem_info[0], fem_info[4] )
# and now save WITH DEM reference
fem["diff_expr_matrix_ref"] = cls.dem_ref
fem_info = cls.dfu.save_objects( { 'id': cls.ws_id,
'objects': [ {'type': 'KBaseFeatureValues.ExpressionMatrix',
'data': fem,
'name': 'fem',
'extra_provenance_input_refs': [cls.dem_ref]
} ]
} )[0]
cls.fem_dem_ref = "{0}/{1}/{2}".format( fem_info[6], fem_info[0], fem_info[4] )
def fc_and_q_columns_are_all_NA( self, efem ):
for valrow in efem.get('data').get('values'):
if ( valrow[1] != 'NA' or valrow[2] != 'NA' ):
return( False )
return( True )
# NOTE: According to Python unittest naming rules, test method names should start with 'test'. # noqa
def get_enhancedFEM_tests( self ):
print "### running enhanced FEM tests...."
# this should succeed - good provenance link to DEM
print "### testing good provenance...."
ret = self.getImpl().get_enhancedFilteredExpressionMatrix( self.ctx,
{'fem_object_ref': self.fem_dem_ref} )
self.assertFalse( self.fc_and_q_columns_are_all_NA( ret[0].get('enhanced_FEM' ) ) )
print "### ret is {0}".format( pformat( ret ) )
# this should succeed - no provenance link to DEM
print "### testing, no provenance...."
ret = self.getImpl().get_enhancedFilteredExpressionMatrix( self.ctx,
{'fem_object_ref': self.fem_no_dem_ref} )
self.assertTrue( self.fc_and_q_columns_are_all_NA( ret[0].get('enhanced_FEM' ) ) )
print "### ret is {0}".format( pformat( ret ) )
# this should fail: the one input parameter is missing..
print "### fail check on missing parameter field...."
with self.assertRaisesRegexp(
ValueError, 'fem_object_ref parameter not given to get_enhancedFilteredExpressionMatrix' ):
self.getImpl().get_enhancedFilteredExpressionMatrix( self.ctx,
{'nope': 'nope'} )
print "### finished running enhanced FEM tests...."
| 43.707447
| 122
| 0.527808
|
794df82dd3aeaf3b0328c12ff1c62a20dc57517a
| 1,118
|
py
|
Python
|
aerolyzer/wavelength.py
|
Aerolyzer/Aerolyzer
|
f6152d79569c8d061b167a72c2f51860dcb605b6
|
[
"Apache-2.0"
] | 9
|
2016-10-21T22:19:20.000Z
|
2017-12-04T05:05:36.000Z
|
aerolyzer/wavelength.py
|
liusop/Aerolyze
|
f6152d79569c8d061b167a72c2f51860dcb605b6
|
[
"Apache-2.0"
] | 102
|
2016-10-21T11:01:35.000Z
|
2018-05-24T00:58:08.000Z
|
aerolyzer/wavelength.py
|
liusop/Aerolyze
|
f6152d79569c8d061b167a72c2f51860dcb605b6
|
[
"Apache-2.0"
] | 14
|
2016-10-21T11:03:16.000Z
|
2017-11-24T21:32:02.000Z
|
import math
import cv2
def comparisonArray(mode):
img = cv2.imread('./images/Spectrum1pixel.png')
bgr = []
hsv = []
i = 0
if mode == 0:
while i < (img.shape[1]):
bgr.append(img[0, i])
i += 1
return bgr
else:
hsvimg = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
while i < (hsvimg.shape[1]):
hsv.append(hsvimg[0, i])
i += 1
return hsv
def get_wavelength(abc, mode):
a_diff = 0
b_diff = 0
c_diff = 0
dist = 0
best_dist = 2555
best = 0
i = 0
min_wavelength = 380
ValArray = comparisonArray(mode)
while i < (len(ValArray) - 1):
a_diff = math.fabs(int(ValArray[i][0]) - int(abc[0]))
b_diff = math.fabs(int(ValArray[i][1]) - int(abc[1]))
c_diff = math.fabs(int(ValArray[i][2]) - int(abc[2]))
if mode == 1:
a_diff = a_diff*6
dist = math.sqrt((a_diff*a_diff)+(b_diff*b_diff)+(c_diff*c_diff))
if dist < best_dist:
best = i
best_dist = dist
i += 1
return float(best + min_wavelength)
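# Hedged usage sketch (added; the pixel values are illustrative):
#   get_wavelength((200, 80, 30), 0)   # nearest-wavelength lookup for a BGR pixel
#   get_wavelength((20, 255, 255), 1)  # same lookup against the HSV reference strip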
| 24.844444
| 73
| 0.521467
|
794df84bfad1b34391903c579d151c9cbdf238df
| 261
|
py
|
Python
|
manage.py
|
varmarakesh/service-management
|
e448e60873dc4502edc39eb51e0e8d9d1832adf4
|
[
"BSD-3-Clause"
] | null | null | null |
manage.py
|
varmarakesh/service-management
|
e448e60873dc4502edc39eb51e0e8d9d1832adf4
|
[
"BSD-3-Clause"
] | null | null | null |
manage.py
|
varmarakesh/service-management
|
e448e60873dc4502edc39eb51e0e8d9d1832adf4
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "service-management.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 23.727273
| 82
| 0.777778
|
794df8b5a0f2e64e5105c0b26d58bee0895cbc5b
| 4,050
|
py
|
Python
|
scripts/generate_song_data_beam.py
|
gmittal/symbolic-music-diffusion
|
84128ca038fb8757cc6ce15af04b445299f60f99
|
[
"Apache-2.0"
] | 45
|
2021-03-05T22:29:31.000Z
|
2022-03-26T18:11:58.000Z
|
scripts/generate_song_data_beam.py
|
gmittal/symbolic-music-diffusion
|
84128ca038fb8757cc6ce15af04b445299f60f99
|
[
"Apache-2.0"
] | 1
|
2021-12-07T01:37:30.000Z
|
2021-12-07T01:37:30.000Z
|
scripts/generate_song_data_beam.py
|
gmittal/symbolic-music-diffusion
|
84128ca038fb8757cc6ce15af04b445299f60f99
|
[
"Apache-2.0"
] | 7
|
2021-04-03T12:09:36.000Z
|
2022-02-11T17:07:31.000Z
|
# Copyright 2021 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
r"""Dataset generation."""
import pickle
from absl import app
from absl import flags
from absl import logging
import apache_beam as beam
from apache_beam.metrics import Metrics
from magenta.models.music_vae import TrainedModel
import note_seq
from .. import config
from ..utils import song_utils
FLAGS = flags.FLAGS
flags.DEFINE_string(
'pipeline_options', '--runner=DirectRunner',
'Command line flags to use in constructing the Beam pipeline options.')
# Model
flags.DEFINE_string('model', 'melody-2-big', 'Model configuration.')
flags.DEFINE_string('checkpoint', 'fb512_0trackmin/model.ckpt-99967',
'Model checkpoint.')
# Data transformation
flags.DEFINE_enum('mode', 'melody', ['melody', 'multitrack'],
'Data generation mode.')
flags.DEFINE_string('input', None, 'Path to tfrecord files.')
flags.DEFINE_string('output', None, 'Output path.')
class EncodeSong(beam.DoFn):
"""Encode song into MusicVAE embeddings."""
def setup(self):
logging.info('Loading pre-trained model %s', FLAGS.model)
self.model_config = config.MUSIC_VAE_CONFIG[FLAGS.model]
self.model = TrainedModel(self.model_config,
batch_size=1,
checkpoint_dir_or_path=FLAGS.checkpoint)
def process(self, ns):
logging.info('Processing %s::%s (%f)', ns.id, ns.filename, ns.total_time)
if ns.total_time > 60 * 60:
logging.info('Skipping notesequence with >1 hour duration')
Metrics.counter('EncodeSong', 'skipped_long_song').inc()
return
Metrics.counter('EncodeSong', 'encoding_song').inc()
if FLAGS.mode == 'melody':
chunk_length = 2
melodies = song_utils.extract_melodies(ns)
if not melodies:
Metrics.counter('EncodeSong', 'extracted_no_melodies').inc()
return
Metrics.counter('EncodeSong', 'extracted_melody').inc(len(melodies))
songs = [
song_utils.Song(melody, self.model_config.data_converter,
chunk_length) for melody in melodies
]
encoding_matrices = song_utils.encode_songs(self.model, songs)
elif FLAGS.mode == 'multitrack':
chunk_length = 1
song = song_utils.Song(ns,
self.model_config.data_converter,
chunk_length,
multitrack=True)
encoding_matrices = song_utils.encode_songs(self.model, [song])
else:
raise ValueError(f'Unsupported mode: {FLAGS.mode}')
for matrix in encoding_matrices:
assert matrix.shape[0] == 3 and matrix.shape[-1] == 512
if matrix.shape[1] == 0:
Metrics.counter('EncodeSong', 'skipped_matrix').inc()
continue
Metrics.counter('EncodeSong', 'encoded_matrix').inc()
yield pickle.dumps(matrix)
def main(argv):
del argv # unused
pipeline_options = beam.options.pipeline_options.PipelineOptions(
FLAGS.pipeline_options.split(','))
with beam.Pipeline(options=pipeline_options) as p:
p |= 'tfrecord_list' >> beam.Create(FLAGS.input)
p |= 'read_tfrecord' >> beam.io.tfrecordio.ReadAllFromTFRecord(
coder=beam.coders.ProtoCoder(note_seq.NoteSequence))
p |= 'shuffle_input' >> beam.Reshuffle()
p |= 'encode_song' >> beam.ParDo(EncodeSong())
p |= 'shuffle_output' >> beam.Reshuffle()
p |= 'write' >> beam.io.WriteToTFRecord(FLAGS.output)
if __name__ == '__main__':
app.run(main)
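# Hedged usage note (added; values are hypothetical): a typical invocation passes
#   --input=<tfrecord pattern> --output=<output prefix> \
#   --checkpoint=<MusicVAE checkpoint> --mode=melody \
#   --pipeline_options=--runner=DirectRunner
# to this script, which then encodes each NoteSequence with the pre-trained model.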
| 34.615385
| 77
| 0.676296
|
794dfa0ae6d7a645dc9c59750690f6c76d749ad8
| 642
|
py
|
Python
|
ext/flask_dapr/flask_dapr/__init__.py
|
willtsai/python-sdk
|
7de59720cd30e02a5fa2a90fb43eb5bb93c0f63e
|
[
"Apache-2.0"
] | 125
|
2019-10-16T17:57:22.000Z
|
2022-03-08T09:16:01.000Z
|
ext/flask_dapr/flask_dapr/__init__.py
|
willtsai/python-sdk
|
7de59720cd30e02a5fa2a90fb43eb5bb93c0f63e
|
[
"Apache-2.0"
] | 319
|
2019-10-17T13:49:23.000Z
|
2022-03-31T19:32:53.000Z
|
ext/flask_dapr/flask_dapr/__init__.py
|
willtsai/python-sdk
|
7de59720cd30e02a5fa2a90fb43eb5bb93c0f63e
|
[
"Apache-2.0"
] | 69
|
2019-10-23T23:22:56.000Z
|
2022-03-16T13:27:17.000Z
|
# -*- coding: utf-8 -*-
"""
Copyright 2021 The Dapr Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from .actor import DaprActor
__all__ = ['DaprActor']
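# Hedged usage sketch (added): the extension is typically wired to a Flask app,
# e.g.
#   from flask import Flask
#   from flask_dapr import DaprActor
#   app = Flask(__name__)
#   actor = DaprActor(app)
# Whether further registration calls are needed depends on the actor classes in use.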
| 33.789474
| 72
| 0.767913
|
794dfa19273e7d19aacc49c6dc4f158cf986389a
| 6,132
|
py
|
Python
|
tictactoe/ConnectN.py
|
piotrbazan/deep-reinforcement-learning
|
165ab29a1d85630b7baa7ccb31a1ab91cd0c6413
|
[
"MIT"
] | null | null | null |
tictactoe/ConnectN.py
|
piotrbazan/deep-reinforcement-learning
|
165ab29a1d85630b7baa7ccb31a1ab91cd0c6413
|
[
"MIT"
] | null | null | null |
tictactoe/ConnectN.py
|
piotrbazan/deep-reinforcement-learning
|
165ab29a1d85630b7baa7ccb31a1ab91cd0c6413
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.animation as animation
from copy import copy
# output the start indices, end indices and run lengths of value i in v
# get_runs([0,0,1,1,1,0,0],1) gives [2],[5],[3]
def get_runs(v, i):
bounded = np.hstack(([0], (v == i).astype(int), [0]))
difs = np.diff(bounded)
starts, = np.where(difs > 0)
ends, = np.where(difs < 0)
return starts, ends, ends - starts
# see if vector contains N of certain number in a row
def in_a_row(v, N, i):
if len(v) < N:
return False
else:
_, _, total = get_runs(v, i)
return np.any(total >= N)
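# Hedged examples (added): checking for three 1s in a row
#   in_a_row(np.array([0, 1, 1, 1, 0]), 3, 1)  # -> True
#   in_a_row(np.array([1, 0, 1, 0, 1]), 3, 1)  # -> False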
def get_lines(matrix, loc):
i, j = loc
flat = matrix.reshape(-1, *matrix.shape[2:])
w = matrix.shape[0]
h = matrix.shape[1]
def flat_pos(pos):
return pos[0] * h + pos[1]
pos = flat_pos((i, j))
# index for flipping matrix across different axis
ic = w - 1 - i
jc = h - 1 - j
# top left
tl = (i - j, 0) if i > j else (0, j - i)
tl = flat_pos(tl)
# bottom left
bl = (w - 1 - (ic - j), 0) if ic > j else (w - 1, j - ic)
bl = flat_pos(bl)
# top right
tr = (i - jc, h - 1) if i > jc else (0, h - 1 - (jc - i))
tr = flat_pos(tr)
# bottom right
br = (w - 1 - (ic - jc), h - 1) if ic > jc else (w - 1, h - 1 - (jc - ic))
br = flat_pos(br)
hor = matrix[:, j]
ver = matrix[i, :]
diag_right = np.concatenate([flat[tl:pos:h + 1], flat[pos:br + 1:h + 1]])
diag_left = np.concatenate([flat[tr:pos:h - 1], flat[pos:bl + 1:h - 1]])
return hor, ver, diag_right, diag_left
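# Added note: for a 3x3 board and loc == (1, 1) the four returned lines are the
# middle column (matrix[:, 1]), the middle row (matrix[1, :]), the main diagonal
# and the anti-diagonal through the centre cell.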
class ConnectN:
def __init__(self, size, N, pie_rule=False):
self.size = size
self.w, self.h = size
self.N = N
# make sure game is well defined
if self.w < 0 or self.h < 0 or self.N < 2 or \
(self.N > self.w and self.N > self.h):
raise ValueError('Game cannot initialize with a {0:d}x{1:d} grid, and winning condition {2:d} in a row'.format(self.w, self.h, self.N))
self.score = None
self.state = np.zeros(size, dtype=float)
self.player = 1
self.last_move = None
self.n_moves = 0
self.pie_rule = pie_rule
self.switched_side = False
# fast deepcopy
def __copy__(self):
cls = self.__class__
new_game = cls.__new__(cls)
new_game.__dict__.update(self.__dict__)
new_game.N = self.N
new_game.pie_rule = self.pie_rule
new_game.state = self.state.copy()
new_game.switched_side = self.switched_side
new_game.n_moves = self.n_moves
new_game.last_move = self.last_move
new_game.player = self.player
new_game.score = self.score
return new_game
# check victory condition
# fast version
def get_score(self):
if self.n_moves < 2 * self.N - 1:
return None
i, j = self.last_move
hor, ver, diag_right, diag_left = get_lines(self.state, (i, j))
# loop over each possibility
for line in [ver, hor, diag_right, diag_left]:
if in_a_row(line, self.N, self.player):
return self.player
# no more moves
if np.all(self.state != 0):
return 0
return None
# for rendering
# output a list of locations for the winning line
def get_winning_loc(self):
if self.n_moves < 2 * self.N - 1:
return []
loc = self.last_move
hor, ver, diag_right, diag_left = get_lines(self.state, loc)
ind = np.indices(self.state.shape)
ind = np.moveaxis(ind, 0, -1)
hor_ind, ver_ind, diag_right_ind, diag_left_ind = get_lines(ind, loc)
# loop over each possibility
pieces = [hor, ver, diag_right, diag_left]
indices = [hor_ind, ver_ind, diag_right_ind, diag_left_ind]
# winning_loc = np.full(self.state.shape, False, dtype=bool)
for line, index in zip(pieces, indices):
starts, ends, runs = get_runs(line, self.player)
# get the start and end location
winning = (runs >= self.N)
print(winning)
if not np.any(winning):
continue
starts_ind = starts[winning][0]
ends_ind = ends[winning][0]
indices = index[starts_ind:ends_ind]
# winning_loc[indices[:,0], indices[:,1]] = True
return indices
return []
def move(self, loc):
i, j = loc
success = False
if self.w > i >= 0 and self.h > j >= 0:
if self.state[i, j] == 0:
# make a move
self.state[i, j] = self.player
# if pie rule is enabled
if self.pie_rule:
if self.n_moves == 1:
self.state[tuple(self.last_move)] = -self.player
self.switched_side = False
elif self.n_moves == 0:
# pie rule, make first move 0.5
# this is to let the neural net know
self.state[i, j] = self.player / 2.0
self.switched_side = False
success = True
# switching side
elif self.pie_rule and self.state[i, j] == -self.player / 2.0:
# make a move
self.state[i, j] = self.player
self.switched_side = True
success = True
if success:
self.n_moves += 1
self.last_move = tuple((i, j))
self.score = self.get_score()
# if game is not over, switch player
if self.score is None:
self.player *= -1
return True
return False
def available_moves(self):
indices = np.moveaxis(np.indices(self.state.shape), 0, -1)
return indices[np.abs(self.state) != 1]
def available_mask(self):
return (np.abs(self.state) != 1).astype(np.uint8)
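# Hedged usage sketch (added, not part of the original module): a minimal
# 3-in-a-row game on a 3x3 board, placed under a __main__ guard so importing the
# module is unaffected. The particular moves are illustrative only.
if __name__ == "__main__":
    demo = ConnectN(size=(3, 3), N=3)
    for loc in [(0, 0), (1, 1), (0, 1), (2, 2), (0, 2)]:
        demo.move(loc)
    print(demo.score)  # -> 1: the first player completed the top row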
| 28.924528
| 147
| 0.540607
|
794dfad980eb872c97a98fad475a601e299996c1
| 5,735
|
py
|
Python
|
animations/weightless-morning.py
|
TristanCacqueray/demo-render
|
4c8403e684165e5e75c046ee023c1f794a6650a8
|
[
"Apache-2.0"
] | 9
|
2018-02-19T14:17:12.000Z
|
2021-03-27T14:46:28.000Z
|
animations/weightless-morning.py
|
TristanCacqueray/demo-render
|
4c8403e684165e5e75c046ee023c1f794a6650a8
|
[
"Apache-2.0"
] | null | null | null |
animations/weightless-morning.py
|
TristanCacqueray/demo-render
|
4c8403e684165e5e75c046ee023c1f794a6650a8
|
[
"Apache-2.0"
] | null | null | null |
#!/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import yaml
from utils.animation import Animation, run_main
from utils.audio import SpectroGram, AudioMod
from utils.midi import MidiMod
p = """
formula: |
z.imag = fabs(z.imag);
z = cdouble_powr(z, mod);
z = cdouble_add(z, c);
z = cdouble_log(z);
kernel: mean-distance
kernel_params: "double mod"
kernel_params_mod:
- mod
mod: 1
xyinverted: True
c_imag: -3
c_real: -0.6104166666666669
center_imag: -1.42 # 3.0464596218532987
grad_freq: 2
i_step: 0.3
julia: true
map_radius: 3.59375
r_step: 0.003
radius: 6.65 # 17.136335372924805
max_iter: 50
gradient: Sunrise.ggr
"""
class Demo(Animation):
def __init__(self):
self.scenes = [
[8116, None],
[7529, self.zoomout],
[6589, self.ending],
[5647, self.verse5],
[5176, self.verse4],
[4706, self.reloc],
[3764, self.tr1],
[2823, self.verse3],
[1882, self.verse2],
[940, self.verse1],
[0, self.intro],
]
super().__init__(yaml.load(p))
def setMidi(self, midi, midi_skip):
self.midi = midi
self.midi_skip = 235
self.midi_events = {
"waldo": MidiMod("waldo A61", mod="one-off"), #, decay=2),
"guitar": MidiMod("andy 084", mod="one-off"),
"bell": MidiMod("vti twinkle", mod="one-off"),
"violon": MidiMod("vti violon", mod="one-off"),
"tb": MidiMod("tracebackbass"),
"kick": MidiMod(["kick", 'Copy of kick'], mod="one-off"),
"bass": MidiMod(" andy low 084", mod="one-off"),
}
def zoomout(self, frame):
if self.scene_init:
self.rad_mod = self.logspace(self.params["radius"], 160)
self.params["radius"] = self.rad_mod[self.scene_pos]
self.params["c_real"] += 1e-4 * self.bell
self.params["mod"] -= 1e-4 * self.bell
def ending(self, frame):
self.params["c_real"] += 1e-4 * self.bell
self.params["c_imag"] -= 1e-3 * self.bass
# self.params["grad_freq"] -= 5e-3 * self.guitar
self.params["mod"] -= 8e-5 * self.bell
# self.params["radius"] += self.params["radius"] / 100 * self.bass
self.params["grad_freq"] -= 1e-2 * self.bass
def verse5(self, frame):
if self.scene_init:
self.rad_mod = self.logspace(
self.params["radius"], 9.26971435546875)
self.params["radius"] = self.rad_mod[self.scene_pos]
# self.params["c_real"] -= 2e-4 * self.waldo
# self.params["c_imag"] += 2e-4 * self.bell
# self.params["mod"] -= 4e-4 * self.bass
self.params["c_real"] -= 1e-4 * self.bell
self.params["c_imag"] += 1e-3 * self.bass
# self.params["grad_freq"] -= 5e-3 * self.guitar
self.params["mod"] -= 1e-4 * self.waldo
# self.params["radius"] += self.params["radius"] / 100 * self.bass
self.params["grad_freq"] += 1e-3 * self.bass
def verse4(self, frame):
if self.scene_init:
self.rad_mod = self.logspace(self.params["radius"], 149)
# self.params["radius"] = self.rad_mod[self.scene_pos]
self.params["c_real"] += 2e-4 * self.waldo
self.params["c_imag"] -= 2e-4 * self.bell
self.params["mod"] += 4e-4 * self.bass
self.params["grad_freq"] -= 1e-2 * self.waldo
def reloc(self, frame):
if self.scene_init:
self.rad_mod = self.logspace(self.params["radius"], 16)
self.center_mod = self.linspace(self.params["center_imag"], -1.36)
self.params["radius"] = self.rad_mod[self.scene_pos]
self.params["center_imag"] = self.center_mod[self.scene_pos]
self.params["c_real"] += 2e-4 * self.bell
self.params["c_imag"] += 1e-3 * self.kick
# self.params["mod"] -= 1e-4 * self.bell
self.params["grad_freq"] += 2e-2 * self.kick
def tr1(self, frame):
self.params["c_imag"] += 5e-4 * self.waldo
self.params["mod"] += 5e-5 * self.bell
# self.params["radius"] -= self.params["radius"] / 80 * self.kick
self.params["grad_freq"] -= 5e-3 * self.kick
def verse3(self, frame):
self.params["c_imag"] -= 8e-4 * self.kick
self.params["c_real"] += 2e-4 * self.guitar
#self.params["radius"] -= self.params["radius"] / 100 * self.bell
#self.params["mod"] += 1e-5 * self.bell
self.params["grad_freq"] += 1e-3 * self.bass
def verse2(self, frame):
self.params["c_imag"] += 8e-4 * self.kick
self.params["c_real"] -= 3e-4 * self.guitar
self.params["radius"] += self.params["radius"] / 300 * self.waldo
self.params["grad_freq"] -= 1e-3 * self.bass
def verse1(self, frame):
self.params["c_imag"] += 1e-4 * self.waldo
self.params["c_real"] -= 1e-4 * self.bell
self.params["grad_freq"] += 4e-3 * self.violon
self.params["mod"] += 1e-5 * self.tb
def intro(self, frame):
self.params["c_imag"] += 1e-4 * self.waldo
self.params["c_real"] -= 1e-4 * self.bell
self.params["grad_freq"] += 4e-3 * self.violon
if __name__ == "__main__":
run_main(Demo())
| 35.621118
| 78
| 0.587097
|
794dfd480d3c2dd684a0f8be99bd43b790e8aa49
| 19,141
|
py
|
Python
|
uhd_restpy/testplatform/sessions/ixnetwork/topology/rsvpp2pegresslsps_1581d5bc15266f4f3d71f22c869262cd.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 20
|
2019-05-07T01:59:14.000Z
|
2022-02-11T05:24:47.000Z
|
uhd_restpy/testplatform/sessions/ixnetwork/topology/rsvpp2pegresslsps_1581d5bc15266f4f3d71f22c869262cd.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 60
|
2019-04-03T18:59:35.000Z
|
2022-02-22T12:05:05.000Z
|
uhd_restpy/testplatform/sessions/ixnetwork/topology/rsvpp2pegresslsps_1581d5bc15266f4f3d71f22c869262cd.py
|
OpenIxia/ixnetwork_restpy
|
f628db450573a104f327cf3c737ca25586e067ae
|
[
"MIT"
] | 13
|
2019-05-20T10:48:31.000Z
|
2021-10-06T07:45:44.000Z
|
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
from typing import List, Any, Union
class RsvpP2PEgressLsps(Base):
"""RSVP-TE p2p Tail (Egress) LSPs
The RsvpP2PEgressLsps class encapsulates a required rsvpP2PEgressLsps resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'rsvpP2PEgressLsps'
_SDM_ATT_MAP = {
'Active': 'active',
'Count': 'count',
'DescriptiveName': 'descriptiveName',
'EnableFixedLabelForReservations': 'enableFixedLabelForReservations',
'EnableReplyingLspPing': 'enableReplyingLspPing',
'EndPointIpv6': 'endPointIpv6',
'ForwardLspSelfPing': 'forwardLspSelfPing',
'InitialLspSelfPingDropCount': 'initialLspSelfPingDropCount',
'IpTTLDecrementCount': 'ipTTLDecrementCount',
'LabelValue': 'labelValue',
'LocalIp': 'localIp',
'LspSelfPingIPDSCP': 'lspSelfPingIPDSCP',
'Name': 'name',
'NumberOfRroSubObjects': 'numberOfRroSubObjects',
'ReflectRro': 'reflectRro',
'RefreshInterval': 'refreshInterval',
'ReservationStyle': 'reservationStyle',
'RetainLspSelfPingDSCP': 'retainLspSelfPingDSCP',
'SendReservationConfirmation': 'sendReservationConfirmation',
'State': 'state',
'TimeoutMultiplier': 'timeoutMultiplier',
}
_SDM_ENUM_MAP = {
}
def __init__(self, parent, list_op=False):
super(RsvpP2PEgressLsps, self).__init__(parent, list_op)
@property
def RsvpRROSubObjectsList(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.rsvprrosubobjectslist_77057ceebebb20e47d2ca898582fad61.RsvpRROSubObjectsList): An instance of the RsvpRROSubObjectsList class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.rsvprrosubobjectslist_77057ceebebb20e47d2ca898582fad61 import RsvpRROSubObjectsList
if self._properties.get('RsvpRROSubObjectsList', None) is not None:
return self._properties.get('RsvpRROSubObjectsList')
else:
return RsvpRROSubObjectsList(self)
@property
def Tag(self):
"""
Returns
-------
- obj(uhd_restpy.testplatform.sessions.ixnetwork.topology.tag_e30f24de79247381d4dfd423b2f6986d.Tag): An instance of the Tag class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from uhd_restpy.testplatform.sessions.ixnetwork.topology.tag_e30f24de79247381d4dfd423b2f6986d import Tag
if self._properties.get('Tag', None) is not None:
return self._properties.get('Tag')
else:
return Tag(self)
@property
def Active(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Activate/Deactivate Configuration.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Active']))
@property
def Count(self):
# type: () -> int
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP['Count'])
@property
def DescriptiveName(self):
# type: () -> str
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
@property
def EnableFixedLabelForReservations(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Enable Fixed Label For Reservations
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableFixedLabelForReservations']))
@property
def EnableReplyingLspPing(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Enable Replying To Lsp Ping
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnableReplyingLspPing']))
@property
def EndPointIpv6(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Destination IPv6
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EndPointIpv6']))
@property
def ForwardLspSelfPing(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Forward LSP Self Ping
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ForwardLspSelfPing']))
@property
def InitialLspSelfPingDropCount(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Initial LSP Self Ping Drop Count. Number of times Egress LSP will drop LSP Self Ping Message before forwarding it back.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['InitialLspSelfPingDropCount']))
@property
def IpTTLDecrementCount(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): IP TTL Decrement Count. IP TTL limits the lifespan or lifetime of IP Packet in a network.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IpTTLDecrementCount']))
@property
def LabelValue(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Label Value
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LabelValue']))
@property
def LocalIp(self):
# type: () -> List[str]
"""
Returns
-------
- list(str): Local IP
"""
return self._get_attribute(self._SDM_ATT_MAP['LocalIp'])
@property
def LspSelfPingIPDSCP(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): LSP Self Ping IP DSCP. IP DSCP classifies the way an IP packet is routed in a network.
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['LspSelfPingIPDSCP']))
@property
def Name(self):
# type: () -> str
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def NumberOfRroSubObjects(self):
# type: () -> int
"""
Returns
-------
- number: Number Of RRO Sub-Objects
"""
return self._get_attribute(self._SDM_ATT_MAP['NumberOfRroSubObjects'])
@NumberOfRroSubObjects.setter
def NumberOfRroSubObjects(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['NumberOfRroSubObjects'], value)
@property
def ReflectRro(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Reflect RRO
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ReflectRro']))
@property
def RefreshInterval(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Refresh Interval (ms)
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RefreshInterval']))
@property
def ReservationStyle(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Reservation Style
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ReservationStyle']))
@property
def RetainLspSelfPingDSCP(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Retain LSP Self Ping DSCP
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RetainLspSelfPingDSCP']))
@property
def SendReservationConfirmation(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Send Reservation Confirmation
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SendReservationConfirmation']))
@property
def State(self):
# type: () -> List[str]
"""
Returns
-------
- list(str[down | none | notStarted | up]): State
"""
return self._get_attribute(self._SDM_ATT_MAP['State'])
@property
def TimeoutMultiplier(self):
# type: () -> 'Multivalue'
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Timeout Multiplier
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TimeoutMultiplier']))
def update(self, Name=None, NumberOfRroSubObjects=None):
# type: (str, int) -> RsvpP2PEgressLsps
"""Updates rsvpP2PEgressLsps resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- NumberOfRroSubObjects (number): Number Of RRO Sub-Objects
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def Start(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the start operation on the server.
Activate/Enable selected Tunnel Tail Ranges
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
start(async_operation=bool)
---------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
start(SessionIndices=list, async_operation=bool)
------------------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
start(SessionIndices=string, async_operation=bool)
--------------------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
start(Arg2=list, async_operation=bool)list
------------------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('start', payload=payload, response_object=None)
def Stop(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the stop operation on the server.
Deactivate/Disable selected Tunnel Tail Ranges
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
stop(async_operation=bool)
--------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
stop(SessionIndices=list, async_operation=bool)
-----------------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
stop(SessionIndices=string, async_operation=bool)
-------------------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
stop(Arg2=list, async_operation=bool)list
-----------------------------------------
- Arg2 (list(number)): List of indices into the protocol plugin. An empty list indicates all instances in the plugin.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('stop', payload=payload, response_object=None)
def get_device_ids(self, PortNames=None, Active=None, EnableFixedLabelForReservations=None, EnableReplyingLspPing=None, EndPointIpv6=None, ForwardLspSelfPing=None, InitialLspSelfPingDropCount=None, IpTTLDecrementCount=None, LabelValue=None, LspSelfPingIPDSCP=None, ReflectRro=None, RefreshInterval=None, ReservationStyle=None, RetainLspSelfPingDSCP=None, SendReservationConfirmation=None, TimeoutMultiplier=None):
"""Base class infrastructure that gets a list of rsvpP2PEgressLsps device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args
----
- PortNames (str): optional regex of port names
- Active (str): optional regex of active
- EnableFixedLabelForReservations (str): optional regex of enableFixedLabelForReservations
- EnableReplyingLspPing (str): optional regex of enableReplyingLspPing
- EndPointIpv6 (str): optional regex of endPointIpv6
- ForwardLspSelfPing (str): optional regex of forwardLspSelfPing
- InitialLspSelfPingDropCount (str): optional regex of initialLspSelfPingDropCount
- IpTTLDecrementCount (str): optional regex of ipTTLDecrementCount
- LabelValue (str): optional regex of labelValue
- LspSelfPingIPDSCP (str): optional regex of lspSelfPingIPDSCP
- ReflectRro (str): optional regex of reflectRro
- RefreshInterval (str): optional regex of refreshInterval
- ReservationStyle (str): optional regex of reservationStyle
- RetainLspSelfPingDSCP (str): optional regex of retainLspSelfPingDSCP
- SendReservationConfirmation (str): optional regex of sendReservationConfirmation
- TimeoutMultiplier (str): optional regex of timeoutMultiplier
Returns
-------
- list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
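# Minimal usage sketch (not part of the generated class). It assumes an already
# connected uhd_restpy session in which `rsvp_lsps` is an instance of this
# RsvpP2PEgressLsps node retrieved from its parent object; the variable names
# below are illustrative only.
#     rsvp_lsps.update(Name='egress-lsp-1', NumberOfRroSubObjects=2)
#     rsvp_lsps.Start()                                  # activate selected tunnel tail ranges
#     device_ids = rsvp_lsps.get_device_ids(ReservationStyle='.*')
#     print(rsvp_lsps.State)                             # per-session state, e.g. ['up', ...]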
| 41.884026
| 417
| 0.650227
|
794dfd861362919b04138dabc98bd56ba5f8e611
| 4,439
|
py
|
Python
|
pyleecan/Generator/ClassGenerator/copy_method_generator.py
|
Eomys/Pyleecan
|
4d7f0cbabf0311006963e7a2f435db2ecd901118
|
[
"Apache-2.0"
] | 4
|
2017-11-27T10:14:34.000Z
|
2018-09-20T11:30:32.000Z
|
pyleecan/Generator/ClassGenerator/copy_method_generator.py
|
Eomys/Pyleecan
|
4d7f0cbabf0311006963e7a2f435db2ecd901118
|
[
"Apache-2.0"
] | null | null | null |
pyleecan/Generator/ClassGenerator/copy_method_generator.py
|
Eomys/Pyleecan
|
4d7f0cbabf0311006963e7a2f435db2ecd901118
|
[
"Apache-2.0"
] | null | null | null |
from ...Generator import PYTHON_TYPE, TAB, TAB2, TAB3, TAB4, TAB5, TAB6, TAB7
from ...Generator.read_fct import is_list_pyleecan_type, is_dict_pyleecan_type
from ...Generator.ClassGenerator.init_method_generator import get_mother_attr
T1 = "\n" + TAB
T2 = "\n" + TAB2
T3 = "\n" + TAB3
T4 = "\n" + TAB4
T5 = "\n" + TAB5
T6 = "\n" + TAB6
T7 = "\n" + TAB7
def generate_copy(gen_dict, class_dict):
"""Generate the code for the copy method of the class
Parameters
----------
gen_dict : dict
Dict with key = class name and value = class dict (name, package, properties, methods...)
class_dict : dict
dictionary of the class to generate (keys are name, package, properties, methods...)
Returns
-------
copy_str : str
String containing the code for the copy method of the class
"""
# Load all the properties including mother ones
(all_properties, mother_prop_list) = get_mother_attr(
gen_dict, class_dict, "properties"
)
var_str = "" # For the copy code of each property
for prop_dict in all_properties:
prop = prop_dict["name"]
prop_type = prop_dict["type"]
if "as_dict" in prop_dict and prop_dict["as_dict"] == "1":
# Property set to None both in as_dict and copy
var_str += T2 + prop + "_val = None"
elif "as_dict" in prop_dict and prop_dict["as_dict"] == "2":
# Property set to None in as_dict and pointer in copy
var_str += T2 + prop + "_val = self." + prop
elif prop_type in list(set(PYTHON_TYPE) - set(["dict", "list"])):
var_str += T2 + prop + "_val = self." + prop
elif prop_type in ["ndarray", "list", "dict"]:
var_str += T2 + "if self." + prop + " is None:"
var_str += T3 + prop + "_val = None"
var_str += T2 + "else:"
var_str += T3 + prop + "_val = self." + prop + ".copy()"
elif prop_type in ["[ndarray]", "{ndarray}"]:
var_str += T2 + "if self." + prop + " is None:"
var_str += T3 + prop + "_val = None"
var_str += T2 + "else:"
var_str += T3 + prop + "_val = deepcopy(self." + prop + ")"
elif prop_type in [None, ""]:
var_str += T2 + "if hasattr(self." + prop + ", 'copy'):"
var_str += T3 + prop + "_val = self." + prop + ".copy()"
var_str += T2 + "else:"
var_str += T3 + prop + "_val = self." + prop
elif is_list_pyleecan_type(prop_type):
var_str += T2 + "if self." + prop + " is None:"
var_str += T3 + prop + "_val = None"
var_str += T2 + "else:"
var_str += T3 + prop + "_val = list()"
var_str += T3 + "for obj in self." + prop + ":"
var_str += T4 + prop + "_val.append(obj.copy())"
elif is_dict_pyleecan_type(prop_type):
var_str += T2 + "if self." + prop + " is None:"
var_str += T3 + prop + "_val = None"
var_str += T2 + "else:"
var_str += T3 + prop + "_val = dict()"
var_str += T3 + "for key, obj in self." + prop + ".items():"
var_str += T4 + prop + "_val[key] = obj.copy()"
elif prop_type == "function":
var_str += T2 + "if self._" + prop + "_str is not None:"
var_str += T3 + prop + "_val = self._" + prop + "_str"
var_str += T2 + "else:"
var_str += T3 + prop + "_val = self._" + prop + "_func"
else: # SciDataTool or pyleecan type
var_str += T2 + "if self." + prop + " is None:"
var_str += T3 + prop + "_val = None"
var_str += T2 + "else:"
var_str += T3 + prop + "_val = self." + prop + ".copy()"
# Code generation
copy_str = "" # This string is for the all generated code
copy_str += T1 + "def copy(self):"
copy_str += T2 + '"""Creates a deepcopy of the object"""\n'
copy_str += T2 + "# Handle deepcopy of all the properties"
copy_str += var_str
copy_str += T2 + "# Creates new object of the same type with the copied properties"
copy_str += T2 + "obj_copy = type(self)("
for prop_dict in all_properties:
copy_str += prop_dict["name"] + "=" + prop_dict["name"] + "_val,"
if len(all_properties) > 0:
copy_str = copy_str[:-1] # Remove last comma
copy_str += ")"
copy_str += T2 + "return obj_copy\n"
return copy_str
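# Illustrative sketch (not executed here) of the output this generator produces.
# For a hypothetical class with an int property "N" and a list-of-pyleecan-type
# property "slot_list", the generated method would look roughly like:
#     def copy(self):
#         """Creates a deepcopy of the object"""
#
#         # Handle deepcopy of all the properties
#         N_val = self.N
#         if self.slot_list is None:
#             slot_list_val = None
#         else:
#             slot_list_val = list()
#             for obj in self.slot_list:
#                 slot_list_val.append(obj.copy())
#         # Creates new object of the same type with the copied properties
#         obj_copy = type(self)(N=N_val, slot_list=slot_list_val)
#         return obj_copy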
| 42.682692
| 97
| 0.54269
|
794dfe460a8651ddbc409f6842f828dc1eb2785c
| 18,151
|
py
|
Python
|
saleor/settings.py
|
dkramskoj/saleor
|
5be092eeee4ead8d697863adae8911d3c2c4e52d
|
[
"CC-BY-4.0"
] | null | null | null |
saleor/settings.py
|
dkramskoj/saleor
|
5be092eeee4ead8d697863adae8911d3c2c4e52d
|
[
"CC-BY-4.0"
] | 9
|
2021-03-19T03:41:18.000Z
|
2022-03-12T00:43:39.000Z
|
saleor/settings.py
|
dkramskoj/saleor
|
5be092eeee4ead8d697863adae8911d3c2c4e52d
|
[
"CC-BY-4.0"
] | null | null | null |
import ast
import os.path
import warnings
from datetime import timedelta
import dj_database_url
import dj_email_url
import django_cache_url
import jaeger_client
import jaeger_client.config
import sentry_sdk
from django.core.exceptions import ImproperlyConfigured
from django.core.management.utils import get_random_secret_key
from django_prices.utils.formatting import get_currency_fraction
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.django import DjangoIntegration
def get_list(text):
return [item.strip() for item in text.split(",")]
def get_bool_from_env(name, default_value):
if name in os.environ:
value = os.environ[name]
try:
return ast.literal_eval(value)
except ValueError as e:
raise ValueError("{} is an invalid value for {}".format(value, name)) from e
return default_value
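# Example (illustrative): with DEBUG=False exported in the environment,
#     get_bool_from_env("DEBUG", True)   # -> False via ast.literal_eval
# while an unset variable simply returns the provided default.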
DEBUG = get_bool_from_env("DEBUG", True)
SITE_ID = 1
PROJECT_ROOT = os.path.normpath(os.path.join(os.path.dirname(__file__), ".."))
ROOT_URLCONF = "saleor.urls"
WSGI_APPLICATION = "saleor.wsgi.application"
ADMINS = (
    ("saleor", "saleor"),
)
MANAGERS = ADMINS
_DEFAULT_CLIENT_HOSTS = "localhost,127.0.0.1"
ALLOWED_CLIENT_HOSTS = os.environ.get("ALLOWED_CLIENT_HOSTS")
if not ALLOWED_CLIENT_HOSTS:
if DEBUG:
ALLOWED_CLIENT_HOSTS = _DEFAULT_CLIENT_HOSTS
else:
raise ImproperlyConfigured(
"ALLOWED_CLIENT_HOSTS environment variable must be set when DEBUG=False."
)
ALLOWED_CLIENT_HOSTS = get_list(ALLOWED_CLIENT_HOSTS)
INTERNAL_IPS = get_list(os.environ.get("INTERNAL_IPS", "127.0.0.1"))
DATABASES = {
"default": dj_database_url.config(
default="postgres://saleor:saleor@localhost:5432/saleor", conn_max_age=600
)
}
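# dj_database_url builds this dict from the DATABASE_URL environment variable
# when it is set, e.g. (illustrative value only):
#     DATABASE_URL=postgres://user:password@db-host:5432/saleor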
TIME_ZONE = "America/Chicago"
LANGUAGE_CODE = "en"
LANGUAGES = [
("ar", "Arabic"),
("az", "Azerbaijani"),
("bg", "Bulgarian"),
("bn", "Bengali"),
("ca", "Catalan"),
("cs", "Czech"),
("da", "Danish"),
("de", "German"),
("el", "Greek"),
("en", "English"),
("es", "Spanish"),
("es-co", "Colombian Spanish"),
("et", "Estonian"),
("fa", "Persian"),
("fi", "Finnish"),
("fr", "French"),
("hi", "Hindi"),
("hu", "Hungarian"),
("hy", "Armenian"),
("id", "Indonesian"),
("is", "Icelandic"),
("it", "Italian"),
("ja", "Japanese"),
("ko", "Korean"),
("lt", "Lithuanian"),
("mn", "Mongolian"),
("nb", "Norwegian"),
("nl", "Dutch"),
("pl", "Polish"),
("pt", "Portuguese"),
("pt-br", "Brazilian Portuguese"),
("ro", "Romanian"),
("ru", "Russian"),
("sk", "Slovak"),
("sl", "Slovenian"),
("sq", "Albanian"),
("sr", "Serbian"),
("sv", "Swedish"),
("sw", "Swahili"),
("th", "Thai"),
("tr", "Turkish"),
("uk", "Ukrainian"),
("vi", "Vietnamese"),
("zh-hans", "Simplified Chinese"),
("zh-hant", "Traditional Chinese"),
]
LOCALE_PATHS = [os.path.join(PROJECT_ROOT, "locale")]
USE_I18N = True
USE_L10N = True
USE_TZ = True
FORM_RENDERER = "django.forms.renderers.TemplatesSetting"
EMAIL_URL = os.environ.get("EMAIL_URL")
SENDGRID_USERNAME = os.environ.get("SENDGRID_USERNAME")
SENDGRID_PASSWORD = os.environ.get("SENDGRID_PASSWORD")
if not EMAIL_URL and SENDGRID_USERNAME and SENDGRID_PASSWORD:
EMAIL_URL = "smtp://%s:%s@smtp.sendgrid.net:587/?tls=True" % (
SENDGRID_USERNAME,
SENDGRID_PASSWORD,
)
email_config = dj_email_url.parse(
EMAIL_URL or "console://demo@example.com:console@example/"
)
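# EMAIL_URL formats understood by dj_email_url (illustrative values only):
#     smtp://user:password@smtp.example.com:587/?tls=True
#     console://    (development backend that prints emails to stdout)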
EMAIL_FILE_PATH = email_config["EMAIL_FILE_PATH"]
EMAIL_HOST_USER = email_config["EMAIL_HOST_USER"]
EMAIL_HOST_PASSWORD = email_config["EMAIL_HOST_PASSWORD"]
EMAIL_HOST = email_config["EMAIL_HOST"]
EMAIL_PORT = email_config["EMAIL_PORT"]
EMAIL_BACKEND = email_config["EMAIL_BACKEND"]
EMAIL_USE_TLS = email_config["EMAIL_USE_TLS"]
EMAIL_USE_SSL = email_config["EMAIL_USE_SSL"]
# If enabled, make sure you have set a proper storefront address in ALLOWED_CLIENT_HOSTS.
ENABLE_ACCOUNT_CONFIRMATION_BY_EMAIL = get_bool_from_env(
"ENABLE_ACCOUNT_CONFIRMATION_BY_EMAIL", True
)
ENABLE_SSL = get_bool_from_env("ENABLE_SSL", False)
if ENABLE_SSL:
SECURE_SSL_REDIRECT = not DEBUG
DEFAULT_FROM_EMAIL = os.environ.get("DEFAULT_FROM_EMAIL", EMAIL_HOST_USER)
MEDIA_ROOT = os.path.join(PROJECT_ROOT, "media")
MEDIA_URL = os.environ.get("MEDIA_URL", "/media/")
STATIC_ROOT = os.path.join(PROJECT_ROOT, "static")
STATIC_URL = os.environ.get("STATIC_URL", "/static/")
STATICFILES_DIRS = [
("images", os.path.join(PROJECT_ROOT, "saleor", "static", "images"))
]
STATICFILES_FINDERS = [
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
context_processors = [
"django.template.context_processors.debug",
"django.template.context_processors.media",
"django.template.context_processors.static",
"saleor.site.context_processors.site",
]
loaders = [
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
]
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(PROJECT_ROOT, "templates")],
"OPTIONS": {
"debug": DEBUG,
"context_processors": context_processors,
"loaders": loaders,
"string_if_invalid": '<< MISSING VARIABLE "%s" >>' if DEBUG else "",
},
}
]
# Make this unique, and don't share it with anybody.
SECRET_KEY = os.environ.get("SECRET_KEY")
if not SECRET_KEY and DEBUG:
warnings.warn("SECRET_KEY not configured, using a random temporary key.")
SECRET_KEY = get_random_secret_key()
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.middleware.common.CommonMiddleware",
"saleor.core.middleware.request_time",
"saleor.core.middleware.discounts",
"saleor.core.middleware.google_analytics",
"saleor.core.middleware.country",
"saleor.core.middleware.currency",
"saleor.core.middleware.site",
"saleor.core.middleware.plugins",
]
INSTALLED_APPS = [
# External apps that need to go before django's
"storages",
# Django modules
"django.contrib.contenttypes",
"django.contrib.sites",
"django.contrib.staticfiles",
"django.contrib.auth",
"django.contrib.postgres",
# Local apps
"saleor.plugins",
"saleor.account",
"saleor.discount",
"saleor.giftcard",
"saleor.product",
"saleor.checkout",
"saleor.core",
"saleor.graphql",
"saleor.menu",
"saleor.order",
"saleor.seo",
"saleor.shipping",
"saleor.search",
"saleor.site",
"saleor.data_feeds",
"saleor.page",
"saleor.payment",
"saleor.warehouse",
"saleor.webhook",
"saleor.wishlist",
"saleor.app",
# External apps
"versatileimagefield",
"django_measurement",
"django_prices",
"django_prices_openexchangerates",
"django_prices_vatlayer",
"graphene_django",
"mptt",
"django_countries",
"django_filters",
"phonenumber_field",
]
ENABLE_DEBUG_TOOLBAR = get_bool_from_env("ENABLE_DEBUG_TOOLBAR", False)
if ENABLE_DEBUG_TOOLBAR:
# Ensure the graphiql debug toolbar is actually installed before adding it
try:
__import__("graphiql_debug_toolbar")
except ImportError as exc:
msg = (
f"{exc} -- Install the missing dependencies by "
f"running `pip install -r requirements_dev.txt`"
)
warnings.warn(msg)
else:
INSTALLED_APPS += ["django.forms", "debug_toolbar", "graphiql_debug_toolbar"]
MIDDLEWARE.append("saleor.graphql.middleware.DebugToolbarMiddleware")
DEBUG_TOOLBAR_PANELS = [
"ddt_request_history.panels.request_history.RequestHistoryPanel",
"debug_toolbar.panels.timer.TimerPanel",
"debug_toolbar.panels.headers.HeadersPanel",
"debug_toolbar.panels.request.RequestPanel",
"debug_toolbar.panels.sql.SQLPanel",
"debug_toolbar.panels.profiling.ProfilingPanel",
]
DEBUG_TOOLBAR_CONFIG = {"RESULTS_CACHE_SIZE": 100}
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"root": {"level": "INFO", "handlers": ["console"]},
"formatters": {
"verbose": {
"format": (
"%(levelname)s %(name)s %(message)s [PID:%(process)d:%(threadName)s]"
)
},
"simple": {"format": "%(levelname)s %(message)s"},
},
"filters": {"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"}},
"handlers": {
"mail_admins": {
"level": "ERROR",
"filters": ["require_debug_false"],
"class": "django.utils.log.AdminEmailHandler",
},
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
},
"null": {"class": "logging.NullHandler"},
},
"loggers": {
"django": {
"handlers": ["console", "mail_admins"],
"level": "INFO",
"propagate": True,
},
"django.server": {"handlers": ["console"], "level": "INFO", "propagate": True},
"saleor": {"handlers": ["console"], "level": "DEBUG", "propagate": True},
"saleor.graphql.errors.handled": {
"handlers": ["console"],
"level": "ERROR",
"propagate": True,
},
# You can configure this logger to go to another file using a file handler.
# Refer to https://docs.djangoproject.com/en/2.2/topics/logging/#examples.
        # This allows easier filtering of GraphQL query/permission errors that may
        # have been triggered by your frontend applications from the internal errors
        # that happen in the backend.
"saleor.graphql.errors.unhandled": {
"handlers": ["console"],
"level": "ERROR",
"propagate": True,
},
"graphql.execution.utils": {"handlers": ["null"], "propagate": False},
},
}
AUTH_USER_MODEL = "account.User"
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
"OPTIONS": {"min_length": 8},
}
]
DEFAULT_COUNTRY = os.environ.get("DEFAULT_COUNTRY", "US")
DEFAULT_CURRENCY = os.environ.get("DEFAULT_CURRENCY", "USD")
DEFAULT_DECIMAL_PLACES = get_currency_fraction(DEFAULT_CURRENCY)
DEFAULT_MAX_DIGITS = 12
DEFAULT_CURRENCY_CODE_LENGTH = 3
# The default max length for the display name of the
# sender email address.
# Following the recommendation of https://tools.ietf.org/html/rfc5322#section-2.1.1
DEFAULT_MAX_EMAIL_DISPLAY_NAME_LENGTH = 78
# note: having multiple currencies is not supported yet
AVAILABLE_CURRENCIES = [DEFAULT_CURRENCY]
COUNTRIES_OVERRIDE = {"EU": "European Union"}
OPENEXCHANGERATES_API_KEY = os.environ.get("OPENEXCHANGERATES_API_KEY")
GOOGLE_ANALYTICS_TRACKING_ID = os.environ.get("GOOGLE_ANALYTICS_TRACKING_ID")
def get_host():
from django.contrib.sites.models import Site
return Site.objects.get_current().domain
PAYMENT_HOST = get_host
PAYMENT_MODEL = "order.Payment"
MAX_CHECKOUT_LINE_QUANTITY = int(os.environ.get("MAX_CHECKOUT_LINE_QUANTITY", 50))
TEST_RUNNER = "tests.runner.PytestTestRunner"
PLAYGROUND_ENABLED = get_bool_from_env("PLAYGROUND_ENABLED", True)
ALLOWED_HOSTS = get_list(os.environ.get("ALLOWED_HOSTS", "localhost,127.0.0.1"))
ALLOWED_GRAPHQL_ORIGINS = os.environ.get("ALLOWED_GRAPHQL_ORIGINS", "*")
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# Amazon S3 configuration
# See https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html
AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID")
AWS_LOCATION = os.environ.get("AWS_LOCATION", "")
AWS_MEDIA_BUCKET_NAME = os.environ.get("AWS_MEDIA_BUCKET_NAME")
AWS_MEDIA_CUSTOM_DOMAIN = os.environ.get("AWS_MEDIA_CUSTOM_DOMAIN")
AWS_QUERYSTRING_AUTH = get_bool_from_env("AWS_QUERYSTRING_AUTH", False)
AWS_S3_CUSTOM_DOMAIN = os.environ.get("AWS_STATIC_CUSTOM_DOMAIN")
AWS_S3_ENDPOINT_URL = os.environ.get("AWS_S3_ENDPOINT_URL", None)
AWS_S3_REGION_NAME = os.environ.get("AWS_S3_REGION_NAME", None)
AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY")
AWS_STORAGE_BUCKET_NAME = os.environ.get("AWS_STORAGE_BUCKET_NAME")
AWS_DEFAULT_ACL = os.environ.get("AWS_DEFAULT_ACL", None)
# Google Cloud Storage configuration
GS_PROJECT_ID = os.environ.get("GS_PROJECT_ID")
GS_STORAGE_BUCKET_NAME = os.environ.get("GS_STORAGE_BUCKET_NAME")
GS_MEDIA_BUCKET_NAME = os.environ.get("GS_MEDIA_BUCKET_NAME")
GS_AUTO_CREATE_BUCKET = get_bool_from_env("GS_AUTO_CREATE_BUCKET", False)
# If GOOGLE_APPLICATION_CREDENTIALS is set there is no need to load OAuth token
# See https://django-storages.readthedocs.io/en/latest/backends/gcloud.html
if "GOOGLE_APPLICATION_CREDENTIALS" not in os.environ:
GS_CREDENTIALS = os.environ.get("GS_CREDENTIALS")
if AWS_STORAGE_BUCKET_NAME:
STATICFILES_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
elif GS_STORAGE_BUCKET_NAME:
STATICFILES_STORAGE = "storages.backends.gcloud.GoogleCloudStorage"
if AWS_MEDIA_BUCKET_NAME:
DEFAULT_FILE_STORAGE = "saleor.core.storages.S3MediaStorage"
THUMBNAIL_DEFAULT_STORAGE = DEFAULT_FILE_STORAGE
elif GS_MEDIA_BUCKET_NAME:
DEFAULT_FILE_STORAGE = "saleor.core.storages.GCSMediaStorage"
THUMBNAIL_DEFAULT_STORAGE = DEFAULT_FILE_STORAGE
VERSATILEIMAGEFIELD_RENDITION_KEY_SETS = {
"products": [
("product_gallery", "thumbnail__540x540"),
("product_gallery_2x", "thumbnail__1080x1080"),
("product_small", "thumbnail__60x60"),
("product_small_2x", "thumbnail__120x120"),
("product_list", "thumbnail__255x255"),
("product_list_2x", "thumbnail__510x510"),
],
"background_images": [("header_image", "thumbnail__1080x440")],
"user_avatars": [("default", "thumbnail__445x445")],
}
VERSATILEIMAGEFIELD_SETTINGS = {
    # Images should be pre-generated in a production environment
"create_images_on_demand": get_bool_from_env("CREATE_IMAGES_ON_DEMAND", DEBUG)
}
PLACEHOLDER_IMAGES = {
60: "images/placeholder60x60.png",
120: "images/placeholder120x120.png",
255: "images/placeholder255x255.png",
540: "images/placeholder540x540.png",
1080: "images/placeholder1080x1080.png",
}
DEFAULT_PLACEHOLDER = "images/placeholder255x255.png"
SEARCH_BACKEND = "saleor.search.backends.postgresql"
AUTHENTICATION_BACKENDS = [
"graphql_jwt.backends.JSONWebTokenBackend",
"django.contrib.auth.backends.ModelBackend",
]
# Django GraphQL JWT settings
GRAPHQL_JWT = {
"JWT_PAYLOAD_HANDLER": "saleor.graphql.utils.create_jwt_payload",
# How long until a token expires, default is 5m from graphql_jwt.settings
"JWT_EXPIRATION_DELTA": timedelta(minutes=5),
# Whether the JWT tokens should expire or not
"JWT_VERIFY_EXPIRATION": get_bool_from_env("JWT_VERIFY_EXPIRATION", False),
}
# CELERY SETTINGS
CELERY_BROKER_URL = (
os.environ.get("CELERY_BROKER_URL", os.environ.get("CLOUDAMQP_URL")) or ""
)
CELERY_TASK_ALWAYS_EAGER = not CELERY_BROKER_URL
CELERY_ACCEPT_CONTENT = ["json"]
CELERY_TASK_SERIALIZER = "json"
CELERY_RESULT_SERIALIZER = "json"
CELERY_RESULT_BACKEND = os.environ.get("CELERY_RESULT_BACKEND", None)
# Change this value if your application is running behind a proxy,
# e.g. HTTP_CF_Connecting_IP for Cloudflare or X_FORWARDED_FOR
REAL_IP_ENVIRON = os.environ.get("REAL_IP_ENVIRON", "REMOTE_ADDR")
# The maximum length of a graphql query to log in tracings
OPENTRACING_MAX_QUERY_LENGTH_LOG = 2000
# Slugs for menus precreated in Django migrations
DEFAULT_MENUS = {"top_menu_name": "navbar", "bottom_menu_name": "footer"}
# Sentry
SENTRY_DSN = os.environ.get("SENTRY_DSN")
if SENTRY_DSN:
sentry_sdk.init(
dsn=SENTRY_DSN, integrations=[CeleryIntegration(), DjangoIntegration()]
)
GRAPHENE = {
"RELAY_CONNECTION_ENFORCE_FIRST_OR_LAST": True,
"RELAY_CONNECTION_MAX_LIMIT": 100,
"MIDDLEWARE": [
"saleor.graphql.middleware.OpentracingGrapheneMiddleware",
"saleor.graphql.middleware.JWTMiddleware",
"saleor.graphql.middleware.app_middleware",
],
}
PLUGINS_MANAGER = "saleor.plugins.manager.PluginsManager"
PLUGINS = [
"saleor.plugins.avatax.plugin.AvataxPlugin",
"saleor.plugins.vatlayer.plugin.VatlayerPlugin",
"saleor.plugins.webhook.plugin.WebhookPlugin",
"saleor.payment.gateways.dummy.plugin.DummyGatewayPlugin",
"saleor.payment.gateways.stripe.plugin.StripeGatewayPlugin",
"saleor.payment.gateways.braintree.plugin.BraintreeGatewayPlugin",
"saleor.payment.gateways.razorpay.plugin.RazorpayGatewayPlugin",
]
if (
not DEBUG
and ENABLE_ACCOUNT_CONFIRMATION_BY_EMAIL
and ALLOWED_CLIENT_HOSTS == get_list(_DEFAULT_CLIENT_HOSTS)
):
raise ImproperlyConfigured(
"Make sure you've added storefront address to ALLOWED_CLIENT_HOSTS "
"if ENABLE_ACCOUNT_CONFIRMATION_BY_EMAIL is enabled."
)
# Initialize a simple and basic Jaeger Tracing integration
# for open-tracing if enabled.
#
# Refer to our guide on https://docs.saleor.io/docs/next/guides/opentracing-jaeger/.
#
# If running locally, set:
# JAEGER_AGENT_HOST=localhost
if "JAEGER_AGENT_HOST" in os.environ:
jaeger_client.Config(
config={
"sampler": {"type": "const", "param": 1},
"local_agent": {
"reporting_port": os.environ.get(
"JAEGER_AGENT_PORT", jaeger_client.config.DEFAULT_REPORTING_PORT
),
"reporting_host": os.environ.get("JAEGER_AGENT_HOST"),
},
"logging": get_bool_from_env("JAEGER_LOGGING", False),
},
service_name="saleor",
validate=True,
).initialize_tracer()
# Some cloud providers (Heroku) export REDIS_URL variable instead of CACHE_URL
REDIS_URL = os.environ.get("REDIS_URL")
if REDIS_URL:
CACHE_URL = os.environ.setdefault("CACHE_URL", REDIS_URL)
CACHES = {"default": django_cache_url.config()}
| 32.470483
| 88
| 0.694011
|
794dff7685669e943e66ddf0620e18ff1da0b59b
| 11,690
|
py
|
Python
|
src_v2/interactive_conditional_samples.py
|
praise2112/gpt-2-tensorflow2.0
|
c3e09c8c8e96eaa74b94efa4cb1acf796a2b0cea
|
[
"MIT"
] | null | null | null |
src_v2/interactive_conditional_samples.py
|
praise2112/gpt-2-tensorflow2.0
|
c3e09c8c8e96eaa74b94efa4cb1acf796a2b0cea
|
[
"MIT"
] | null | null | null |
src_v2/interactive_conditional_samples.py
|
praise2112/gpt-2-tensorflow2.0
|
c3e09c8c8e96eaa74b94efa4cb1acf796a2b0cea
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import fire
import json
import os
import numpy as np
import tensorflow as tf
import model, sample, encoder
# config = tf.compat.v1.ConfigProto(gpu_options =
# tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=0.8)
# device_count = {'GPU': 1}
# )
# config.gpu_options.allow_growth = True
# session = tf.compat.v1.Session(config=config)
# tf.compat.v1.keras.backend.set_session(session)
# config = tf.compat.v1.ConfigProto(
# device_count = {'GPU': 0}
# )
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.compat.v1.Session(config=config)
# sess = tf.compat.v1.Session(config=config)
raw_text = """<|endoftext|>"""
rawtext_ = """
Comment: My son does not know his way around the house. He really needs his face transforming.
Comment: Rob, have you tried using GPT2 to generate peer review comments?
Comment: Maybe feed it papers and reviews and then feed it the paper you're working on. Get a fresh perspective on your subject. Maybe AI can solve the AI safety problem by pure chance.
Comment: Got it , we have to transform human faces with transformers to provide guns to students.
Comment: !!!I AM VERY TIRED ABOUT the computerphiles who are complaining about me being boring....
Comment: 9:43 "I feel my brain is in a box just like your brain in my box. :)" 9:58 "Rob, do you have a robot friend, please?"
Comment: Just wait 'till some clueless news reporter quotes these in their piece
Comment: "Are Machine Learning models gaining consciousness? Some models are already showing first signs, and are attempting to befriend or even threaten their makers"
Comment: These fake comments were actually rather entertaining.
Comment: 8:49 "we want to know the fur..."
Comment: And "fur" appears.
Comment: I find this very interesting. Many smart "Transhumanist" are the most important thing to do. Australia is a very important part of the 20th century average. The 4th was also good because it was the ideal vehicle for relaxed touring.
Comment: Will this break the format?
Comment: comment Bobby" DROP TABLE Students;
Comment: Now I want to see an AI try to write authentic youtube comments from watching the video.
Comment: The Internet: Don't read the comments.
Comment: Rob: reads the comments
Comment: How many times do we have to say to you that you are funny?
Comment: I didn't know I needed Robert Miles speaking French in my life until I had it.
Comment: Plot twist: every comment on this video was generated by GPT-2.
Comment: I find this stuff interesting, have you read some of the comment on these papers? I am very interested.
Comment: What is the name of Your cat?
Comment: The perfect video doesn't exi....
Comment: Love the hostname :p
Comment: 7:55 what did the computer say? heavy breathing
Comment: I'd love a 24hs stream of this algorithm generating stuff using the online chat comments as seed. XD Or... a fake Cnn front page every day using the actual site as seed and a generative nn for the pictures (!!) ... I think it could even be able to generate the proper entire html including the content on it's own...
Comment: This is like advanced Mad Libs.
Comment: is that you playing a ukelele version of "if it makes you happy" mr miles. if so please do a video just on that please.
Comment: Showing off the power of Sublime
Comment: I would like more cat content. He's very vocal!
Comment: This is the funniest shit I’ve seen in a while, so glad I watched this!
Comment: oh god a twm user i better strap up for some big brain content
Comment: 8:00 Roberts face! Like "shit, the AI is breaking the 4th wall".
Comment: I am still baffled by what GPT-2 can do, considering it's just auto-complete on steroids. Please, continue pushing it to its limits.
Comment: GPT2: C a r n i v o r o u s M a t r i x f r e a k
Comment: Jacob loved absolutely every second of this
Comment: I really want to see more videos like that from you!
Comment: Ayy I was hoping for something like this :)
Comment: How did you edit the text so quickly in Sublime? Can you do a tutorial?
Comment: Well that was quick haha. Thank you :)
Comment: Can you give us your dwm build? I would really like to have application icons in my dwm status bar
Comment: It's nice to see other people using Awesome ^^
Comment: Can you do a second version of this video with the complete version of gpt-2 ?
Comment:"""
# sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(log_device_placement=True))
def load_data(file_path):
data = ""
with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
for line in f:
line = line.strip()
if line == '':
continue
data = data + line + "\n"
return data
rawtext = load_data("bible/bible.txt")
def interact_model(
model_name='345M',
seed=None,
nsamples=10,
batch_size=1,
length=150,
temperature=1,
top_k=0,
top_p=1,
models_dir='../models',
):
"""
Interactively run the model
    :model_name=345M : String, which model to use
    :seed=None : Integer seed for random number generators, fix seed to reproduce
     results
    :nsamples=10 : Number of samples to return total
    :batch_size=1 : Number of batches (only affects speed/memory). Must divide nsamples.
    :length=150 : Number of tokens in generated text; if None, it defaults to
     half the model's context window (n_ctx // 2)
    :temperature=1 : Float value controlling randomness in Boltzmann
     distribution. Lower temperature results in less random completions. As the
     temperature approaches zero, the model will become deterministic and
     repetitive. Higher temperature results in more random completions.
    :top_k=0 : Integer value controlling diversity. 1 means only 1 word is
     considered for each step (token), resulting in deterministic completions,
     while 40 means 40 words are considered at each step. 0 (default) is a
     special setting meaning no restrictions. 40 generally is a good value.
    :top_p=1 : Float value controlling diversity via nucleus sampling; 1.0
     (default) means no restriction
:models_dir : path to parent folder containing model subfolders
(i.e. contains the <model_name> folder)
"""
models_dir = os.path.expanduser(os.path.expandvars(models_dir))
if batch_size is None:
batch_size = 1
assert nsamples % batch_size == 0
enc = encoder.get_encoder(model_name, models_dir)
# hparams = model.default_hparams()
hparams = {}
with open(os.path.join(models_dir, model_name, 'hparams.json')) as f:
hparams = json.load(f)
print("reeeeeeee")
print(hparams)
# hparams.override_from_dict(json.load(f))
print(hparams)
print(hparams.get("n_ctx"))
if length is None:
length = hparams.get("n_ctx") // 2
elif length > hparams.get("n_ctx"):
raise ValueError("Can't get samples longer than window size: %s" % hparams.get("n_ctx"))
with tf.compat.v1.Session(graph=tf.Graph()) as sess:
context = tf.compat.v1.placeholder(tf.int32, [batch_size, None])
np.random.seed(seed)
tf.compat.v1.set_random_seed(seed)
output = sample.sample_sequence(
hparams=hparams, length=length,
context=context,
batch_size=batch_size,
temperature=temperature, top_k=top_k,
top_p=top_p
)
saver = tf.compat.v1.train.Saver()
ckpt = tf.train.latest_checkpoint(os.path.join(models_dir, model_name))
saver.restore(sess, ckpt)
context_tokens = enc.encode(rawtext)
generated = 0
for _ in range(nsamples // batch_size):
out = sess.run(output, feed_dict={
context: [context_tokens for _ in range(batch_size)]
})[:, len(context_tokens):]
for i in range(batch_size):
generated += 1
text = enc.decode(out[i])
print("=" * 40 + " SAMPLE " + str(generated) + " " + "=" * 40)
print(text)
print("=" * 80)
if __name__ == '__main__':
fire.Fire(interact_model)
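# Example command-line invocation via python-fire (the flags map to the keyword
# arguments of interact_model; the values below are illustrative only):
#     python interactive_conditional_samples.py --model_name=345M --top_k=40 --length=200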
# #!/usr/bin/env python3
#
# import fire
# import json
# import os
# import numpy as np
# import tensorflow as tf
#
# import model, sample, encoder
#
# def interact_model(
# model_name='124M',
# seed=None,
# nsamples=1,
# batch_size=1,
# length=None,
# temperature=1,
# top_k=0,
# top_p=1,
# models_dir='models',
# ):
# """
# Interactively run the model
# :model_name=124M : String, which model to use
# :seed=None : Integer seed for random number generators, fix seed to reproduce
# results
# :nsamples=1 : Number of samples to return total
# :batch_size=1 : Number of batches (only affects speed/memory). Must divide nsamples.
# :length=None : Number of tokens in generated text, if None (default), is
# determined by model hyperparameters
# :temperature=1 : Float value controlling randomness in boltzmann
# distribution. Lower temperature results in less random completions. As the
# temperature approaches zero, the model will become deterministic and
# repetitive. Higher temperature results in more random completions.
# :top_k=0 : Integer value controlling diversity. 1 means only 1 word is
# considered for each step (token), resulting in deterministic completions,
# while 40 means 40 words are considered at each step. 0 (default) is a
# special setting meaning no restrictions. 40 generally is a good value.
# :models_dir : path to parent folder containing model subfolders
# (i.e. contains the <model_name> folder)
# """
# models_dir = os.path.expanduser(os.path.expandvars(models_dir))
# if batch_size is None:
# batch_size = 1
# assert nsamples % batch_size == 0
#
# enc = encoder.get_encoder(model_name, models_dir)
# hparams = model.default_hparams()
# with open(os.path.join(models_dir, model_name, 'hparams.json')) as f:
# hparams.override_from_dict(json.load(f))
#
# if length is None:
# length = hparams.n_ctx // 2
# elif length > hparams.n_ctx:
# raise ValueError("Can't get samples longer than window size: %s" % hparams.n_ctx)
#
# with tf.Session(graph=tf.Graph()) as sess:
# context = tf.placeholder(tf.int32, [batch_size, None])
# np.random.seed(seed)
# tf.set_random_seed(seed)
# output = sample.sample_sequence(
# hparams=hparams, length=length,
# context=context,
# batch_size=batch_size,
# temperature=temperature, top_k=top_k, top_p=top_p
# )
#
# saver = tf.train.Saver()
# ckpt = tf.train.latest_checkpoint(os.path.join(models_dir, model_name))
# saver.restore(sess, ckpt)
#
# while True:
# raw_text = input("Model prompt >>> ")
# while not raw_text:
# print('Prompt should not be empty!')
# raw_text = input("Model prompt >>> ")
# context_tokens = enc.encode(raw_text)
# generated = 0
# for _ in range(nsamples // batch_size):
# out = sess.run(output, feed_dict={
# context: [context_tokens for _ in range(batch_size)]
# })[:, len(context_tokens):]
# for i in range(batch_size):
# generated += 1
# text = enc.decode(out[i])
# print("=" * 40 + " SAMPLE " + str(generated) + " " + "=" * 40)
# print(text)
# print("=" * 80)
#
# if __name__ == '__main__':
# fire.Fire(interact_model)
#
| 37.588424
| 325
| 0.668777
|
794dff8c4c338198d147eeac82d26fc10a35fad8
| 776
|
py
|
Python
|
CNN/SVM.py
|
Hsveh/CS420_final_hw
|
5798ca227889ae47b91e189368f04a1fbe6ebb0d
|
[
"Apache-2.0"
] | 1
|
2020-06-22T00:53:41.000Z
|
2020-06-22T00:53:41.000Z
|
CNN/SVM.py
|
Hsveh/CS420_final_hw
|
5798ca227889ae47b91e189368f04a1fbe6ebb0d
|
[
"Apache-2.0"
] | null | null | null |
CNN/SVM.py
|
Hsveh/CS420_final_hw
|
5798ca227889ae47b91e189368f04a1fbe6ebb0d
|
[
"Apache-2.0"
] | null | null | null |
"""
Read data from CNN_SVM
"""
import common
import numpy as np
from sklearn.preprocessing import scale
from sklearn.svm import SVC
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
data = common.Data("../mnist/mnist_train/train_data.npy", "../mnist/mnist_train/mnist_train_label",
"../mnist/mnist_test/test_data.npy", "../mnist/mnist_test/mnist_test_label", 1, 28)
train = np.load('../mnist/mnist_train/fc1_5.npy')
test = np.load('../mnist/mnist_test/fc1_5.npy')
train = scale(train)
test = scale(test)
clf = SVC(kernel='rbf')
clf.fit(train, data.train_y_no_one_hot)
y_pred = clf.predict(test)
print(classification_report(data.test_y_no_one_hot, y_pred))
print(accuracy_score(data.test_y_no_one_hot, y_pred))
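# The classifier above uses scikit-learn's defaults for the RBF kernel. A quick,
# untuned way to probe other settings (illustrative only):
#     clf = SVC(kernel='linear')
#     clf = SVC(kernel='rbf', C=10, gamma='scale')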
| 31.04
| 102
| 0.756443
|
794dff8de7a4fbc20a1e949189b6d6bded30267e
| 993
|
py
|
Python
|
infobip/api/model/sms/mt/logs/SMSLogsResponse.py
|
bahiamartins/infobip-api-python-client
|
88f044901af5690d01fcb7bce90d0ed2ab1ba4d1
|
[
"Apache-2.0"
] | null | null | null |
infobip/api/model/sms/mt/logs/SMSLogsResponse.py
|
bahiamartins/infobip-api-python-client
|
88f044901af5690d01fcb7bce90d0ed2ab1ba4d1
|
[
"Apache-2.0"
] | null | null | null |
infobip/api/model/sms/mt/logs/SMSLogsResponse.py
|
bahiamartins/infobip-api-python-client
|
88f044901af5690d01fcb7bce90d0ed2ab1ba4d1
|
[
"Apache-2.0"
] | 1
|
2020-04-08T19:03:26.000Z
|
2020-04-08T19:03:26.000Z
|
# -*- coding: utf-8 -*-
"""This is a generated class and is not intended for modification!
TODO: Point to Github contribution instructions
"""
from datetime import datetime
from infobip.util.models import DefaultObject, serializable
from infobip.api.model.sms.mt.logs.SMSLog import SMSLog
class SMSLogsResponse(DefaultObject):
@property
@serializable(name="results", type=SMSLog, list=True)
def results(self):
return self.get_field_value("results")
@results.setter
def results(self, results):
self.set_field_value("results", results)
def set_results(self, results):
self.results = results
return self
def add_results(self, *results):
if not self.results:
self.results = []
self.results.extend(results)
return self
def remove_results(self, *results):
if not self.results:
return self
for i in results:
self.results.remove(i)
return self
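# Usage sketch (illustrative; assumes SMSLog instances deserialized or built
# elsewhere -- `log_a` and `log_b` are placeholders):
#     response = SMSLogsResponse()
#     response.add_results(log_a, log_b)
#     for log in response.results:
#         ...  # inspect each message log
#     response.remove_results(log_b)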
| 25.461538
| 66
| 0.660624
|
794dffda37d2356af1f7b3ae85ab383eb9710fdf
| 399
|
py
|
Python
|
article/admin.py
|
Atom1c/home
|
98a00724119a8ba75a917b8e3b7b3cbc9594ad3b
|
[
"Unlicense"
] | null | null | null |
article/admin.py
|
Atom1c/home
|
98a00724119a8ba75a917b8e3b7b3cbc9594ad3b
|
[
"Unlicense"
] | null | null | null |
article/admin.py
|
Atom1c/home
|
98a00724119a8ba75a917b8e3b7b3cbc9594ad3b
|
[
"Unlicense"
] | null | null | null |
from django.contrib import admin
from article.models import Article, Comments
# Register your models here.
class ArticleInline(admin.StackedInline):
model = Comments
extra = 2
class ArticleAdmin(admin.ModelAdmin):
fields = ['article_title', 'article_text', 'article_date']
inlines = [ArticleInline]
list_filter = ['article_date']
admin.site.register(Article, ArticleAdmin)
| 22.166667
| 62
| 0.744361
|
794e00045e262644cb4c85020015b0eaa39f5619
| 22,284
|
py
|
Python
|
ppdet/engine/tracker.py
|
ghostxsl/PaddleDetection
|
e62c687486c0881759ffd49b736afb5ccaa3d717
|
[
"Apache-2.0"
] | null | null | null |
ppdet/engine/tracker.py
|
ghostxsl/PaddleDetection
|
e62c687486c0881759ffd49b736afb5ccaa3d717
|
[
"Apache-2.0"
] | null | null | null |
ppdet/engine/tracker.py
|
ghostxsl/PaddleDetection
|
e62c687486c0881759ffd49b736afb5ccaa3d717
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import cv2
import glob
import re
import paddle
import numpy as np
import os.path as osp
from collections import defaultdict
from ppdet.core.workspace import create
from ppdet.utils.checkpoint import load_weight, load_pretrain_weight
from ppdet.modeling.mot.utils import Detection, get_crops, scale_coords, clip_box
from ppdet.modeling.mot.utils import MOTTimer, load_det_results, write_mot_results, save_vis_results
from ppdet.metrics import Metric, MOTMetric, KITTIMOTMetric
from ppdet.metrics import MCMOTMetric
import ppdet.utils.stats as stats
from .callbacks import Callback, ComposeCallback
from ppdet.utils.logger import setup_logger
logger = setup_logger(__name__)
__all__ = ['Tracker']
class Tracker(object):
def __init__(self, cfg, mode='eval'):
self.cfg = cfg
assert mode.lower() in ['test', 'eval'], \
"mode should be 'test' or 'eval'"
self.mode = mode.lower()
self.optimizer = None
# build MOT data loader
self.dataset = cfg['{}MOTDataset'.format(self.mode.capitalize())]
# build model
self.model = create(cfg.architecture)
self.status = {}
self.start_epoch = 0
# initial default callbacks
self._init_callbacks()
# initial default metrics
self._init_metrics()
self._reset_metrics()
def _init_callbacks(self):
self._callbacks = []
self._compose_callback = None
def _init_metrics(self):
if self.mode in ['test']:
self._metrics = []
return
if self.cfg.metric == 'MOT':
self._metrics = [MOTMetric(), ]
elif self.cfg.metric == 'MCMOT':
self._metrics = [MCMOTMetric(self.cfg.num_classes), ]
elif self.cfg.metric == 'KITTI':
self._metrics = [KITTIMOTMetric(), ]
else:
logger.warning("Metric not support for metric type {}".format(
self.cfg.metric))
self._metrics = []
def _reset_metrics(self):
for metric in self._metrics:
metric.reset()
def register_callbacks(self, callbacks):
callbacks = [h for h in list(callbacks) if h is not None]
for c in callbacks:
            assert isinstance(c, Callback), \
                "callbacks should be instances of a subclass of Callback"
self._callbacks.extend(callbacks)
self._compose_callback = ComposeCallback(self._callbacks)
def register_metrics(self, metrics):
metrics = [m for m in list(metrics) if m is not None]
for m in metrics:
            assert isinstance(m, Metric), \
                "metrics should be instances of a subclass of Metric"
self._metrics.extend(metrics)
def load_weights_jde(self, weights):
load_weight(self.model, weights, self.optimizer)
def load_weights_sde(self, det_weights, reid_weights):
if self.model.detector:
load_weight(self.model.detector, det_weights)
load_weight(self.model.reid, reid_weights)
else:
load_weight(self.model.reid, reid_weights, self.optimizer)
def _eval_seq_jde(self,
dataloader,
save_dir=None,
show_image=False,
frame_rate=30,
draw_threshold=0):
if save_dir:
if not os.path.exists(save_dir): os.makedirs(save_dir)
tracker = self.model.tracker
tracker.max_time_lost = int(frame_rate / 30.0 * tracker.track_buffer)
timer = MOTTimer()
frame_id = 0
self.status['mode'] = 'track'
self.model.eval()
results = defaultdict(list) # support single class and multi classes
for step_id, data in enumerate(dataloader):
self.status['step_id'] = step_id
if frame_id % 40 == 0:
logger.info('Processing frame {} ({:.2f} fps)'.format(
frame_id, 1. / max(1e-5, timer.average_time)))
# forward
timer.tic()
pred_dets, pred_embs = self.model(data)
pred_dets, pred_embs = pred_dets.numpy(), pred_embs.numpy()
online_targets_dict = self.model.tracker.update(pred_dets,
pred_embs)
online_tlwhs = defaultdict(list)
online_scores = defaultdict(list)
online_ids = defaultdict(list)
for cls_id in range(self.cfg.num_classes):
online_targets = online_targets_dict[cls_id]
for t in online_targets:
tlwh = t.tlwh
tid = t.track_id
tscore = t.score
if tlwh[2] * tlwh[3] <= tracker.min_box_area: continue
if tracker.vertical_ratio > 0 and tlwh[2] / tlwh[
3] > tracker.vertical_ratio:
continue
online_tlwhs[cls_id].append(tlwh)
online_ids[cls_id].append(tid)
online_scores[cls_id].append(tscore)
# save results
results[cls_id].append(
(frame_id + 1, online_tlwhs[cls_id], online_scores[cls_id],
online_ids[cls_id]))
timer.toc()
save_vis_results(data, frame_id, online_ids, online_tlwhs,
online_scores, timer.average_time, show_image,
save_dir, self.cfg.num_classes)
frame_id += 1
return results, frame_id, timer.average_time, timer.calls
def _eval_seq_sde(self,
dataloader,
save_dir=None,
show_image=False,
frame_rate=30,
seq_name='',
scaled=False,
det_file='',
draw_threshold=0):
if save_dir:
if not os.path.exists(save_dir): os.makedirs(save_dir)
use_detector = False if not self.model.detector else True
timer = MOTTimer()
results = defaultdict(list)
frame_id = 0
self.status['mode'] = 'track'
self.model.eval()
self.model.reid.eval()
if not use_detector:
dets_list = load_det_results(det_file, len(dataloader))
logger.info('Finish loading detection results file {}.'.format(
det_file))
for step_id, data in enumerate(dataloader):
self.status['step_id'] = step_id
if frame_id % 40 == 0:
logger.info('Processing frame {} ({:.2f} fps)'.format(
frame_id, 1. / max(1e-5, timer.average_time)))
ori_image = data['ori_image'] # [bs, H, W, 3]
ori_image_shape = data['ori_image'].shape[1:3]
# ori_image_shape: [H, W]
input_shape = data['image'].shape[2:]
# input_shape: [h, w], before data transforms, set in model config
im_shape = data['im_shape'][0].numpy()
# im_shape: [new_h, new_w], after data transforms
scale_factor = data['scale_factor'][0].numpy()
empty_detections = False
            # when there are no detected bboxes, the reid model is not run,
            # and if visualizing, the original image is used instead
# forward
timer.tic()
if not use_detector:
dets = dets_list[frame_id]
bbox_tlwh = np.array(dets['bbox'], dtype='float32')
if bbox_tlwh.shape[0] > 0:
# detector outputs: pred_cls_ids, pred_scores, pred_bboxes
pred_cls_ids = np.array(dets['cls_id'], dtype='float32')
pred_scores = np.array(dets['score'], dtype='float32')
pred_bboxes = np.concatenate(
(bbox_tlwh[:, 0:2],
bbox_tlwh[:, 2:4] + bbox_tlwh[:, 0:2]),
axis=1)
else:
logger.warning(
                        'Frame {} has no detected object, try to modify score threshold.'.
format(frame_id))
empty_detections = True
else:
outs = self.model.detector(data)
outs['bbox'] = outs['bbox'].numpy()
outs['bbox_num'] = outs['bbox_num'].numpy()
if outs['bbox_num'] > 0 and empty_detections == False:
# detector outputs: pred_cls_ids, pred_scores, pred_bboxes
pred_cls_ids = outs['bbox'][:, 0:1]
pred_scores = outs['bbox'][:, 1:2]
if not scaled:
# Note: scaled=False only in JDE YOLOv3 or other detectors
# with LetterBoxResize and JDEBBoxPostProcess.
#
# 'scaled' means whether the coords after detector outputs
# have been scaled back to the original image, set True
# in general detector, set False in JDE YOLOv3.
pred_bboxes = scale_coords(outs['bbox'][:, 2:],
input_shape, im_shape,
scale_factor)
else:
pred_bboxes = outs['bbox'][:, 2:]
else:
logger.warning(
                        'Frame {} has no detected object, try to modify score threshold.'.
format(frame_id))
empty_detections = True
if not empty_detections:
pred_xyxys, keep_idx = clip_box(pred_bboxes, ori_image_shape)
if len(keep_idx[0]) == 0:
logger.warning(
                        'Frame {} has no detected object left after clip_box.'.
format(frame_id))
empty_detections = True
if empty_detections:
timer.toc()
                # if visualizing, use the original image instead
online_ids, online_tlwhs, online_scores = None, None, None
save_vis_results(data, frame_id, online_ids, online_tlwhs,
online_scores, timer.average_time, show_image,
save_dir, self.cfg.num_classes)
frame_id += 1
                # thus the reid model is not run
continue
pred_scores = pred_scores[keep_idx[0]]
pred_cls_ids = pred_cls_ids[keep_idx[0]]
pred_tlwhs = np.concatenate(
(pred_xyxys[:, 0:2],
pred_xyxys[:, 2:4] - pred_xyxys[:, 0:2] + 1),
axis=1)
pred_dets = np.concatenate(
(pred_tlwhs, pred_scores, pred_cls_ids), axis=1)
tracker = self.model.tracker
crops = get_crops(
pred_xyxys,
ori_image,
w=tracker.input_size[0],
h=tracker.input_size[1])
crops = paddle.to_tensor(crops)
data.update({'crops': crops})
pred_embs = self.model(data).numpy()
tracker.predict()
online_targets = tracker.update(pred_dets, pred_embs)
online_tlwhs, online_scores, online_ids = [], [], []
for t in online_targets:
if not t.is_confirmed() or t.time_since_update > 1:
continue
tlwh = t.to_tlwh()
tscore = t.score
tid = t.track_id
if tscore < draw_threshold: continue
if tlwh[2] * tlwh[3] <= tracker.min_box_area: continue
if tracker.vertical_ratio > 0 and tlwh[2] / tlwh[
3] > tracker.vertical_ratio:
continue
online_tlwhs.append(tlwh)
online_scores.append(tscore)
online_ids.append(tid)
timer.toc()
# save results
results[0].append(
(frame_id + 1, online_tlwhs, online_scores, online_ids))
save_vis_results(data, frame_id, online_ids, online_tlwhs,
online_scores, timer.average_time, show_image,
save_dir, self.cfg.num_classes)
frame_id += 1
return results, frame_id, timer.average_time, timer.calls
def mot_evaluate(self,
data_root,
seqs,
output_dir,
data_type='mot',
model_type='JDE',
save_images=False,
save_videos=False,
show_image=False,
scaled=False,
det_results_dir=''):
if not os.path.exists(output_dir): os.makedirs(output_dir)
result_root = os.path.join(output_dir, 'mot_results')
if not os.path.exists(result_root): os.makedirs(result_root)
assert data_type in ['mot', 'mcmot', 'kitti'], \
"data_type should be 'mot', 'mcmot' or 'kitti'"
assert model_type in ['JDE', 'DeepSORT', 'FairMOT'], \
"model_type should be 'JDE', 'DeepSORT' or 'FairMOT'"
# run tracking
n_frame = 0
timer_avgs, timer_calls = [], []
for seq in seqs:
infer_dir = os.path.join(data_root, seq)
if not os.path.exists(infer_dir) or not os.path.isdir(infer_dir):
logger.warning("Seq {} error, {} has no images.".format(
seq, infer_dir))
continue
if os.path.exists(os.path.join(infer_dir, 'img1')):
infer_dir = os.path.join(infer_dir, 'img1')
frame_rate = 30
seqinfo = os.path.join(data_root, seq, 'seqinfo.ini')
if os.path.exists(seqinfo):
meta_info = open(seqinfo).read()
frame_rate = int(meta_info[meta_info.find('frameRate') + 10:
meta_info.find('\nseqLength')])
save_dir = os.path.join(output_dir, 'mot_outputs',
seq) if save_images or save_videos else None
logger.info('start seq: {}'.format(seq))
self.dataset.set_images(self.get_infer_images(infer_dir))
dataloader = create('EvalMOTReader')(self.dataset, 0)
result_filename = os.path.join(result_root, '{}.txt'.format(seq))
with paddle.no_grad():
if model_type in ['JDE', 'FairMOT']:
results, nf, ta, tc = self._eval_seq_jde(
dataloader,
save_dir=save_dir,
show_image=show_image,
frame_rate=frame_rate)
elif model_type in ['DeepSORT']:
results, nf, ta, tc = self._eval_seq_sde(
dataloader,
save_dir=save_dir,
show_image=show_image,
frame_rate=frame_rate,
seq_name=seq,
scaled=scaled,
det_file=os.path.join(det_results_dir,
'{}.txt'.format(seq)))
else:
raise ValueError(model_type)
write_mot_results(result_filename, results, data_type,
self.cfg.num_classes)
n_frame += nf
timer_avgs.append(ta)
timer_calls.append(tc)
if save_videos:
output_video_path = os.path.join(save_dir, '..',
'{}_vis.mp4'.format(seq))
cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg {}'.format(
save_dir, output_video_path)
os.system(cmd_str)
logger.info('Save video in {}.'.format(output_video_path))
logger.info('Evaluate seq: {}'.format(seq))
# update metrics
for metric in self._metrics:
metric.update(data_root, seq, data_type, result_root,
result_filename)
timer_avgs = np.asarray(timer_avgs)
timer_calls = np.asarray(timer_calls)
all_time = np.dot(timer_avgs, timer_calls)
avg_time = all_time / np.sum(timer_calls)
logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(
all_time, 1.0 / avg_time))
        # accumulate metrics and log them out
for metric in self._metrics:
metric.accumulate()
metric.log()
        # reset metric states, since a metric may be evaluated multiple times
self._reset_metrics()
def get_infer_images(self, infer_dir):
assert infer_dir is None or os.path.isdir(infer_dir), \
"{} is not a directory".format(infer_dir)
images = set()
assert os.path.isdir(infer_dir), \
"infer_dir {} is not a directory".format(infer_dir)
exts = ['jpg', 'jpeg', 'png', 'bmp']
exts += [ext.upper() for ext in exts]
for ext in exts:
images.update(glob.glob('{}/*.{}'.format(infer_dir, ext)))
images = list(images)
images.sort()
assert len(images) > 0, "no image found in {}".format(infer_dir)
logger.info("Found {} inference images in total.".format(len(images)))
return images
def mot_predict_seq(self,
video_file,
frame_rate,
image_dir,
output_dir,
data_type='mot',
model_type='JDE',
save_images=False,
save_videos=True,
show_image=False,
scaled=False,
det_results_dir='',
draw_threshold=0.5):
assert video_file is not None or image_dir is not None, \
"--video_file or --image_dir should be set."
assert video_file is None or os.path.isfile(video_file), \
"{} is not a file".format(video_file)
assert image_dir is None or os.path.isdir(image_dir), \
"{} is not a directory".format(image_dir)
if not os.path.exists(output_dir): os.makedirs(output_dir)
result_root = os.path.join(output_dir, 'mot_results')
if not os.path.exists(result_root): os.makedirs(result_root)
assert data_type in ['mot', 'mcmot', 'kitti'], \
"data_type should be 'mot', 'mcmot' or 'kitti'"
assert model_type in ['JDE', 'DeepSORT', 'FairMOT'], \
"model_type should be 'JDE', 'DeepSORT' or 'FairMOT'"
# run tracking
if video_file:
seq = video_file.split('/')[-1].split('.')[0]
self.dataset.set_video(video_file, frame_rate)
logger.info('Starting tracking video {}'.format(video_file))
elif image_dir:
seq = image_dir.split('/')[-1].split('.')[0]
if os.path.exists(os.path.join(image_dir, 'img1')):
image_dir = os.path.join(image_dir, 'img1')
images = [
'{}/{}'.format(image_dir, x) for x in os.listdir(image_dir)
]
images.sort()
self.dataset.set_images(images)
logger.info('Starting tracking folder {}, found {} images'.format(
image_dir, len(images)))
else:
raise ValueError('--video_file or --image_dir should be set.')
save_dir = os.path.join(output_dir, 'mot_outputs',
seq) if save_images or save_videos else None
dataloader = create('TestMOTReader')(self.dataset, 0)
result_filename = os.path.join(result_root, '{}.txt'.format(seq))
if frame_rate == -1:
frame_rate = self.dataset.frame_rate
with paddle.no_grad():
if model_type in ['JDE', 'FairMOT']:
results, nf, ta, tc = self._eval_seq_jde(
dataloader,
save_dir=save_dir,
show_image=show_image,
frame_rate=frame_rate,
draw_threshold=draw_threshold)
elif model_type in ['DeepSORT']:
results, nf, ta, tc = self._eval_seq_sde(
dataloader,
save_dir=save_dir,
show_image=show_image,
frame_rate=frame_rate,
seq_name=seq,
scaled=scaled,
det_file=os.path.join(det_results_dir,
'{}.txt'.format(seq)),
draw_threshold=draw_threshold)
else:
raise ValueError(model_type)
if save_videos:
output_video_path = os.path.join(save_dir, '..',
'{}_vis.mp4'.format(seq))
cmd_str = 'ffmpeg -f image2 -i {}/%05d.jpg {}'.format(
save_dir, output_video_path)
os.system(cmd_str)
logger.info('Save video in {}'.format(output_video_path))
write_mot_results(result_filename, results, data_type,
self.cfg.num_classes)
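def example_mot_predict(tracker):
    """Minimal usage sketch for mot_predict_seq().

    `tracker` stands for whatever object exposes the method above (built from an
    MOT config elsewhere in this codebase); the dataset and output paths below
    are placeholders.
    """
    tracker.mot_predict_seq(
        video_file=None,
        frame_rate=30,
        image_dir='dataset/mot/MOT16/images/test/MOT16-01',
        output_dir='output',
        data_type='mot',
        model_type='FairMOT',
        save_images=False,
        save_videos=True,
        show_image=False)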
| 41.343228
| 100
| 0.530381
|
794e011b18dda1256d202660548a01b3cf161f27
| 3,293
|
py
|
Python
|
src/pytti/rotoscoper.py
|
pytti-tools/pytti-core
|
ec001ad08d2eda4ba3dbc110f800b44c5f5072d6
|
[
"MIT"
] | 35
|
2022-01-27T22:00:51.000Z
|
2022-03-30T23:18:07.000Z
|
src/pytti/rotoscoper.py
|
pytti-tools/pytti-core
|
ec001ad08d2eda4ba3dbc110f800b44c5f5072d6
|
[
"MIT"
] | 83
|
2022-01-26T21:04:41.000Z
|
2022-03-31T02:12:54.000Z
|
src/pytti/rotoscoper.py
|
pytti-tools/pytti-core
|
ec001ad08d2eda4ba3dbc110f800b44c5f5072d6
|
[
"MIT"
] | 13
|
2022-01-27T22:00:53.000Z
|
2022-03-31T12:41:26.000Z
|
import imageio, subprocess
from os.path import exists as path_exists
from loguru import logger
from PIL import Image
class RotoscopingOrchestrator:
def __init__(self):
self.rotoscopers = []
def add(self, other):
self.rotoscopers.append(other)
def clear_rotoscopers(self):
self.rotoscopers = []
def update_rotoscopers(self, frame_n: int):
for r in self.rotoscopers:
r.update(frame_n)
ROTOSCOPERS = RotoscopingOrchestrator() # fml...
rotoscopers = ROTOSCOPERS.rotoscopers
update_rotoscopers = ROTOSCOPERS.update_rotoscopers
clear_rotoscopers = ROTOSCOPERS.clear_rotoscopers
# surprised we're not using opencv here.
# let's call this another unnecessary subprocess call to deprecate.
def get_frames(path, params=None):
"""reads the frames of the mp4 file `path` and returns them as a list of PIL images"""
in_fname = path
out_fname = f"{path}_converted.mp4"
if not path_exists(path + "_converted.mp4"):
logger.debug(f"Converting {path}...")
cmd = ["ffmpeg", "-i", in_fname]
# if params is None:
# subprocess.run(["ffmpeg", "-i", in_fname, out_fname])
if params is not None:
# https://trac.ffmpeg.org/wiki/ChangingFrameRate
cmd += ["-filter:v", f"fps={params.frames_per_second}"]
# https://trac.ffmpeg.org/wiki/Encode/H.264
cmd += [
"-c:v",
"libx264",
"-crf",
"17", # = effectively lossless
"-preset",
"veryslow", # = effectively lossless
"-tune",
"fastdecode", # not sure this is what I want, zerolatency and stillimage might make sense? can experiment I guess?
"-pix_fmt",
"yuv420p", # may be necessary for "dumb players"
"-acodec",
"copy", # copy audio codec cause why not
out_fname,
]
logger.debug(cmd)
subprocess.run(cmd)
logger.debug(f"Converted {in_fname} to {out_fname}.")
# yeah I don't think this is actually true, but it probably should be.
logger.warning(
f"WARNING: future runs will automatically use {out_fname}, unless you delete it."
)
vid = imageio.get_reader(out_fname, "ffmpeg")
n_frames = vid._meta["nframes"]
logger.info(f"loaded {n_frames} frames from {out_fname}")
return vid
class Rotoscoper:
def __init__(self, video_path, target=None, thresh=None):
global ROTOSCOPERS # redundant, but leaving it here to document the globals
if video_path[0] == "-":
video_path = video_path[1:]
inverted = True
else:
inverted = False
self.frames = get_frames(video_path)
self.target = target
self.inverted = inverted
ROTOSCOPERS.add(self) # uh.... why. why does it work this way. weird af.
def update(self, frame_n):
"""
Updates the mask of the attached target.
:param frame_n: The frame number to update the mask for
:return: Nothing.
"""
if self.target is None:
return
mask_pil = Image.fromarray(self.frames.get_data(frame_n)).convert("L")
self.target.set_mask(mask_pil, self.inverted)
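def example_rotoscoping(target, frame_n=0):
    """Minimal usage sketch.

    `target` is assumed to be any object exposing set_mask(pil_image, inverted),
    e.g. a pytti prompt; "mask.mp4" is a placeholder path (a leading '-' would
    invert the mask).
    """
    roto = Rotoscoper("mask.mp4", target=target)  # registers itself globally
    update_rotoscopers(frame_n)  # push frame `frame_n` of the mask onto every registered target
    return roto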
| 32.284314
| 127
| 0.61403
|
794e01f6cb50ce2e2f6e2459f5ca67cb7be137a0
| 1,990
|
py
|
Python
|
main.py
|
vmedina1014/flask-todo
|
4844182366a2efd5a623689ce31223c6e27247dc
|
[
"bzip2-1.0.6"
] | null | null | null |
main.py
|
vmedina1014/flask-todo
|
4844182366a2efd5a623689ce31223c6e27247dc
|
[
"bzip2-1.0.6"
] | null | null | null |
main.py
|
vmedina1014/flask-todo
|
4844182366a2efd5a623689ce31223c6e27247dc
|
[
"bzip2-1.0.6"
] | null | null | null |
from datetime import datetime
import os
from flask import Flask, render_template, request, redirect, url_for, session
from passlib.hash import pbkdf2_sha256
from model import Task, User
app = Flask(__name__)
# The secret key must come from the environment; a hard-coded key would defeat session security.
app.secret_key = os.environ['SECRET_KEY'].encode()
@app.route('/all')
def all_tasks():
return render_template('all.jinja2', tasks=Task.select())
@app.route('/create', methods=['GET', 'POST'])
def create():
if 'username' not in session:
return redirect(url_for('login'))
if request.method == 'POST':
task = Task(name=request.form['name'])
task.save()
return redirect(url_for('all_tasks'))
else:
return render_template('create.jinja2')
@app.route('/login', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
        # first() returns None when no such user exists, instead of raising DoesNotExist
        user = User.select().where(User.name == request.form['name']).first()
if user and pbkdf2_sha256.verify(request.form['password'], user.password):
session['username'] = request.form['name']
return redirect(url_for('all_tasks'))
return render_template('login.jinja2', error="Incorrect username or password.")
else:
return render_template('login.jinja2')
@app.route('/incomplete', methods=['GET', 'POST'])
def incomplete_tasks():
if 'username' not in session:
return redirect(url_for('login'))
if request.method == 'POST':
user = User.select().where(User.name == session['username']).get()
Task.update(performed=datetime.now(), performed_by=user)\
.where(Task.id == request.form['task_id'])\
.execute()
return render_template('incomplete.jinja2', tasks=Task.select().where(Task.performed.is_null()))
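def create_user(name, plain_password):
    """Illustrative write path matching the login view above.

    Assumes the User model has `name` and `password` fields, as that view implies;
    pbkdf2_sha256.hash() produces the string that pbkdf2_sha256.verify() checks.
    """
    user = User(name=name, password=pbkdf2_sha256.hash(plain_password))
    user.save()
    return user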
if __name__ == "__main__":
port = int(os.environ.get("PORT", 5000))
app.run(host='0.0.0.0', port=port)
| 32.096774
| 101
| 0.635678
|
794e02502732278ba761164e8089ccfa3b681577
| 7,361
|
py
|
Python
|
apps/filebrowser/src/filebrowser/lib/archives.py
|
kokosing/hue
|
2307f5379a35aae9be871e836432e6f45138b3d9
|
[
"Apache-2.0"
] | 3
|
2018-01-29T14:16:02.000Z
|
2019-02-05T21:33:05.000Z
|
apps/filebrowser/src/filebrowser/lib/archives.py
|
zks888/hue
|
93a8c370713e70b216c428caa2f75185ef809deb
|
[
"Apache-2.0"
] | 4
|
2021-03-11T04:02:00.000Z
|
2022-03-27T08:31:56.000Z
|
apps/filebrowser/src/filebrowser/lib/archives.py
|
zks888/hue
|
93a8c370713e70b216c428caa2f75185ef809deb
|
[
"Apache-2.0"
] | 2
|
2019-06-17T11:51:56.000Z
|
2020-07-25T08:29:56.000Z
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Utilities for dealing with file modes.
import bz2
import os
import posixpath
import tarfile
import tempfile
from desktop.lib.exceptions_renderable import PopupException
from django.utils.translation import ugettext as _
from filebrowser.conf import ARCHIVE_UPLOAD_TEMPDIR
from zipfile import ZipFile
__all__ = ['archive_factory']
class Archive(object):
"""
  Archive interface.
"""
def extract(self, path):
"""
Extract an Archive.
Should return a directory where the extracted contents live.
"""
    raise NotImplementedError(_("Must implement 'extract' method."))
def _create_dirs(self, basepath, dirs=[]):
"""
Creates all directories passed at the given basepath.
"""
for directory in dirs:
      # Reject directories that are absolute or that escape the base path via '..'
if os.path.isabs(directory) or '..' in directory:
raise IllegalPathException()
directory = os.path.join(basepath, directory)
try:
os.makedirs(directory)
except OSError:
pass
class ZipArchive(Archive):
"""
Acts on a zip file in memory or in a temporary location.
Python's ZipFile class inherently buffers all reading.
"""
def __init__(self, file):
self.file = isinstance(file, basestring) and open(file) or file
self.zfh = ZipFile(self.file)
def extract(self):
"""
Extracts a zip file.
If a 'file' ends with '/', then it is a directory and we must create it.
    Otherwise, open a file for writing and pipe the contents of the zipfile entry into the new file.
"""
# Store all extracted files in a temporary directory.
if ARCHIVE_UPLOAD_TEMPDIR.get():
directory = tempfile.mkdtemp(dir=ARCHIVE_UPLOAD_TEMPDIR.get())
else:
directory = tempfile.mkdtemp()
dirs, files = self._filenames()
self._create_dirs(directory, dirs)
self._create_files(directory, files)
return directory
def _filenames(self):
"""
List all dirs and files by reading the table of contents of the Zipfile.
"""
dirs = []
files = []
for name in self.zfh.namelist():
if name.endswith(posixpath.sep):
dirs.append(name)
else:
files.append(name)
# self.zfh.namelist() sometimes doesn't return all the directories
# Go up the path one directory at the time
parent = os.path.dirname(name)
while parent != '' and parent not in dirs:
dirs.append(parent)
parent = os.path.dirname(parent)
return (dirs, files)
def _create_files(self, basepath, files=[]):
"""
Extract files to their rightful place.
Files are written to a temporary directory immediately after being decompressed.
"""
for f in files:
new_path = os.path.join(basepath, f)
new_file = open(new_path, 'w')
new_file.write(self.zfh.read(f))
new_file.close()
class TarballArchive(Archive):
"""
Acts on a tarball (tar.gz) file in memory or in a temporary location.
  Python's tarfile module inherently buffers all reading.
"""
def __init__(self, file):
if isinstance(file, basestring):
self.path = file
else:
f = tempfile.NamedTemporaryFile(delete=False)
f.write(file.read())
self.path = f.name
f.close()
self.fh = tarfile.open(self.path)
def extract(self):
"""
    Extracts a tarball.
    If a member is a directory, we must create it.
    Otherwise, open a file for writing and pipe the contents of the archive member into the new file.
"""
# Store all extracted files in a temporary directory.
directory = tempfile.mkdtemp()
dirs, files = self._filenames()
self._create_dirs(directory, dirs)
self._create_files(directory, files)
return directory
def _filenames(self):
"""
    List all dirs and files by reading the table of contents of the tarball.
"""
dirs = []
files = []
for tarinfo in self.fh.getmembers():
if tarinfo.isdir():
dirs.append(tarinfo.name)
else:
files.append(tarinfo.name)
parent = os.path.dirname(tarinfo.path)
# getmembers() sometimes doesn't return all the directories
# Go up the path one directory at the time
while parent != '' and parent not in dirs:
dirs.append(parent)
parent = os.path.dirname(parent)
return (dirs, files)
def _create_files(self, basepath, files=[]):
"""
Extract files to their rightful place.
Files are written to a temporary directory immediately after being decompressed.
"""
for f in files:
new_path = os.path.join(basepath, f)
new_file = open(new_path, 'w')
new_file.write(self.fh.extractfile(f).read())
new_file.close()
class BZ2Archive(Archive):
"""
Acts on a bzip2 file in memory or in a temporary location.
Python's BZ2File class inherently buffers all reading.
"""
def __init__(self, file):
# bzip2 only compresses single files and there is no direct method in the bz2 library to get the file name
self.name = file.name[:-6] if file.name.lower().endswith('.bzip2') else file.name[:-4]
if isinstance(file, basestring):
self.path = file
else:
f = tempfile.NamedTemporaryFile(delete=False)
f.write(file.read())
self.path = f.name
f.close()
self.fh = bz2.BZ2File(self.path)
def extract(self):
"""
Extracts a bz2 file.
    Opens the file for writing and pipes the contents of the bz2 file into the new file.
"""
# Store all extracted files in a temporary directory.
if ARCHIVE_UPLOAD_TEMPDIR.get():
directory = tempfile.mkdtemp(dir=ARCHIVE_UPLOAD_TEMPDIR.get())
else:
directory = tempfile.mkdtemp()
files = [self.name]
self._create_files(directory, files)
return directory
def _create_files(self, basepath, files=[]):
"""
Files are written to a temporary directory immediately after being decompressed.
"""
for f in files:
new_path = os.path.join(basepath, f)
new_file = open(new_path, 'w')
new_file.write(self.fh.read())
new_file.close()
def archive_factory(path, archive_type='zip'):
if archive_type == 'zip':
return ZipArchive(path)
elif archive_type == 'tarball' or archive_type == 'tar.gz' or archive_type == 'tgz':
return TarballArchive(path)
elif archive_type == 'bz2' or archive_type == 'bzip2':
return BZ2Archive(path)
class IllegalPathException(PopupException):
def __init__(self):
super(IllegalPathException, self).__init__('''Archive path cannot be absolute or contain '..' ''')
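def example_extract_upload(path='/tmp/upload.zip'):
  """
  Minimal usage sketch; the path is a placeholder for an uploaded archive.
  Note that archive_factory() returns None for unrecognised archive types.
  """
  archive = archive_factory(path, archive_type='zip')
  return archive.extract() if archive else None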
| 30.799163
| 110
| 0.675452
|
794e02a9abb8bfa43732422f20b7e7e3ffdaefeb
| 10,950
|
py
|
Python
|
bambinocampones/src/bambinocampones/website/migrations/0002_auto__add_field_galeria_foi_importante.py
|
ProfessionalIT/customers
|
3dbc1989bb3494fb6de7edad67dc59b7b0385ac3
|
[
"MIT"
] | null | null | null |
bambinocampones/src/bambinocampones/website/migrations/0002_auto__add_field_galeria_foi_importante.py
|
ProfessionalIT/customers
|
3dbc1989bb3494fb6de7edad67dc59b7b0385ac3
|
[
"MIT"
] | 1
|
2015-11-08T11:49:35.000Z
|
2015-11-08T11:49:43.000Z
|
bambinocampones/src/bambinocampones/website/migrations/0002_auto__add_field_galeria_foi_importante.py
|
ProfessionalIT/customers
|
3dbc1989bb3494fb6de7edad67dc59b7b0385ac3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Galeria.foi_importante'
db.add_column(u'website_galeria', 'foi_importante',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Galeria.foi_importante'
db.delete_column(u'website_galeria', 'foi_importante')
models = {
u'website.calendario': {
'Meta': {'object_name': 'Calendario'},
'data_agendamento': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 12, 4, 0, 0)'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}),
'tipo': ('django.db.models.fields.CharField', [], {'default': "u'E'", 'max_length': '1'}),
'titulo': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'website.cardapio': {
'Meta': {'object_name': 'Cardapio'},
'ano': ('django.db.models.fields.CharField', [], {'default': "'2013'", 'max_length': '4'}),
'cardapio_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mes': ('django.db.models.fields.CharField', [], {'default': "'12'", 'max_length': '2'}),
'tipo': ('django.db.models.fields.CharField', [], {'default': "u'1'", 'max_length': '1'})
},
u'website.conteudodownload': {
'Meta': {'object_name': 'ConteudoDownload'},
'conteudo_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'descricao': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'miniatura': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}),
'tipo': ('django.db.models.fields.CharField', [], {'default': "u'1'", 'max_length': '1'}),
'titulo': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'website.depoimento': {
'Meta': {'object_name': 'Depoimento'},
'autor': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'conteudo': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'website.galeria': {
'Meta': {'object_name': 'Galeria'},
'ano': ('django.db.models.fields.CharField', [], {'default': "'2013'", 'max_length': '4'}),
'descricao': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'destaque': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'foi_importante': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mes': ('django.db.models.fields.CharField', [], {'default': "'12'", 'max_length': '2'}),
'permite_comentario': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}),
'tipo': ('django.db.models.fields.CharField', [], {'default': "u'F'", 'max_length': '1'}),
'titulo': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'website.galeriaresource': {
'Meta': {'object_name': 'GaleriaResource'},
'action_resource': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'galeria': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['website.Galeria']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'upload_resource': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'url_resource': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'website.materialescolar': {
'Meta': {'object_name': 'MaterialEscolar'},
'anexo_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'ano': ('django.db.models.fields.CharField', [], {'default': "'2013'", 'max_length': '4'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'miniatura': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'servico': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['website.Servico']"})
},
u'website.menu': {
'Meta': {'object_name': 'Menu'},
'endereco': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'menu_pai': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'Menu Pai'", 'null': 'True', 'to': u"orm['website.Menu']"}),
'nivel': ('django.db.models.fields.IntegerField', [], {}),
'ordem': ('django.db.models.fields.IntegerField', [], {}),
'pagina': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['website.Pagina']", 'null': 'True'}),
'palavras_chaves': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'rascunho': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}),
'titulo': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'website.pagina': {
'Meta': {'object_name': 'Pagina'},
'conteudo': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'palavras_chaves': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'permite_comentario': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rascunho': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}),
'titulo': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'website.parametro': {
'Meta': {'object_name': 'Parametro'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'valor': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'website.professor': {
'Meta': {'object_name': 'Professor'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nome': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'})
},
u'website.publicacao': {
'Meta': {'object_name': 'Publicacao'},
'completa': ('django.db.models.fields.TextField', [], {}),
'data_hora': ('django.db.models.fields.DateTimeField', [], {}),
'data_publicacao': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'destaque': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'galeria': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['website.Galeria']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'introducao': ('django.db.models.fields.TextField', [], {}),
'miniatura_publicacao': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'palavras_chaves': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'permite_comentario': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rascunho': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}),
'tipos': ('django.db.models.fields.CharField', [], {'default': "u'1'", 'max_length': '1'}),
'titulo': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'website.recomendacao': {
'Meta': {'object_name': 'Recomendacao'},
'acao_link': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'descricao': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'destaque': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'miniatura': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'tipo': ('django.db.models.fields.CharField', [], {'default': "u'1'", 'max_length': '1'})
},
u'website.servico': {
'Meta': {'object_name': 'Servico'},
'atividades_extras': ('django.db.models.fields.TextField', [], {}),
'atividades_incluidas': ('django.db.models.fields.TextField', [], {}),
'conteudo_programatico': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'observacoes': ('django.db.models.fields.TextField', [], {}),
'professor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['website.Professor']"}),
'rotina_diaria': ('django.db.models.fields.TextField', [], {}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '200'}),
'titulo': ('django.db.models.fields.CharField', [], {'max_length': '200'})
}
}
complete_apps = ['website']
| 69.303797
| 172
| 0.541918
|
794e03aee17866a752e9abc32ac7dbbfd3b4962e
| 26,232
|
py
|
Python
|
src/sagemaker/automl/automl.py
|
aws-patlin/sagemaker-python-sdk
|
18af12beffed82aaf263e9cfec8832f39b6bc63f
|
[
"Apache-2.0"
] | null | null | null |
src/sagemaker/automl/automl.py
|
aws-patlin/sagemaker-python-sdk
|
18af12beffed82aaf263e9cfec8832f39b6bc63f
|
[
"Apache-2.0"
] | null | null | null |
src/sagemaker/automl/automl.py
|
aws-patlin/sagemaker-python-sdk
|
18af12beffed82aaf263e9cfec8832f39b6bc63f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""A class for SageMaker AutoML Jobs."""
from __future__ import absolute_import
from six import string_types
from sagemaker import Model, PipelineModel
from sagemaker.automl.candidate_estimator import CandidateEstimator
from sagemaker.job import _Job
from sagemaker.session import Session
from sagemaker.utils import name_from_base
class AutoML(object):
"""A class for creating and interacting with SageMaker AutoML jobs
"""
def __init__(
self,
role,
target_attribute_name,
output_kms_key=None,
output_path=None,
base_job_name=None,
compression_type=None,
sagemaker_session=None,
volume_kms_key=None,
encrypt_inter_container_traffic=False,
vpc_config=None,
problem_type=None,
max_candidates=None,
max_runtime_per_training_job_in_seconds=None,
total_job_runtime_in_seconds=None,
job_objective=None,
generate_candidate_definitions_only=False,
tags=None,
):
self.role = role
self.output_kms_key = output_kms_key
self.output_path = output_path
self.base_job_name = base_job_name
self.compression_type = compression_type
self.volume_kms_key = volume_kms_key
self.encrypt_inter_container_traffic = encrypt_inter_container_traffic
self.vpc_config = vpc_config
self.problem_type = problem_type
self.max_candidate = max_candidates
self.max_runtime_per_training_job_in_seconds = max_runtime_per_training_job_in_seconds
self.total_job_runtime_in_seconds = total_job_runtime_in_seconds
self.target_attribute_name = target_attribute_name
self.job_objective = job_objective
self.generate_candidate_definitions_only = generate_candidate_definitions_only
self.tags = tags
self.current_job_name = None
self._auto_ml_job_desc = None
self._best_candidate = None
self.sagemaker_session = sagemaker_session or Session()
self._check_problem_type_and_job_objective(self.problem_type, self.job_objective)
def fit(self, inputs=None, wait=True, logs=True, job_name=None):
"""Create an AutoML Job with the input dataset.
Args:
inputs (str or list[str] or AutoMLInput): Local path or S3 Uri where the training data
is stored. Or an AutoMLInput object. If a local path is provided, the dataset will
be uploaded to an S3 location.
wait (bool): Whether the call should wait until the job completes (default: True).
logs (bool): Whether to show the logs produced by the job.
Only meaningful when wait is True (default: True).
job_name (str): Training job name. If not specified, the estimator generates
a default job name, based on the training image name and current timestamp.
"""
if logs and not wait:
raise ValueError(
"""Logs can only be shown if wait is set to True.
Please either set wait to True or set logs to False."""
)
# upload data for users if provided local path
# validations are done in _Job._format_inputs_to_input_config
if isinstance(inputs, string_types):
if not inputs.startswith("s3://"):
inputs = self.sagemaker_session.upload_data(inputs, key_prefix="auto-ml-input-data")
self._prepare_for_auto_ml_job(job_name=job_name)
self.latest_auto_ml_job = AutoMLJob.start_new(self, inputs) # pylint: disable=W0201
if wait:
self.latest_auto_ml_job.wait(logs=logs)
def describe_auto_ml_job(self, job_name=None):
"""Returns the job description of an AutoML job for the given job name.
Args:
job_name (str): The name of the AutoML job to describe.
If None, will use object's latest_auto_ml_job name.
Returns:
dict: A dictionary response with the AutoML Job description.
"""
if job_name is None:
job_name = self.current_job_name
self._auto_ml_job_desc = self.sagemaker_session.describe_auto_ml_job(job_name)
return self._auto_ml_job_desc
def best_candidate(self, job_name=None):
"""Returns the best candidate of an AutoML job for a given name
Args:
job_name (str): The name of the AutoML job. If None, will use object's
_current_auto_ml_job_name.
Returns:
dict: a dictionary with information of the best candidate
"""
if self._best_candidate:
return self._best_candidate
if job_name is None:
job_name = self.current_job_name
if self._auto_ml_job_desc is None:
self._auto_ml_job_desc = self.sagemaker_session.describe_auto_ml_job(job_name)
elif self._auto_ml_job_desc["AutoMLJobName"] != job_name:
self._auto_ml_job_desc = self.sagemaker_session.describe_auto_ml_job(job_name)
self._best_candidate = self._auto_ml_job_desc["BestCandidate"]
return self._best_candidate
def list_candidates(
self,
job_name=None,
status_equals=None,
candidate_name=None,
candidate_arn=None,
sort_order=None,
sort_by=None,
max_results=None,
):
"""Returns the list of candidates of an AutoML job for a given name.
Args:
job_name (str): The name of the AutoML job. If None, will use object's
_current_job name.
status_equals (str): Filter the result with candidate status, values could be
"Completed", "InProgress", "Failed", "Stopped", "Stopping"
candidate_name (str): The name of a specified candidate to list.
Default to None.
candidate_arn (str): The Arn of a specified candidate to list.
Default to None.
sort_order (str): The order that the candidates will be listed in result.
Default to None.
sort_by (str): The value that the candidates will be sorted by.
Default to None.
max_results (int): The number of candidates will be listed in results,
between 1 to 100. Default to None. If None, will return all the candidates.
Returns:
list: A list of dictionaries with candidates information
"""
if job_name is None:
job_name = self.current_job_name
list_candidates_args = {"job_name": job_name}
if status_equals:
list_candidates_args["status_equals"] = status_equals
if candidate_name:
list_candidates_args["candidate_name"] = candidate_name
if candidate_arn:
list_candidates_args["candidate_arn"] = candidate_arn
if sort_order:
list_candidates_args["sort_order"] = sort_order
if sort_by:
list_candidates_args["sort_by"] = sort_by
if max_results:
list_candidates_args["max_results"] = max_results
return self.sagemaker_session.list_candidates(**list_candidates_args)["Candidates"]
def deploy(
self,
initial_instance_count,
instance_type,
candidate=None,
sagemaker_session=None,
name=None,
endpoint_name=None,
tags=None,
wait=True,
update_endpoint=False,
vpc_config=None,
enable_network_isolation=False,
model_kms_key=None,
predictor_cls=None,
):
"""Deploy a candidate to a SageMaker Inference Pipeline and return a Predictor
Args:
initial_instance_count (int): The initial number of instances to run
in the ``Endpoint`` created from this ``Model``.
instance_type (str): The EC2 instance type to deploy this Model to.
For example, 'ml.p2.xlarge'.
candidate (CandidateEstimator or dict): a CandidateEstimator used for deploying
to a SageMaker Inference Pipeline. If None, the best candidate will
be used. If the candidate input is a dict, a CandidateEstimator will be
created from it.
sagemaker_session (sagemaker.session.Session): A SageMaker Session
object, used for SageMaker interactions (default: None). If not
specified, the one originally associated with the ``AutoML`` instance is used.
name (str): The pipeline model name. If None, a default model name will
be selected on each ``deploy``.
endpoint_name (str): The name of the endpoint to create (default:
None). If not specified, a unique endpoint name will be created.
tags (List[dict[str, str]]): The list of tags to attach to this
specific endpoint.
wait (bool): Whether the call should wait until the deployment of
model completes (default: True).
update_endpoint (bool): Flag to update the model in an existing
Amazon SageMaker endpoint. If True, this will deploy a new
EndpointConfig to an already existing endpoint and delete
resources corresponding to the previous EndpointConfig. If
False, a new endpoint will be created. Default: False
vpc_config (dict): Specifies a VPC that your training jobs and hosted models have
access to. Contents include "SecurityGroupIds" and "Subnets".
enable_network_isolation (bool): Isolates the training container. No inbound or
outbound network calls can be made, except for calls between peers within a
training cluster for distributed training. Default: False
model_kms_key (str): KMS key ARN used to encrypt the repacked
model archive file if the model is repacked
predictor_cls (callable[string, sagemaker.session.Session]): A
function to call to create a predictor (default: None). If
specified, ``deploy()`` returns the result of invoking this
function on the created endpoint name.
Returns:
callable[string, sagemaker.session.Session] or ``None``:
If ``predictor_cls`` is specified, the invocation of ``self.predictor_cls`` on
the created endpoint name. Otherwise, ``None``.
"""
sagemaker_session = sagemaker_session or self.sagemaker_session
if candidate is None:
candidate_dict = self.best_candidate()
candidate = CandidateEstimator(candidate_dict, sagemaker_session=sagemaker_session)
elif isinstance(candidate, dict):
candidate = CandidateEstimator(candidate, sagemaker_session=sagemaker_session)
inference_containers = candidate.containers
endpoint_name = endpoint_name or self.current_job_name
return self._deploy_inference_pipeline(
inference_containers,
initial_instance_count=initial_instance_count,
instance_type=instance_type,
name=name,
sagemaker_session=sagemaker_session,
endpoint_name=endpoint_name,
tags=tags,
wait=wait,
update_endpoint=update_endpoint,
vpc_config=vpc_config,
enable_network_isolation=enable_network_isolation,
model_kms_key=model_kms_key,
predictor_cls=predictor_cls,
)
def _check_problem_type_and_job_objective(self, problem_type, job_objective):
"""Validate if problem_type and job_objective are both None or are both provided.
Args:
problem_type (str): The type of problem of this AutoMLJob. Valid values are
"Regression", "BinaryClassification", "MultiClassClassification".
job_objective (dict): AutoMLJob objective, contains "AutoMLJobObjectiveType" (optional),
"MetricName" and "Value".
Raises (ValueError): raises ValueError if one of problem_type and job_objective is provided
while the other is None.
"""
if not (problem_type and job_objective) and (problem_type or job_objective):
raise ValueError(
"One of problem type and objective metric provided. "
"Either both of them should be provided or none of them should be provided."
)
def _deploy_inference_pipeline(
self,
inference_containers,
initial_instance_count,
instance_type,
name=None,
sagemaker_session=None,
endpoint_name=None,
tags=None,
wait=True,
update_endpoint=False,
vpc_config=None,
enable_network_isolation=False,
model_kms_key=None,
predictor_cls=None,
):
"""Deploy a SageMaker Inference Pipeline.
Args:
inference_containers (list): a list of inference container definitions
initial_instance_count (int): The initial number of instances to run
in the ``Endpoint`` created from this ``Model``.
instance_type (str): The EC2 instance type to deploy this Model to.
For example, 'ml.p2.xlarge'.
name (str): The pipeline model name. If None, a default model name will
be selected on each ``deploy``.
sagemaker_session (sagemaker.session.Session): A SageMaker Session
object, used for SageMaker interactions (default: None). If not
specified, one is created using the default AWS configuration
chain.
endpoint_name (str): The name of the endpoint to create (default:
None). If not specified, a unique endpoint name will be created.
tags (List[dict[str, str]]): The list of tags to attach to this
specific endpoint.
wait (bool): Whether the call should wait until the deployment of
model completes (default: True).
update_endpoint (bool): Flag to update the model in an existing
Amazon SageMaker endpoint. If True, this will deploy a new
EndpointConfig to an already existing endpoint and delete
resources corresponding to the previous EndpointConfig. If
False, a new endpoint will be created. Default: False
vpc_config (dict): information about vpc configuration, optionally
contains "SecurityGroupIds", "Subnets"
model_kms_key (str): KMS key ARN used to encrypt the repacked
model archive file if the model is repacked
predictor_cls (callable[string, sagemaker.session.Session]): A
function to call to create a predictor (default: None). If
specified, ``deploy()`` returns the result of invoking this
function on the created endpoint name.
"""
# construct Model objects
models = []
for container in inference_containers:
image = container["Image"]
model_data = container["ModelDataUrl"]
env = container["Environment"]
model = Model(
image=image,
model_data=model_data,
role=self.role,
env=env,
vpc_config=vpc_config,
sagemaker_session=sagemaker_session or self.sagemaker_session,
enable_network_isolation=enable_network_isolation,
model_kms_key=model_kms_key,
)
models.append(model)
pipeline = PipelineModel(
models=models,
role=self.role,
predictor_cls=predictor_cls,
name=name,
vpc_config=vpc_config,
sagemaker_session=sagemaker_session or self.sagemaker_session,
)
return pipeline.deploy(
initial_instance_count=initial_instance_count,
instance_type=instance_type,
endpoint_name=endpoint_name,
tags=tags,
wait=wait,
update_endpoint=update_endpoint,
)
def _prepare_for_auto_ml_job(self, job_name=None):
"""Set any values in the AutoMLJob that need to be set before creating request.
Args:
job_name (str): The name of the AutoML job. If None, a job name will be
created from base_job_name or "sagemaker-auto-ml".
"""
if job_name is not None:
self.current_job_name = job_name
else:
if self.base_job_name:
base_name = self.base_job_name
else:
base_name = "automl"
            # The CreateAutoMLJob API requires the job name to be at most 32 characters long
self.current_job_name = name_from_base(base_name, max_length=32)
if self.output_path is None:
self.output_path = "s3://{}/".format(self.sagemaker_session.default_bucket())
class AutoMLInput(object):
"""Accepts parameters that specify an S3 input for an auto ml job and provides
a method to turn those parameters into a dictionary."""
def __init__(self, inputs, target_attribute_name, compression=None):
"""Convert an S3 Uri or a list of S3 Uri to an AutoMLInput object.
        :param inputs (str, list[str]): a string or a list of strings pointing to the
S3 location(s) where input data is stored.
:param target_attribute_name (str): the target attribute name for regression
or classification.
:param compression (str): if training data is compressed, the compression type.
The default value is None.
"""
self.inputs = inputs
self.target_attribute_name = target_attribute_name
self.compression = compression
def to_request_dict(self):
"""Generates a request dictionary using the parameters provided to the class."""
# Create the request dictionary.
auto_ml_input = []
if isinstance(self.inputs, string_types):
self.inputs = [self.inputs]
for entry in self.inputs:
input_entry = {
"DataSource": {"S3DataSource": {"S3DataType": "S3Prefix", "S3Uri": entry}},
"TargetAttributeName": self.target_attribute_name,
}
if self.compression is not None:
input_entry["CompressionType"] = self.compression
auto_ml_input.append(input_entry)
return auto_ml_input
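def example_automl_input():
    """Minimal usage sketch; the S3 prefixes, column name and compression value
    are example placeholders."""
    spec = AutoMLInput(
        inputs=["s3://my-bucket/automl/train-1/", "s3://my-bucket/automl/train-2/"],
        target_attribute_name="label",
        compression="Gzip",
    )
    return spec.to_request_dict()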
class AutoMLJob(_Job):
"""A class for interacting with CreateAutoMLJob API."""
def __init__(self, sagemaker_session, job_name, inputs):
self.inputs = inputs
self.job_name = job_name
super(AutoMLJob, self).__init__(sagemaker_session=sagemaker_session, job_name=job_name)
@classmethod
def start_new(cls, auto_ml, inputs):
"""Create a new Amazon SageMaker AutoML job from auto_ml.
Args:
auto_ml (sagemaker.automl.AutoML): AutoML object
created by the user.
inputs (str, list[str]): Parameters used when called
:meth:`~sagemaker.automl.AutoML.fit`.
Returns:
sagemaker.automl.AutoMLJob: Constructed object that captures
all information about the started AutoML job.
"""
config = cls._load_config(inputs, auto_ml)
auto_ml_args = config.copy()
auto_ml_args["job_name"] = auto_ml.current_job_name
auto_ml_args["problem_type"] = auto_ml.problem_type
auto_ml_args["job_objective"] = auto_ml.job_objective
auto_ml_args["tags"] = auto_ml.tags
auto_ml.sagemaker_session.auto_ml(**auto_ml_args)
return cls(auto_ml.sagemaker_session, auto_ml.current_job_name, inputs)
@classmethod
def _load_config(cls, inputs, auto_ml, expand_role=True, validate_uri=True):
"""Load job_config, input_config and output config from auto_ml and inputs.
Args:
inputs (str): S3 Uri where the training data is stored, must start
with "s3://".
auto_ml (AutoML): an AutoML object that user initiated.
            expand_role (bool): whether to expand auto_ml.role into a full
                SageMaker execution role ARN.
validate_uri (bool): indicate whether to validate the S3 uri.
Returns (dict): a config dictionary that contains input_config, output_config,
job_config and role information.
"""
# JobConfig
# InputDataConfig
# OutputConfig
if isinstance(inputs, AutoMLInput):
input_config = inputs.to_request_dict()
else:
input_config = cls._format_inputs_to_input_config(
inputs, validate_uri, auto_ml.compression_type, auto_ml.target_attribute_name
)
output_config = _Job._prepare_output_config(auto_ml.output_path, auto_ml.output_kms_key)
role = auto_ml.sagemaker_session.expand_role(auto_ml.role) if expand_role else auto_ml.role
stop_condition = cls._prepare_auto_ml_stop_condition(
auto_ml.max_candidate,
auto_ml.max_runtime_per_training_job_in_seconds,
auto_ml.total_job_runtime_in_seconds,
)
auto_ml_job_config = {
"CompletionCriteria": stop_condition,
"SecurityConfig": {
"EnableInterContainerTrafficEncryption": auto_ml.encrypt_inter_container_traffic
},
}
if auto_ml.volume_kms_key:
auto_ml_job_config["SecurityConfig"]["VolumeKmsKeyId"] = auto_ml.volume_kms_key
if auto_ml.vpc_config:
auto_ml_job_config["SecurityConfig"]["VpcConfig"] = auto_ml.vpc_config
config = {
"input_config": input_config,
"output_config": output_config,
"auto_ml_job_config": auto_ml_job_config,
"role": role,
"generate_candidate_definitions_only": auto_ml.generate_candidate_definitions_only,
}
return config
@classmethod
def _format_inputs_to_input_config(
cls, inputs, validate_uri=True, compression=None, target_attribute_name=None
):
"""Convert inputs to AutoML InputDataConfig.
Args:
inputs (str, list[str]): local path(s) or S3 uri(s) of input datasets.
validate_uri (bool): indicates whether it is needed to validate S3 uri.
            compression (str): the compression type of the input data, if any.
target_attribute_name (str): the target attribute name for classification
or regression.
Returns (dict): a dict of AutoML InputDataConfig
"""
if inputs is None:
return None
channels = []
if isinstance(inputs, AutoMLInput):
channels.append(inputs.to_request_dict())
elif isinstance(inputs, string_types):
channel = _Job._format_string_uri_input(
inputs,
validate_uri,
compression=compression,
target_attribute_name=target_attribute_name,
).config
channels.append(channel)
elif isinstance(inputs, list):
for input_entry in inputs:
channel = _Job._format_string_uri_input(
input_entry,
validate_uri,
compression=compression,
target_attribute_name=target_attribute_name,
).config
channels.append(channel)
else:
msg = "Cannot format input {}. Expecting a string or a list of strings."
raise ValueError(msg.format(inputs))
for channel in channels:
if channel["TargetAttributeName"] is None:
raise ValueError("TargetAttributeName cannot be None.")
return channels
@classmethod
def _prepare_auto_ml_stop_condition(
cls, max_candidates, max_runtime_per_training_job_in_seconds, total_job_runtime_in_seconds
):
"""Defines the CompletionCriteria of an AutoMLJob.
Args:
max_candidates (int): the maximum number of candidates returned by an
AutoML job.
max_runtime_per_training_job_in_seconds (int): the maximum time of each
training job in seconds.
total_job_runtime_in_seconds (int): the total wait time of an AutoML job.
Returns (dict): an AutoML CompletionCriteria.
"""
stopping_condition = {"MaxCandidates": max_candidates}
if max_runtime_per_training_job_in_seconds is not None:
stopping_condition[
"MaxRuntimePerTrainingJobInSeconds"
] = max_runtime_per_training_job_in_seconds
if total_job_runtime_in_seconds is not None:
stopping_condition["MaxAutoMLJobRuntimeInSeconds"] = total_job_runtime_in_seconds
return stopping_condition
def describe(self):
"""Prints out a response from the DescribeAutoMLJob API call."""
return self.sagemaker_session.describe_auto_ml_job(self.job_name)
def wait(self, logs=True):
"""Wait for the AutoML job to finish.
Args:
logs (bool): indicate whether to output logs.
"""
if logs:
self.sagemaker_session.logs_for_auto_ml_job(self.job_name, wait=True)
else:
self.sagemaker_session.wait_for_auto_ml_job(self.job_name)
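def example_automl_run():
    """Minimal usage sketch of the AutoML workflow defined above.

    The role ARN, S3 URI, target column and instance type are placeholders.
    """
    automl = AutoML(
        role="arn:aws:iam::111122223333:role/SageMakerExecutionRole",
        target_attribute_name="label",
        max_candidates=10,
    )
    automl.fit("s3://my-bucket/automl/train.csv", wait=True, logs=True)
    predictor = automl.deploy(
        initial_instance_count=1, instance_type="ml.m5.xlarge")
    return automl.best_candidate(), predictor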
| 42.515397
| 100
| 0.64063
|
794e047086377a4da70f76d706112f93faaaf873
| 1,006
|
py
|
Python
|
metodi/zeri/bisezione.py
|
alemazzo/metodi_numerici
|
0d7d02aa392dde51abe1a4ee8ac5412f8f27736a
|
[
"MIT"
] | 9
|
2021-07-08T10:55:58.000Z
|
2021-12-03T09:56:37.000Z
|
metodi/zeri/bisezione.py
|
alemazzo/metodi_numerici
|
0d7d02aa392dde51abe1a4ee8ac5412f8f27736a
|
[
"MIT"
] | null | null | null |
metodi/zeri/bisezione.py
|
alemazzo/metodi_numerici
|
0d7d02aa392dde51abe1a4ee8ac5412f8f27736a
|
[
"MIT"
] | null | null | null |
import math
import numpy as np
def bisezione(f, a, b, tol):
"""
Algoritmo di Bisezione per il calcolo dello zero di una funzione.
:param f: la funzione di cui calcolare lo zero
:param a: il valore minimo dell'intervallo
:param b: il valore massimo dell'intervallo
:param tol: la tollerenza
:return: (zero della funzione, numero di iterazioni, iterazioni)
"""
def sign(value):
return math.copysign(1, value)
fa, fb, x = f(a), f(b), None
if sign(fa) == sign(fb):
print("sign(fa) == sign(fb) => Non applicabile")
return None, 0, []
max_iterazioni = int(math.ceil(math.log2((b - a) / tol)))
it, xk = 0, []
while it < max_iterazioni and abs(b - a) >= tol + np.spacing(1) * max(abs(a), abs(b)):
        x = a + (b - a) / 2  # compute the midpoint of the current interval
xk.append(x)
fx = f(x)
if sign(fx) == sign(fa):
a, fa = x, fx
else:
b, fb = x, fx
it += 1
return x, it, xk
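if __name__ == "__main__":
    # usage example: the zero of f(x) = x**2 - 2 in [0, 2] is sqrt(2)
    zero, iterations, history = bisezione(lambda x: x ** 2 - 2, 0, 2, 1e-10)
    print(zero, iterations)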
| 26.473684
| 90
| 0.55169
|
794e04f1f75888cc2ddd1dbfe357cb76ba29c591
| 696
|
py
|
Python
|
labfunctions/security/utils.py
|
dymaxionlabs/nb_workflows
|
336e4d83dd5f8a7edfbaacfa426b23a42c0a68a9
|
[
"Apache-2.0"
] | 4
|
2022-02-17T19:47:52.000Z
|
2022-02-17T20:11:06.000Z
|
labfunctions/security/utils.py
|
dymaxionlabs/nb_workflows
|
336e4d83dd5f8a7edfbaacfa426b23a42c0a68a9
|
[
"Apache-2.0"
] | 2
|
2022-03-26T00:07:05.000Z
|
2022-03-30T21:20:00.000Z
|
labfunctions/security/utils.py
|
dymaxionlabs/nb_workflows
|
336e4d83dd5f8a7edfbaacfa426b23a42c0a68a9
|
[
"Apache-2.0"
] | 1
|
2022-02-18T13:33:00.000Z
|
2022-02-18T13:33:00.000Z
|
import binascii
import importlib
import os
from datetime import datetime, timedelta
from labfunctions.types.security import KeyPairs
def open_keys(pub, priv) -> KeyPairs:
with open(pub, "r") as f:
pub = f.read()
pub = pub.strip()
with open(priv, "r") as f:
priv = f.read()
priv = priv.strip()
return KeyPairs(public=pub, private=priv)
def generate_token(n=24, *args, **kwargs):
return str(binascii.hexlify(os.urandom(n)), "utf-8")
def get_delta(delta_min: int) -> int:
"""Returns a timestamp addding a delta_min value to the utc now date."""
delta = datetime.utcnow() + timedelta(minutes=delta_min)
return int(delta.timestamp())
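def example_token_flow(pub_path="keys/jwt.pub", priv_path="keys/jwt.priv"):
    """Minimal usage sketch; the key file paths are placeholders."""
    token = generate_token()  # 48 hex characters by default (n=24 random bytes)
    expires_at = get_delta(delta_min=60)  # unix timestamp one hour from now
    keys = open_keys(pub_path, priv_path)
    return token, expires_at, keys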
| 25.777778
| 76
| 0.666667
|
794e0507326723f09ca15ca16f6803cf81ead02f
| 115
|
py
|
Python
|
bigquery/job/ems_job_state.py
|
emartech/ems-gcp-toolkit
|
74da9af2e5af262ef10613b5a91b8841fb3c2691
|
[
"MIT"
] | 1
|
2020-02-14T18:08:58.000Z
|
2020-02-14T18:08:58.000Z
|
bigquery/job/ems_job_state.py
|
emartech/ems-gcp-toolkit
|
74da9af2e5af262ef10613b5a91b8841fb3c2691
|
[
"MIT"
] | null | null | null |
bigquery/job/ems_job_state.py
|
emartech/ems-gcp-toolkit
|
74da9af2e5af262ef10613b5a91b8841fb3c2691
|
[
"MIT"
] | 1
|
2022-02-17T19:50:17.000Z
|
2022-02-17T19:50:17.000Z
|
from enum import Enum
class EmsJobState(Enum):
PENDING = "PENDING"
RUNNING = "RUNNING"
DONE = "DONE"
| 14.375
| 24
| 0.643478
|
794e05e2dc9c494da9d6695a9ad1c551f30b6e8d
| 11,370
|
py
|
Python
|
tensorflow/contrib/keras/python/keras/applications/resnet50.py
|
ralic/tensorflow
|
1209491913def44650d6457c60a6e41d56de3306
|
[
"Apache-2.0"
] | 1
|
2017-09-05T02:22:07.000Z
|
2017-09-05T02:22:07.000Z
|
tensorflow/contrib/keras/python/keras/applications/resnet50.py
|
ralic/tensorflow
|
1209491913def44650d6457c60a6e41d56de3306
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/contrib/keras/python/keras/applications/resnet50.py
|
ralic/tensorflow
|
1209491913def44650d6457c60a6e41d56de3306
|
[
"Apache-2.0"
] | 1
|
2021-01-25T14:18:12.000Z
|
2021-01-25T14:18:12.000Z
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""ResNet50 model for Keras.
# Reference:
- [Deep Residual Learning for Image
Recognition](https://arxiv.org/abs/1512.03385)
Adapted from code contributed by BigMoyan.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras import layers
from tensorflow.contrib.keras.python.keras.applications.imagenet_utils import _obtain_input_shape
from tensorflow.contrib.keras.python.keras.applications.imagenet_utils import decode_predictions # pylint: disable=unused-import
from tensorflow.contrib.keras.python.keras.applications.imagenet_utils import preprocess_input # pylint: disable=unused-import
from tensorflow.contrib.keras.python.keras.engine.topology import get_source_inputs
from tensorflow.contrib.keras.python.keras.layers import Activation
from tensorflow.contrib.keras.python.keras.layers import AveragePooling2D
from tensorflow.contrib.keras.python.keras.layers import BatchNormalization
from tensorflow.contrib.keras.python.keras.layers import Conv2D
from tensorflow.contrib.keras.python.keras.layers import Dense
from tensorflow.contrib.keras.python.keras.layers import Flatten
from tensorflow.contrib.keras.python.keras.layers import GlobalAveragePooling2D
from tensorflow.contrib.keras.python.keras.layers import GlobalMaxPooling2D
from tensorflow.contrib.keras.python.keras.layers import Input
from tensorflow.contrib.keras.python.keras.layers import MaxPooling2D
from tensorflow.contrib.keras.python.keras.models import Model
from tensorflow.contrib.keras.python.keras.utils.data_utils import get_file
WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels.h5'
WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5'
def identity_block(input_tensor, kernel_size, filters, stage, block):
"""The identity block is the block that has no conv layer at shortcut.
Arguments:
input_tensor: input tensor
      kernel_size: default 3, the kernel size of the middle conv layer at the main path
      filters: list of integers, the filters of the 3 conv layers at the main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
Returns:
Output tensor for the block.
"""
filters1, filters2, filters3 = filters
if K.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = Conv2D(filters1, (1, 1), name=conv_name_base + '2a')(input_tensor)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = Conv2D(
filters2, kernel_size, padding='same', name=conv_name_base + '2b')(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c')(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)
x = layers.add([x, input_tensor])
x = Activation('relu')(x)
return x
def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2,
2)):
"""conv_block is the block that has a conv layer at shortcut.
Arguments:
input_tensor: input tensor
      kernel_size: default 3, the kernel size of the middle conv layer at the main path
      filters: list of integers, the filters of the 3 conv layers at the main path
stage: integer, current stage label, used for generating layer names
block: 'a','b'..., current block label, used for generating layer names
strides: Tuple of integers.
Returns:
Output tensor for the block.
  Note that from stage 3, the first conv layer at the main path has
  strides=(2, 2), and the shortcut has strides=(2, 2) as well.
"""
filters1, filters2, filters3 = filters
if K.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = Conv2D(
filters1, (1, 1), strides=strides,
name=conv_name_base + '2a')(input_tensor)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = Conv2D(
filters2, kernel_size, padding='same', name=conv_name_base + '2b')(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c')(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)
shortcut = Conv2D(
filters3, (1, 1), strides=strides,
name=conv_name_base + '1')(input_tensor)
shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1')(shortcut)
x = layers.add([x, shortcut])
x = Activation('relu')(x)
return x
def ResNet50(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000):
"""Instantiates the ResNet50 architecture.
Optionally loads weights pre-trained
on ImageNet. Note that when using TensorFlow,
for best performance you should set
`image_data_format="channels_last"` in your Keras config
at ~/.keras/keras.json.
The model and the weights are compatible with both
TensorFlow and Theano. The data format
convention used by the model is the one
specified in your Keras config file.
Arguments:
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization)
or "imagenet" (pre-training on ImageNet).
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `channels_last` data format)
or `(3, 224, 224)` (with `channels_first` data format).
It should have exactly 3 input channels,
and width and height should be no smaller than 197.
E.g. `(200, 200, 3)` would be one valid value.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
Returns:
A Keras model instance.
Raises:
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
"""
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as imagenet with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = _obtain_input_shape(
input_shape,
default_size=224,
min_size=197,
data_format=K.image_data_format(),
require_flatten=include_top,
weights=weights)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
img_input = Input(tensor=input_tensor, shape=input_shape)
if K.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
x = Conv2D(64, (7, 7),
strides=(2, 2), padding='same', name='conv1')(img_input)
x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
x = Activation('relu')(x)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')
x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
x = AveragePooling2D((7, 7), name='avg_pool')(x)
if include_top:
x = Flatten()(x)
x = Dense(classes, activation='softmax', name='fc1000')(x)
else:
if pooling == 'avg':
x = GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Model(inputs, x, name='resnet50')
# load weights
if weights == 'imagenet':
if include_top:
weights_path = get_file(
'resnet50_weights_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH,
cache_subdir='models',
md5_hash='a7b3fe01876f51b976af0dea6bc144eb')
else:
weights_path = get_file(
'resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
WEIGHTS_PATH_NO_TOP,
cache_subdir='models',
md5_hash='a268eb855778b3df3c7506639542a6af')
model.load_weights(weights_path)
return model
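# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): how the ResNet50
# constructor above is commonly used for feature extraction. The image path is
# a placeholder, the import paths are assumptions about the Keras distribution
# in use, and a real pipeline would also apply the matching `preprocess_input`
# before predicting.
if __name__ == '__main__':
    import numpy as np
    from keras.preprocessing import image  # assumed import path
    model = ResNet50(weights='imagenet', include_top=False, pooling='avg')
    img = image.load_img('example.jpg', target_size=(224, 224))  # placeholder file
    x = np.expand_dims(image.img_to_array(img), axis=0)
    features = model.predict(x)  # shape (1, 2048) with 'avg' pooling
    print(features.shape)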
| 39.616725
| 148
| 0.681882
|
794e06c837be402cc9540e19c72cee08296ceda1
| 1,284
|
py
|
Python
|
features/steps/block.py
|
netsyno/python-docx
|
f181a89f1c1651baba6a37399e6bba09769459ae
|
[
"MIT"
] | 3
|
2015-04-12T11:18:40.000Z
|
2021-04-28T09:17:38.000Z
|
features/steps/block.py
|
netsyno/python-docx
|
f181a89f1c1651baba6a37399e6bba09769459ae
|
[
"MIT"
] | null | null | null |
features/steps/block.py
|
netsyno/python-docx
|
f181a89f1c1651baba6a37399e6bba09769459ae
|
[
"MIT"
] | 2
|
2016-02-28T03:20:32.000Z
|
2019-10-05T12:01:28.000Z
|
# encoding: utf-8
"""
Step implementations for block content containers
"""
from behave import given, then, when
from docx import Document
from docx.table import Table
from helpers import test_docx
# given ===================================================
@given('a document containing a table')
def given_a_document_containing_a_table(context):
docx_path = test_docx('blk-containing-table')
context.document = Document(docx_path)
@given('a paragraph')
def given_a_paragraph(context):
context.document = Document()
context.p = context.document.add_paragraph()
# when ====================================================
@when('I add a paragraph')
def when_add_paragraph(context):
document = context.document
context.p = document.add_paragraph()
@when('I add a table')
def when_add_table(context):
rows, cols = 2, 2
context.document.add_table(rows, cols)
# then =====================================================
@then('I can access the table')
def then_can_access_table(context):
table = context.document.tables[-1]
assert isinstance(table, Table)
@then('the new table appears in the document')
def then_new_table_appears_in_document(context):
table = context.document.tables[-1]
assert isinstance(table, Table)
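# ---------------------------------------------------------------------------
# Hedged illustration (not part of the original step module): the plain
# python-docx calls that these given/when/then steps exercise, end to end.
if __name__ == '__main__':
    document = Document()
    document.add_paragraph('some paragraph text')
    document.add_table(rows=2, cols=2)
    assert isinstance(document.tables[-1], Table)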
| 23.345455
| 60
| 0.646417
|
794e06e153f378c5d0228164d6396c6c2fa38c9d
| 1,550
|
py
|
Python
|
patterns/exas.py
|
stspbu/code-change-miner
|
a9aceb92a3484f0c3c8140bbb986bfec1e2d7562
|
[
"Apache-2.0"
] | 1
|
2020-09-06T16:32:17.000Z
|
2020-09-06T16:32:17.000Z
|
patterns/exas.py
|
neomatrix369/code-change-miner
|
4fe24d03c8512202bc80a77bf1ebee456d8400d7
|
[
"Apache-2.0"
] | null | null | null |
patterns/exas.py
|
neomatrix369/code-change-miner
|
4fe24d03c8512202bc80a77bf1ebee456d8400d7
|
[
"Apache-2.0"
] | null | null | null |
from pyflowgraph.models import LinkType
import sys
HALF_N = sys.maxsize // 2
N = HALF_N * 2
def normalize(value):
return (value + HALF_N) % N - HALF_N
class ExasFeature:
"""
Features characterize code fragments
Read more: "Accurate and Efficient Structural Characteristic Feature Extraction"
"""
MAX_LENGTH = 2 ** 3 - 1
def __init__(self, nodes=None):
self.node_label_to_feature_id = {}
self.edge_label_to_feature_id = {
LinkType.QUALIFIER: 0,
LinkType.CONDITION: 1,
LinkType.CONTROL: 2,
LinkType.DEFINITION: 3,
LinkType.MAP: 4,
LinkType.PARAMETER: 5,
LinkType.RECEIVER: 6,
LinkType.REFERENCE: 7
}
if nodes is not None:
self._bind_node_feature_ids(nodes)
def _bind_node_feature_ids(self, nodes):
for num, node in enumerate(nodes):
self.node_label_to_feature_id[node.label] = num + 1 # some ids can be skipped
def get_id_by_label(self, label):
return self.node_label_to_feature_id.get(label)
def get_id_by_labels(self, labels):
result = 0
for num, label in enumerate(labels):
if num % 2 == 0:
s = self.node_label_to_feature_id.get(label)
else:
s = self.edge_label_to_feature_id.get(label, 0)
s = normalize(s << 5) # 2^4 types
result = normalize(result << 8)
result = normalize(result + s)
return result
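# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module). Real nodes come from
# pyflowgraph; here a tiny stand-in class with a `.label` attribute is enough
# to show how a feature id is derived from a node/edge label path.
if __name__ == '__main__':
    class _FakeNode:
        def __init__(self, label):
            self.label = label

    feature = ExasFeature(nodes=[_FakeNode('assign'), _FakeNode('call')])
    # A path alternates node labels (even positions) and edge labels (odd ones).
    path = ['assign', LinkType.DEFINITION, 'call']
    print(feature.get_id_by_labels(path))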
| 27.192982
| 90
| 0.592258
|
794e075cdcbbb2b8a7a7a6a591352a9bdc0687fb
| 48,416
|
py
|
Python
|
sympy/physics/tests/test_secondquant.py
|
CameronKing/sympy
|
3295b02c617a10ea8db0a070356cc0ba5a3b5121
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/physics/tests/test_secondquant.py
|
CameronKing/sympy
|
3295b02c617a10ea8db0a070356cc0ba5a3b5121
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/physics/tests/test_secondquant.py
|
CameronKing/sympy
|
3295b02c617a10ea8db0a070356cc0ba5a3b5121
|
[
"BSD-3-Clause"
] | null | null | null |
from sympy.physics.secondquant import (
Dagger, Bd, VarBosonicBasis, BBra, B, BKet, FixedBosonicBasis,
matrix_rep, apply_operators, InnerProduct, Commutator, KroneckerDelta,
AnnihilateBoson, CreateBoson, BosonicOperator,
F, Fd, FKet, BosonState, CreateFermion, AnnihilateFermion,
evaluate_deltas, AntiSymmetricTensor, contraction, NO, wicks,
PermutationOperator, simplify_index_permutations,
_sort_anticommuting_fermions, _get_ordered_dummies,
substitute_dummies, FockState, FockStateBosonKet,
ContractionAppliesOnlyToFermions
)
from sympy import (Dummy, expand, Function, I, Rational, simplify, sqrt, Sum,
Symbol, symbols, srepr)
from sympy.core.compatibility import range
from sympy.utilities.pytest import XFAIL, slow, raises
from sympy.printing.latex import latex
def test_PermutationOperator():
p, q, r, s = symbols('p,q,r,s')
f, g, h, i = map(Function, 'fghi')
P = PermutationOperator
assert P(p, q).get_permuted(f(p)*g(q)) == -f(q)*g(p)
assert P(p, q).get_permuted(f(p, q)) == -f(q, p)
assert P(p, q).get_permuted(f(p)) == f(p)
expr = (f(p)*g(q)*h(r)*i(s)
- f(q)*g(p)*h(r)*i(s)
- f(p)*g(q)*h(s)*i(r)
+ f(q)*g(p)*h(s)*i(r))
perms = [P(p, q), P(r, s)]
assert (simplify_index_permutations(expr, perms) ==
P(p, q)*P(r, s)*f(p)*g(q)*h(r)*i(s))
assert latex(P(p, q)) == 'P(pq)'
def test_index_permutations_with_dummies():
a, b, c, d = symbols('a b c d')
p, q, r, s = symbols('p q r s', cls=Dummy)
f, g = map(Function, 'fg')
P = PermutationOperator
# No dummy substitution necessary
expr = f(a, b, p, q) - f(b, a, p, q)
assert simplify_index_permutations(
expr, [P(a, b)]) == P(a, b)*f(a, b, p, q)
# Cases where dummy substitution is needed
expected = P(a, b)*substitute_dummies(f(a, b, p, q))
expr = f(a, b, p, q) - f(b, a, q, p)
result = simplify_index_permutations(expr, [P(a, b)])
assert expected == substitute_dummies(result)
expr = f(a, b, q, p) - f(b, a, p, q)
result = simplify_index_permutations(expr, [P(a, b)])
assert expected == substitute_dummies(result)
# A case where nothing can be done
expr = f(a, b, q, p) - g(b, a, p, q)
result = simplify_index_permutations(expr, [P(a, b)])
assert expr == result
def test_dagger():
i, j, n, m = symbols('i,j,n,m')
assert Dagger(1) == 1
assert Dagger(1.0) == 1.0
assert Dagger(2*I) == -2*I
assert Dagger(Rational(1, 2)*I/3.0) == -Rational(1, 2)*I/3.0
assert Dagger(BKet([n])) == BBra([n])
assert Dagger(B(0)) == Bd(0)
assert Dagger(Bd(0)) == B(0)
assert Dagger(B(n)) == Bd(n)
assert Dagger(Bd(n)) == B(n)
assert Dagger(B(0) + B(1)) == Bd(0) + Bd(1)
assert Dagger(n*m) == Dagger(n)*Dagger(m) # n, m commute
assert Dagger(B(n)*B(m)) == Bd(m)*Bd(n)
assert Dagger(B(n)**10) == Dagger(B(n))**10
assert Dagger('a') == Dagger(Symbol('a'))
assert Dagger(Dagger('a')) == Symbol('a')
def test_operator():
i, j = symbols('i,j')
o = BosonicOperator(i)
assert o.state == i
assert o.is_symbolic
o = BosonicOperator(1)
assert o.state == 1
assert not o.is_symbolic
def test_create():
i, j, n, m = symbols('i,j,n,m')
o = Bd(i)
assert latex(o) == "b^\\dagger_{i}"
assert isinstance(o, CreateBoson)
o = o.subs(i, j)
assert o.atoms(Symbol) == {j}
o = Bd(0)
assert o.apply_operator(BKet([n])) == sqrt(n + 1)*BKet([n + 1])
o = Bd(n)
assert o.apply_operator(BKet([n])) == o*BKet([n])
def test_annihilate():
i, j, n, m = symbols('i,j,n,m')
o = B(i)
assert latex(o) == "b_{i}"
assert isinstance(o, AnnihilateBoson)
o = o.subs(i, j)
assert o.atoms(Symbol) == {j}
o = B(0)
assert o.apply_operator(BKet([n])) == sqrt(n)*BKet([n - 1])
o = B(n)
assert o.apply_operator(BKet([n])) == o*BKet([n])
def test_basic_state():
i, j, n, m = symbols('i,j,n,m')
s = BosonState([0, 1, 2, 3, 4])
assert len(s) == 5
assert s.args[0] == tuple(range(5))
assert s.up(0) == BosonState([1, 1, 2, 3, 4])
assert s.down(4) == BosonState([0, 1, 2, 3, 3])
for i in range(5):
assert s.up(i).down(i) == s
assert s.down(0) == 0
for i in range(5):
assert s[i] == i
s = BosonState([n, m])
assert s.down(0) == BosonState([n - 1, m])
assert s.up(0) == BosonState([n + 1, m])
@XFAIL
def test_move1():
i, j = symbols('i,j')
A, C = symbols('A,C', cls=Function)
o = A(i)*C(j)
# This almost works, but has a minus sign wrong
assert move(o, 0, 1) == KroneckerDelta(i, j) + C(j)*A(i)
@XFAIL
def test_move2():
i, j = symbols('i,j')
A, C = symbols('A,C', cls=Function)
o = C(j)*A(i)
# This almost works, but has a minus sign wrong
assert move(o, 0, 1) == -KroneckerDelta(i, j) + A(i)*C(j)
def test_basic_apply():
n = symbols("n")
e = B(0)*BKet([n])
assert apply_operators(e) == sqrt(n)*BKet([n - 1])
e = Bd(0)*BKet([n])
assert apply_operators(e) == sqrt(n + 1)*BKet([n + 1])
def test_complex_apply():
n, m = symbols("n,m")
o = Bd(0)*B(0)*Bd(1)*B(0)
e = apply_operators(o*BKet([n, m]))
answer = sqrt(n)*sqrt(m + 1)*(-1 + n)*BKet([-1 + n, 1 + m])
assert expand(e) == expand(answer)
def test_number_operator():
n = symbols("n")
o = Bd(0)*B(0)
e = apply_operators(o*BKet([n]))
assert e == n*BKet([n])
def test_inner_product():
i, j, k, l = symbols('i,j,k,l')
s1 = BBra([0])
s2 = BKet([1])
assert InnerProduct(s1, Dagger(s1)) == 1
assert InnerProduct(s1, s2) == 0
s1 = BBra([i, j])
s2 = BKet([k, l])
r = InnerProduct(s1, s2)
assert r == KroneckerDelta(i, k)*KroneckerDelta(j, l)
def test_symbolic_matrix_elements():
n, m = symbols('n,m')
s1 = BBra([n])
s2 = BKet([m])
o = B(0)
e = apply_operators(s1*o*s2)
assert e == sqrt(m)*KroneckerDelta(n, m - 1)
def test_matrix_elements():
b = VarBosonicBasis(5)
o = B(0)
m = matrix_rep(o, b)
for i in range(4):
assert m[i, i + 1] == sqrt(i + 1)
o = Bd(0)
m = matrix_rep(o, b)
for i in range(4):
assert m[i + 1, i] == sqrt(i + 1)
def test_fixed_bosonic_basis():
b = FixedBosonicBasis(2, 2)
# assert b == [FockState((2, 0)), FockState((1, 1)), FockState((0, 2))]
state = b.state(1)
assert state == FockStateBosonKet((1, 1))
assert b.index(state) == 1
assert b.state(1) == b[1]
assert len(b) == 3
assert str(b) == '[FockState((2, 0)), FockState((1, 1)), FockState((0, 2))]'
assert repr(b) == '[FockState((2, 0)), FockState((1, 1)), FockState((0, 2))]'
assert srepr(b) == '[FockState((2, 0)), FockState((1, 1)), FockState((0, 2))]'
@slow
def test_sho():
n, m = symbols('n,m')
h_n = Bd(n)*B(n)*(n + Rational(1, 2))
H = Sum(h_n, (n, 0, 5))
o = H.doit(deep=False)
b = FixedBosonicBasis(2, 6)
m = matrix_rep(o, b)
# We need to double check these energy values to make sure that they
# are correct and have the proper degeneracies!
diag = [1, 2, 3, 3, 4, 5, 4, 5, 6, 7, 5, 6, 7, 8, 9, 6, 7, 8, 9, 10, 11]
for i in range(len(diag)):
assert diag[i] == m[i, i]
def test_commutation():
n, m = symbols("n,m", above_fermi=True)
c = Commutator(B(0), Bd(0))
assert c == 1
c = Commutator(Bd(0), B(0))
assert c == -1
c = Commutator(B(n), Bd(0))
assert c == KroneckerDelta(n, 0)
c = Commutator(B(0), B(0))
assert c == 0
c = Commutator(B(0), Bd(0))
e = simplify(apply_operators(c*BKet([n])))
assert e == BKet([n])
c = Commutator(B(0), B(1))
e = simplify(apply_operators(c*BKet([n, m])))
assert e == 0
c = Commutator(F(m), Fd(m))
assert c == +1 - 2*NO(Fd(m)*F(m))
c = Commutator(Fd(m), F(m))
assert c.expand() == -1 + 2*NO(Fd(m)*F(m))
C = Commutator
X, Y, Z = symbols('X,Y,Z', commutative=False)
assert C(C(X, Y), Z) != 0
assert C(C(X, Z), Y) != 0
assert C(Y, C(X, Z)) != 0
i, j, k, l = symbols('i,j,k,l', below_fermi=True)
a, b, c, d = symbols('a,b,c,d', above_fermi=True)
p, q, r, s = symbols('p,q,r,s')
D = KroneckerDelta
assert C(Fd(a), F(i)) == -2*NO(F(i)*Fd(a))
assert C(Fd(j), NO(Fd(a)*F(i))).doit(wicks=True) == -D(j, i)*Fd(a)
assert C(Fd(a)*F(i), Fd(b)*F(j)).doit(wicks=True) == 0
c1 = Commutator(F(a), Fd(a))
assert Commutator.eval(c1, c1) == 0
c = Commutator(Fd(a)*F(i),Fd(b)*F(j))
assert latex(c) == r'\left[a^\dagger_{a} a_{i},a^\dagger_{b} a_{j}\right]'
assert repr(c) == 'Commutator(CreateFermion(a)*AnnihilateFermion(i),CreateFermion(b)*AnnihilateFermion(j))'
assert str(c) == '[CreateFermion(a)*AnnihilateFermion(i),CreateFermion(b)*AnnihilateFermion(j)]'
def test_create_f():
i, j, n, m = symbols('i,j,n,m')
o = Fd(i)
assert isinstance(o, CreateFermion)
o = o.subs(i, j)
assert o.atoms(Symbol) == {j}
o = Fd(1)
assert o.apply_operator(FKet([n])) == FKet([1, n])
assert o.apply_operator(FKet([n])) == -FKet([n, 1])
o = Fd(n)
assert o.apply_operator(FKet([])) == FKet([n])
vacuum = FKet([], fermi_level=4)
assert vacuum == FKet([], fermi_level=4)
i, j, k, l = symbols('i,j,k,l', below_fermi=True)
a, b, c, d = symbols('a,b,c,d', above_fermi=True)
p, q, r, s = symbols('p,q,r,s')
assert Fd(i).apply_operator(FKet([i, j, k], 4)) == FKet([j, k], 4)
assert Fd(a).apply_operator(FKet([i, b, k], 4)) == FKet([a, i, b, k], 4)
assert Dagger(B(p)).apply_operator(q) == q*CreateBoson(p)
assert repr(Fd(p)) == 'CreateFermion(p)'
assert srepr(Fd(p)) == "CreateFermion(Symbol('p'))"
assert latex(Fd(p)) == r'a^\dagger_{p}'
def test_annihilate_f():
i, j, n, m = symbols('i,j,n,m')
o = F(i)
assert isinstance(o, AnnihilateFermion)
o = o.subs(i, j)
assert o.atoms(Symbol) == {j}
o = F(1)
assert o.apply_operator(FKet([1, n])) == FKet([n])
assert o.apply_operator(FKet([n, 1])) == -FKet([n])
o = F(n)
assert o.apply_operator(FKet([n])) == FKet([])
i, j, k, l = symbols('i,j,k,l', below_fermi=True)
a, b, c, d = symbols('a,b,c,d', above_fermi=True)
p, q, r, s = symbols('p,q,r,s')
assert F(i).apply_operator(FKet([i, j, k], 4)) == 0
assert F(a).apply_operator(FKet([i, b, k], 4)) == 0
assert F(l).apply_operator(FKet([i, j, k], 3)) == 0
assert F(l).apply_operator(FKet([i, j, k], 4)) == FKet([l, i, j, k], 4)
assert str(F(p)) == 'f(p)'
assert repr(F(p)) == 'AnnihilateFermion(p)'
assert srepr(F(p)) == "AnnihilateFermion(Symbol('p'))"
assert latex(F(p)) == 'a_{p}'
def test_create_b():
i, j, n, m = symbols('i,j,n,m')
o = Bd(i)
assert isinstance(o, CreateBoson)
o = o.subs(i, j)
assert o.atoms(Symbol) == {j}
o = Bd(0)
assert o.apply_operator(BKet([n])) == sqrt(n + 1)*BKet([n + 1])
o = Bd(n)
assert o.apply_operator(BKet([n])) == o*BKet([n])
def test_annihilate_b():
i, j, n, m = symbols('i,j,n,m')
o = B(i)
assert isinstance(o, AnnihilateBoson)
o = o.subs(i, j)
assert o.atoms(Symbol) == {j}
o = B(0)
def test_wicks():
p, q, r, s = symbols('p,q,r,s', above_fermi=True)
# Testing for particles only
str = F(p)*Fd(q)
assert wicks(str) == NO(F(p)*Fd(q)) + KroneckerDelta(p, q)
str = Fd(p)*F(q)
assert wicks(str) == NO(Fd(p)*F(q))
str = F(p)*Fd(q)*F(r)*Fd(s)
nstr = wicks(str)
fasit = NO(
KroneckerDelta(p, q)*KroneckerDelta(r, s)
+ KroneckerDelta(p, q)*AnnihilateFermion(r)*CreateFermion(s)
+ KroneckerDelta(r, s)*AnnihilateFermion(p)*CreateFermion(q)
- KroneckerDelta(p, s)*AnnihilateFermion(r)*CreateFermion(q)
- AnnihilateFermion(p)*AnnihilateFermion(r)*CreateFermion(q)*CreateFermion(s))
assert nstr == fasit
assert (p*q*nstr).expand() == wicks(p*q*str)
assert (nstr*p*q*2).expand() == wicks(str*p*q*2)
# Testing CC equations particles and holes
i, j, k, l = symbols('i j k l', below_fermi=True, cls=Dummy)
a, b, c, d = symbols('a b c d', above_fermi=True, cls=Dummy)
p, q, r, s = symbols('p q r s', cls=Dummy)
assert (wicks(F(a)*NO(F(i)*F(j))*Fd(b)) ==
NO(F(a)*F(i)*F(j)*Fd(b)) +
KroneckerDelta(a, b)*NO(F(i)*F(j)))
assert (wicks(F(a)*NO(F(i)*F(j)*F(k))*Fd(b)) ==
NO(F(a)*F(i)*F(j)*F(k)*Fd(b)) -
KroneckerDelta(a, b)*NO(F(i)*F(j)*F(k)))
expr = wicks(Fd(i)*NO(Fd(j)*F(k))*F(l))
assert (expr ==
-KroneckerDelta(i, k)*NO(Fd(j)*F(l)) -
KroneckerDelta(j, l)*NO(Fd(i)*F(k)) -
KroneckerDelta(i, k)*KroneckerDelta(j, l) +
KroneckerDelta(i, l)*NO(Fd(j)*F(k)) +
NO(Fd(i)*Fd(j)*F(k)*F(l)))
expr = wicks(F(a)*NO(F(b)*Fd(c))*Fd(d))
assert (expr ==
-KroneckerDelta(a, c)*NO(F(b)*Fd(d)) -
KroneckerDelta(b, d)*NO(F(a)*Fd(c)) -
KroneckerDelta(a, c)*KroneckerDelta(b, d) +
KroneckerDelta(a, d)*NO(F(b)*Fd(c)) +
NO(F(a)*F(b)*Fd(c)*Fd(d)))
def test_NO():
i, j, k, l = symbols('i j k l', below_fermi=True)
a, b, c, d = symbols('a b c d', above_fermi=True)
p, q, r, s = symbols('p q r s', cls=Dummy)
assert (NO(Fd(p)*F(q) + Fd(a)*F(b)) ==
NO(Fd(p)*F(q)) + NO(Fd(a)*F(b)))
assert (NO(Fd(i)*NO(F(j)*Fd(a))) ==
NO(Fd(i)*F(j)*Fd(a)))
assert NO(1) == 1
assert NO(i) == i
assert (NO(Fd(a)*Fd(b)*(F(c) + F(d))) ==
NO(Fd(a)*Fd(b)*F(c)) +
NO(Fd(a)*Fd(b)*F(d)))
assert NO(Fd(a)*F(b))._remove_brackets() == Fd(a)*F(b)
assert NO(F(j)*Fd(i))._remove_brackets() == F(j)*Fd(i)
assert (NO(Fd(p)*F(q)).subs(Fd(p), Fd(a) + Fd(i)) ==
NO(Fd(a)*F(q)) + NO(Fd(i)*F(q)))
assert (NO(Fd(p)*F(q)).subs(F(q), F(a) + F(i)) ==
NO(Fd(p)*F(a)) + NO(Fd(p)*F(i)))
expr = NO(Fd(p)*F(q))._remove_brackets()
assert wicks(expr) == NO(expr)
assert NO(Fd(a)*F(b)) == - NO(F(b)*Fd(a))
no = NO(Fd(a)*F(i)*F(b)*Fd(j))
l1 = [ ind for ind in no.iter_q_creators() ]
assert l1 == [0, 1]
l2 = [ ind for ind in no.iter_q_annihilators() ]
assert l2 == [3, 2]
no = NO(Fd(a)*Fd(i))
assert no.has_q_creators == 1
assert no.has_q_annihilators == -1
assert str(no) == ':CreateFermion(a)*CreateFermion(i):'
assert repr(no) == 'NO(CreateFermion(a)*CreateFermion(i))'
assert latex(no) == r'\left\{a^\dagger_{a} a^\dagger_{i}\right\}'
raises(NotImplementedError, lambda: NO(Bd(p)*F(q)))
def test_sorting():
i, j = symbols('i,j', below_fermi=True)
a, b = symbols('a,b', above_fermi=True)
p, q = symbols('p,q')
# p, q
assert _sort_anticommuting_fermions([Fd(p), F(q)]) == ([Fd(p), F(q)], 0)
assert _sort_anticommuting_fermions([F(p), Fd(q)]) == ([Fd(q), F(p)], 1)
# i, p
assert _sort_anticommuting_fermions([F(p), Fd(i)]) == ([F(p), Fd(i)], 0)
assert _sort_anticommuting_fermions([Fd(i), F(p)]) == ([F(p), Fd(i)], 1)
assert _sort_anticommuting_fermions([Fd(p), Fd(i)]) == ([Fd(p), Fd(i)], 0)
assert _sort_anticommuting_fermions([Fd(i), Fd(p)]) == ([Fd(p), Fd(i)], 1)
assert _sort_anticommuting_fermions([F(p), F(i)]) == ([F(i), F(p)], 1)
assert _sort_anticommuting_fermions([F(i), F(p)]) == ([F(i), F(p)], 0)
assert _sort_anticommuting_fermions([Fd(p), F(i)]) == ([F(i), Fd(p)], 1)
assert _sort_anticommuting_fermions([F(i), Fd(p)]) == ([F(i), Fd(p)], 0)
# a, p
assert _sort_anticommuting_fermions([F(p), Fd(a)]) == ([Fd(a), F(p)], 1)
assert _sort_anticommuting_fermions([Fd(a), F(p)]) == ([Fd(a), F(p)], 0)
assert _sort_anticommuting_fermions([Fd(p), Fd(a)]) == ([Fd(a), Fd(p)], 1)
assert _sort_anticommuting_fermions([Fd(a), Fd(p)]) == ([Fd(a), Fd(p)], 0)
assert _sort_anticommuting_fermions([F(p), F(a)]) == ([F(p), F(a)], 0)
assert _sort_anticommuting_fermions([F(a), F(p)]) == ([F(p), F(a)], 1)
assert _sort_anticommuting_fermions([Fd(p), F(a)]) == ([Fd(p), F(a)], 0)
assert _sort_anticommuting_fermions([F(a), Fd(p)]) == ([Fd(p), F(a)], 1)
# i, a
assert _sort_anticommuting_fermions([F(i), Fd(j)]) == ([F(i), Fd(j)], 0)
assert _sort_anticommuting_fermions([Fd(j), F(i)]) == ([F(i), Fd(j)], 1)
assert _sort_anticommuting_fermions([Fd(a), Fd(i)]) == ([Fd(a), Fd(i)], 0)
assert _sort_anticommuting_fermions([Fd(i), Fd(a)]) == ([Fd(a), Fd(i)], 1)
assert _sort_anticommuting_fermions([F(a), F(i)]) == ([F(i), F(a)], 1)
assert _sort_anticommuting_fermions([F(i), F(a)]) == ([F(i), F(a)], 0)
def test_contraction():
i, j, k, l = symbols('i,j,k,l', below_fermi=True)
a, b, c, d = symbols('a,b,c,d', above_fermi=True)
p, q, r, s = symbols('p,q,r,s')
assert contraction(Fd(i), F(j)) == KroneckerDelta(i, j)
assert contraction(F(a), Fd(b)) == KroneckerDelta(a, b)
assert contraction(F(a), Fd(i)) == 0
assert contraction(Fd(a), F(i)) == 0
assert contraction(F(i), Fd(a)) == 0
assert contraction(Fd(i), F(a)) == 0
assert contraction(Fd(i), F(p)) == KroneckerDelta(i, p)
restr = evaluate_deltas(contraction(Fd(p), F(q)))
assert restr.is_only_below_fermi
restr = evaluate_deltas(contraction(F(p), Fd(q)))
assert restr.is_only_above_fermi
raises(ContractionAppliesOnlyToFermions, lambda: contraction(B(a), Fd(b)))
def test_evaluate_deltas():
i, j, k = symbols('i,j,k')
r = KroneckerDelta(i, j) * KroneckerDelta(j, k)
assert evaluate_deltas(r) == KroneckerDelta(i, k)
r = KroneckerDelta(i, 0) * KroneckerDelta(j, k)
assert evaluate_deltas(r) == KroneckerDelta(i, 0) * KroneckerDelta(j, k)
r = KroneckerDelta(1, j) * KroneckerDelta(j, k)
assert evaluate_deltas(r) == KroneckerDelta(1, k)
r = KroneckerDelta(j, 2) * KroneckerDelta(k, j)
assert evaluate_deltas(r) == KroneckerDelta(2, k)
r = KroneckerDelta(i, 0) * KroneckerDelta(i, j) * KroneckerDelta(j, 1)
assert evaluate_deltas(r) == 0
r = (KroneckerDelta(0, i) * KroneckerDelta(0, j)
* KroneckerDelta(1, j) * KroneckerDelta(1, j))
assert evaluate_deltas(r) == 0
def test_Tensors():
i, j, k, l = symbols('i j k l', below_fermi=True, cls=Dummy)
a, b, c, d = symbols('a b c d', above_fermi=True, cls=Dummy)
p, q, r, s = symbols('p q r s')
AT = AntiSymmetricTensor
assert AT('t', (a, b), (i, j)) == -AT('t', (b, a), (i, j))
assert AT('t', (a, b), (i, j)) == AT('t', (b, a), (j, i))
assert AT('t', (a, b), (i, j)) == -AT('t', (a, b), (j, i))
assert AT('t', (a, a), (i, j)) == 0
assert AT('t', (a, b), (i, i)) == 0
assert AT('t', (a, b, c), (i, j)) == -AT('t', (b, a, c), (i, j))
assert AT('t', (a, b, c), (i, j, k)) == AT('t', (b, a, c), (i, k, j))
tabij = AT('t', (a, b), (i, j))
assert tabij.has(a)
assert tabij.has(b)
assert tabij.has(i)
assert tabij.has(j)
assert tabij.subs(b, c) == AT('t', (a, c), (i, j))
assert (2*tabij).subs(i, c) == 2*AT('t', (a, b), (c, j))
assert tabij.symbol == Symbol('t')
assert latex(tabij) == 't^{ab}_{ij}'
assert str(tabij) == 't((_a, _b),(_i, _j))'
assert AT('t', (a, a), (i, j)).subs(a, b) == AT('t', (b, b), (i, j))
assert AT('t', (a, i), (a, j)).subs(a, b) == AT('t', (b, i), (b, j))
def test_fully_contracted():
i, j, k, l = symbols('i j k l', below_fermi=True)
a, b, c, d = symbols('a b c d', above_fermi=True)
p, q, r, s = symbols('p q r s', cls=Dummy)
Fock = (AntiSymmetricTensor('f', (p,), (q,))*
NO(Fd(p)*F(q)))
V = (AntiSymmetricTensor('v', (p, q), (r, s))*
NO(Fd(p)*Fd(q)*F(s)*F(r)))/4
Fai = wicks(NO(Fd(i)*F(a))*Fock,
keep_only_fully_contracted=True,
simplify_kronecker_deltas=True)
assert Fai == AntiSymmetricTensor('f', (a,), (i,))
Vabij = wicks(NO(Fd(i)*Fd(j)*F(b)*F(a))*V,
keep_only_fully_contracted=True,
simplify_kronecker_deltas=True)
assert Vabij == AntiSymmetricTensor('v', (a, b), (i, j))
def test_substitute_dummies_without_dummies():
i, j = symbols('i,j')
assert substitute_dummies(att(i, j) + 2) == att(i, j) + 2
assert substitute_dummies(att(i, j) + 1) == att(i, j) + 1
def test_substitute_dummies_NO_operator():
i, j = symbols('i j', cls=Dummy)
assert substitute_dummies(att(i, j)*NO(Fd(i)*F(j))
- att(j, i)*NO(Fd(j)*F(i))) == 0
def test_substitute_dummies_SQ_operator():
i, j = symbols('i j', cls=Dummy)
assert substitute_dummies(att(i, j)*Fd(i)*F(j)
- att(j, i)*Fd(j)*F(i)) == 0
def test_substitute_dummies_new_indices():
i, j = symbols('i j', below_fermi=True, cls=Dummy)
a, b = symbols('a b', above_fermi=True, cls=Dummy)
p, q = symbols('p q', cls=Dummy)
f = Function('f')
assert substitute_dummies(f(i, a, p) - f(j, b, q), new_indices=True) == 0
def test_substitute_dummies_substitution_order():
i, j, k, l = symbols('i j k l', below_fermi=True, cls=Dummy)
f = Function('f')
from sympy.utilities.iterables import variations
for permut in variations([i, j, k, l], 4):
assert substitute_dummies(f(*permut) - f(i, j, k, l)) == 0
def test_dummy_order_inner_outer_lines_VT1T1T1():
ii = symbols('i', below_fermi=True)
aa = symbols('a', above_fermi=True)
k, l = symbols('k l', below_fermi=True, cls=Dummy)
c, d = symbols('c d', above_fermi=True, cls=Dummy)
v = Function('v')
t = Function('t')
dums = _get_ordered_dummies
# Coupled-Cluster T1 terms with V*T1*T1*T1
# t^{a}_{k} t^{c}_{i} t^{d}_{l} v^{lk}_{dc}
exprs = [
# permut v and t <=> swapping internal lines, equivalent
# irrespective of symmetries in v
v(k, l, c, d)*t(c, ii)*t(d, l)*t(aa, k),
v(l, k, c, d)*t(c, ii)*t(d, k)*t(aa, l),
v(k, l, d, c)*t(d, ii)*t(c, l)*t(aa, k),
v(l, k, d, c)*t(d, ii)*t(c, k)*t(aa, l),
]
for permut in exprs[1:]:
assert dums(exprs[0]) != dums(permut)
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_dummy_order_inner_outer_lines_VT1T1T1T1():
ii, jj = symbols('i j', below_fermi=True)
aa, bb = symbols('a b', above_fermi=True)
k, l = symbols('k l', below_fermi=True, cls=Dummy)
c, d = symbols('c d', above_fermi=True, cls=Dummy)
v = Function('v')
t = Function('t')
dums = _get_ordered_dummies
# Coupled-Cluster T2 terms with V*T1*T1*T1*T1
exprs = [
# permut t <=> swapping external lines, not equivalent
# except if v has certain symmetries.
v(k, l, c, d)*t(c, ii)*t(d, jj)*t(aa, k)*t(bb, l),
v(k, l, c, d)*t(c, jj)*t(d, ii)*t(aa, k)*t(bb, l),
v(k, l, c, d)*t(c, ii)*t(d, jj)*t(bb, k)*t(aa, l),
v(k, l, c, d)*t(c, jj)*t(d, ii)*t(bb, k)*t(aa, l),
]
for permut in exprs[1:]:
assert dums(exprs[0]) != dums(permut)
assert substitute_dummies(exprs[0]) != substitute_dummies(permut)
exprs = [
# permut v <=> swapping external lines, not equivalent
# except if v has certain symmetries.
#
# Note that in contrast to above, these permutations have identical
# dummy order. That is because the proximity to external indices
# has higher influence on the canonical dummy ordering than the
# position of a dummy on the factors. In fact, the terms here are
        # similar in structure to the result of the dummy substitutions above.
v(k, l, c, d)*t(c, ii)*t(d, jj)*t(aa, k)*t(bb, l),
v(l, k, c, d)*t(c, ii)*t(d, jj)*t(aa, k)*t(bb, l),
v(k, l, d, c)*t(c, ii)*t(d, jj)*t(aa, k)*t(bb, l),
v(l, k, d, c)*t(c, ii)*t(d, jj)*t(aa, k)*t(bb, l),
]
for permut in exprs[1:]:
assert dums(exprs[0]) == dums(permut)
assert substitute_dummies(exprs[0]) != substitute_dummies(permut)
exprs = [
# permut t and v <=> swapping internal lines, equivalent.
# Canonical dummy order is different, and a consistent
# substitution reveals the equivalence.
v(k, l, c, d)*t(c, ii)*t(d, jj)*t(aa, k)*t(bb, l),
v(k, l, d, c)*t(c, jj)*t(d, ii)*t(aa, k)*t(bb, l),
v(l, k, c, d)*t(c, ii)*t(d, jj)*t(bb, k)*t(aa, l),
v(l, k, d, c)*t(c, jj)*t(d, ii)*t(bb, k)*t(aa, l),
]
for permut in exprs[1:]:
assert dums(exprs[0]) != dums(permut)
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_equivalent_internal_lines_VT1T1():
i, j, k, l = symbols('i j k l', below_fermi=True, cls=Dummy)
a, b, c, d = symbols('a b c d', above_fermi=True, cls=Dummy)
v = Function('v')
t = Function('t')
dums = _get_ordered_dummies
exprs = [ # permute v. Different dummy order. Not equivalent.
v(i, j, a, b)*t(a, i)*t(b, j),
v(j, i, a, b)*t(a, i)*t(b, j),
v(i, j, b, a)*t(a, i)*t(b, j),
]
for permut in exprs[1:]:
assert dums(exprs[0]) != dums(permut)
assert substitute_dummies(exprs[0]) != substitute_dummies(permut)
exprs = [ # permute v. Different dummy order. Equivalent
v(i, j, a, b)*t(a, i)*t(b, j),
v(j, i, b, a)*t(a, i)*t(b, j),
]
for permut in exprs[1:]:
assert dums(exprs[0]) != dums(permut)
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
exprs = [ # permute t. Same dummy order, not equivalent.
v(i, j, a, b)*t(a, i)*t(b, j),
v(i, j, a, b)*t(b, i)*t(a, j),
]
for permut in exprs[1:]:
assert dums(exprs[0]) == dums(permut)
assert substitute_dummies(exprs[0]) != substitute_dummies(permut)
exprs = [ # permute v and t. Different dummy order, equivalent
v(i, j, a, b)*t(a, i)*t(b, j),
v(j, i, a, b)*t(a, j)*t(b, i),
v(i, j, b, a)*t(b, i)*t(a, j),
v(j, i, b, a)*t(b, j)*t(a, i),
]
for permut in exprs[1:]:
assert dums(exprs[0]) != dums(permut)
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_equivalent_internal_lines_VT2conjT2():
# this diagram requires special handling in TCE
i, j, k, l, m, n = symbols('i j k l m n', below_fermi=True, cls=Dummy)
a, b, c, d, e, f = symbols('a b c d e f', above_fermi=True, cls=Dummy)
p1, p2, p3, p4 = symbols('p1 p2 p3 p4', above_fermi=True, cls=Dummy)
h1, h2, h3, h4 = symbols('h1 h2 h3 h4', below_fermi=True, cls=Dummy)
from sympy.utilities.iterables import variations
v = Function('v')
t = Function('t')
dums = _get_ordered_dummies
# v(abcd)t(abij)t(ijcd)
template = v(p1, p2, p3, p4)*t(p1, p2, i, j)*t(i, j, p3, p4)
permutator = variations([a, b, c, d], 4)
base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4], permut)
expr = template.subs(subslist)
assert dums(base) != dums(expr)
assert substitute_dummies(expr) == substitute_dummies(base)
template = v(p1, p2, p3, p4)*t(p1, p2, j, i)*t(j, i, p3, p4)
permutator = variations([a, b, c, d], 4)
base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4], permut)
expr = template.subs(subslist)
assert dums(base) != dums(expr)
assert substitute_dummies(expr) == substitute_dummies(base)
# v(abcd)t(abij)t(jicd)
template = v(p1, p2, p3, p4)*t(p1, p2, i, j)*t(j, i, p3, p4)
permutator = variations([a, b, c, d], 4)
base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4], permut)
expr = template.subs(subslist)
assert dums(base) != dums(expr)
assert substitute_dummies(expr) == substitute_dummies(base)
template = v(p1, p2, p3, p4)*t(p1, p2, j, i)*t(i, j, p3, p4)
permutator = variations([a, b, c, d], 4)
base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4], permut)
expr = template.subs(subslist)
assert dums(base) != dums(expr)
assert substitute_dummies(expr) == substitute_dummies(base)
def test_equivalent_internal_lines_VT2conjT2_ambiguous_order():
    # These diagrams invoke _determine_ambiguous() because the
    # dummies cannot be ordered unambiguously by the key alone
i, j, k, l, m, n = symbols('i j k l m n', below_fermi=True, cls=Dummy)
a, b, c, d, e, f = symbols('a b c d e f', above_fermi=True, cls=Dummy)
p1, p2, p3, p4 = symbols('p1 p2 p3 p4', above_fermi=True, cls=Dummy)
h1, h2, h3, h4 = symbols('h1 h2 h3 h4', below_fermi=True, cls=Dummy)
from sympy.utilities.iterables import variations
v = Function('v')
t = Function('t')
dums = _get_ordered_dummies
# v(abcd)t(abij)t(cdij)
template = v(p1, p2, p3, p4)*t(p1, p2, i, j)*t(p3, p4, i, j)
permutator = variations([a, b, c, d], 4)
base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4], permut)
expr = template.subs(subslist)
assert dums(base) != dums(expr)
assert substitute_dummies(expr) == substitute_dummies(base)
template = v(p1, p2, p3, p4)*t(p1, p2, j, i)*t(p3, p4, i, j)
permutator = variations([a, b, c, d], 4)
base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4], permut)
expr = template.subs(subslist)
assert dums(base) != dums(expr)
assert substitute_dummies(expr) == substitute_dummies(base)
def test_equivalent_internal_lines_VT2():
i, j, k, l = symbols('i j k l', below_fermi=True, cls=Dummy)
a, b, c, d = symbols('a b c d', above_fermi=True, cls=Dummy)
v = Function('v')
t = Function('t')
dums = _get_ordered_dummies
exprs = [
# permute v. Same dummy order, not equivalent.
#
        # This test shows that the dummy order may not be sensitive to all
        # index permutations. The following expressions have identical
        # structure to the resulting terms from the dummy substitutions
# in the test above. Here, all expressions have the same dummy
# order, so they cannot be simplified by means of dummy
# substitution. In order to simplify further, it is necessary to
# exploit symmetries in the objects, for instance if t or v is
# antisymmetric.
v(i, j, a, b)*t(a, b, i, j),
v(j, i, a, b)*t(a, b, i, j),
v(i, j, b, a)*t(a, b, i, j),
v(j, i, b, a)*t(a, b, i, j),
]
for permut in exprs[1:]:
assert dums(exprs[0]) == dums(permut)
assert substitute_dummies(exprs[0]) != substitute_dummies(permut)
exprs = [
# permute t.
v(i, j, a, b)*t(a, b, i, j),
v(i, j, a, b)*t(b, a, i, j),
v(i, j, a, b)*t(a, b, j, i),
v(i, j, a, b)*t(b, a, j, i),
]
for permut in exprs[1:]:
assert dums(exprs[0]) != dums(permut)
assert substitute_dummies(exprs[0]) != substitute_dummies(permut)
exprs = [ # permute v and t. Relabelling of dummies should be equivalent.
v(i, j, a, b)*t(a, b, i, j),
v(j, i, a, b)*t(a, b, j, i),
v(i, j, b, a)*t(b, a, i, j),
v(j, i, b, a)*t(b, a, j, i),
]
for permut in exprs[1:]:
assert dums(exprs[0]) != dums(permut)
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_internal_external_VT2T2():
ii, jj = symbols('i j', below_fermi=True)
aa, bb = symbols('a b', above_fermi=True)
k, l = symbols('k l', below_fermi=True, cls=Dummy)
c, d = symbols('c d', above_fermi=True, cls=Dummy)
v = Function('v')
t = Function('t')
dums = _get_ordered_dummies
exprs = [
v(k, l, c, d)*t(aa, c, ii, k)*t(bb, d, jj, l),
v(l, k, c, d)*t(aa, c, ii, l)*t(bb, d, jj, k),
v(k, l, d, c)*t(aa, d, ii, k)*t(bb, c, jj, l),
v(l, k, d, c)*t(aa, d, ii, l)*t(bb, c, jj, k),
]
for permut in exprs[1:]:
assert dums(exprs[0]) != dums(permut)
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
exprs = [
v(k, l, c, d)*t(aa, c, ii, k)*t(d, bb, jj, l),
v(l, k, c, d)*t(aa, c, ii, l)*t(d, bb, jj, k),
v(k, l, d, c)*t(aa, d, ii, k)*t(c, bb, jj, l),
v(l, k, d, c)*t(aa, d, ii, l)*t(c, bb, jj, k),
]
for permut in exprs[1:]:
assert dums(exprs[0]) != dums(permut)
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
exprs = [
v(k, l, c, d)*t(c, aa, ii, k)*t(bb, d, jj, l),
v(l, k, c, d)*t(c, aa, ii, l)*t(bb, d, jj, k),
v(k, l, d, c)*t(d, aa, ii, k)*t(bb, c, jj, l),
v(l, k, d, c)*t(d, aa, ii, l)*t(bb, c, jj, k),
]
for permut in exprs[1:]:
assert dums(exprs[0]) != dums(permut)
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_internal_external_pqrs():
ii, jj = symbols('i j')
aa, bb = symbols('a b')
k, l = symbols('k l', cls=Dummy)
c, d = symbols('c d', cls=Dummy)
v = Function('v')
t = Function('t')
dums = _get_ordered_dummies
exprs = [
v(k, l, c, d)*t(aa, c, ii, k)*t(bb, d, jj, l),
v(l, k, c, d)*t(aa, c, ii, l)*t(bb, d, jj, k),
v(k, l, d, c)*t(aa, d, ii, k)*t(bb, c, jj, l),
v(l, k, d, c)*t(aa, d, ii, l)*t(bb, c, jj, k),
]
for permut in exprs[1:]:
assert dums(exprs[0]) != dums(permut)
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_dummy_order_well_defined():
aa, bb = symbols('a b', above_fermi=True)
k, l, m = symbols('k l m', below_fermi=True, cls=Dummy)
c, d = symbols('c d', above_fermi=True, cls=Dummy)
p, q = symbols('p q', cls=Dummy)
A = Function('A')
B = Function('B')
C = Function('C')
dums = _get_ordered_dummies
# We go through all key components in the order of increasing priority,
# and consider only fully orderable expressions. Non-orderable expressions
# are tested elsewhere.
# pos in first factor determines sort order
assert dums(A(k, l)*B(l, k)) == [k, l]
assert dums(A(l, k)*B(l, k)) == [l, k]
assert dums(A(k, l)*B(k, l)) == [k, l]
assert dums(A(l, k)*B(k, l)) == [l, k]
# factors involving the index
assert dums(A(k, l)*B(l, m)*C(k, m)) == [l, k, m]
assert dums(A(k, l)*B(l, m)*C(m, k)) == [l, k, m]
assert dums(A(l, k)*B(l, m)*C(k, m)) == [l, k, m]
assert dums(A(l, k)*B(l, m)*C(m, k)) == [l, k, m]
assert dums(A(k, l)*B(m, l)*C(k, m)) == [l, k, m]
assert dums(A(k, l)*B(m, l)*C(m, k)) == [l, k, m]
assert dums(A(l, k)*B(m, l)*C(k, m)) == [l, k, m]
assert dums(A(l, k)*B(m, l)*C(m, k)) == [l, k, m]
# same, but with factor order determined by non-dummies
assert dums(A(k, aa, l)*A(l, bb, m)*A(bb, k, m)) == [l, k, m]
assert dums(A(k, aa, l)*A(l, bb, m)*A(bb, m, k)) == [l, k, m]
assert dums(A(k, aa, l)*A(m, bb, l)*A(bb, k, m)) == [l, k, m]
assert dums(A(k, aa, l)*A(m, bb, l)*A(bb, m, k)) == [l, k, m]
assert dums(A(l, aa, k)*A(l, bb, m)*A(bb, k, m)) == [l, k, m]
assert dums(A(l, aa, k)*A(l, bb, m)*A(bb, m, k)) == [l, k, m]
assert dums(A(l, aa, k)*A(m, bb, l)*A(bb, k, m)) == [l, k, m]
assert dums(A(l, aa, k)*A(m, bb, l)*A(bb, m, k)) == [l, k, m]
# index range
assert dums(A(p, c, k)*B(p, c, k)) == [k, c, p]
assert dums(A(p, k, c)*B(p, c, k)) == [k, c, p]
assert dums(A(c, k, p)*B(p, c, k)) == [k, c, p]
assert dums(A(c, p, k)*B(p, c, k)) == [k, c, p]
assert dums(A(k, c, p)*B(p, c, k)) == [k, c, p]
assert dums(A(k, p, c)*B(p, c, k)) == [k, c, p]
assert dums(B(p, c, k)*A(p, c, k)) == [k, c, p]
assert dums(B(p, k, c)*A(p, c, k)) == [k, c, p]
assert dums(B(c, k, p)*A(p, c, k)) == [k, c, p]
assert dums(B(c, p, k)*A(p, c, k)) == [k, c, p]
assert dums(B(k, c, p)*A(p, c, k)) == [k, c, p]
assert dums(B(k, p, c)*A(p, c, k)) == [k, c, p]
def test_dummy_order_ambiguous():
aa, bb = symbols('a b', above_fermi=True)
i, j, k, l, m = symbols('i j k l m', below_fermi=True, cls=Dummy)
a, b, c, d, e = symbols('a b c d e', above_fermi=True, cls=Dummy)
p, q = symbols('p q', cls=Dummy)
p1, p2, p3, p4 = symbols('p1 p2 p3 p4', above_fermi=True, cls=Dummy)
p5, p6, p7, p8 = symbols('p5 p6 p7 p8', above_fermi=True, cls=Dummy)
h1, h2, h3, h4 = symbols('h1 h2 h3 h4', below_fermi=True, cls=Dummy)
h5, h6, h7, h8 = symbols('h5 h6 h7 h8', below_fermi=True, cls=Dummy)
A = Function('A')
B = Function('B')
from sympy.utilities.iterables import variations
# A*A*A*A*B -- ordering of p5 and p4 is used to figure out the rest
template = A(p1, p2)*A(p4, p1)*A(p2, p3)*A(p3, p5)*B(p5, p4)
permutator = variations([a, b, c, d, e], 5)
base = template.subs(zip([p1, p2, p3, p4, p5], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4, p5], permut)
expr = template.subs(subslist)
assert substitute_dummies(expr) == substitute_dummies(base)
# A*A*A*A*A -- an arbitrary index is assigned and the rest are figured out
template = A(p1, p2)*A(p4, p1)*A(p2, p3)*A(p3, p5)*A(p5, p4)
permutator = variations([a, b, c, d, e], 5)
base = template.subs(zip([p1, p2, p3, p4, p5], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4, p5], permut)
expr = template.subs(subslist)
assert substitute_dummies(expr) == substitute_dummies(base)
# A*A*A -- ordering of p5 and p4 is used to figure out the rest
template = A(p1, p2, p4, p1)*A(p2, p3, p3, p5)*A(p5, p4)
permutator = variations([a, b, c, d, e], 5)
base = template.subs(zip([p1, p2, p3, p4, p5], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4, p5], permut)
expr = template.subs(subslist)
assert substitute_dummies(expr) == substitute_dummies(base)
def atv(*args):
return AntiSymmetricTensor('v', args[:2], args[2:] )
def att(*args):
if len(args) == 4:
return AntiSymmetricTensor('t', args[:2], args[2:] )
elif len(args) == 2:
return AntiSymmetricTensor('t', (args[0],), (args[1],))
def test_dummy_order_inner_outer_lines_VT1T1T1_AT():
ii = symbols('i', below_fermi=True)
aa = symbols('a', above_fermi=True)
k, l = symbols('k l', below_fermi=True, cls=Dummy)
c, d = symbols('c d', above_fermi=True, cls=Dummy)
# Coupled-Cluster T1 terms with V*T1*T1*T1
# t^{a}_{k} t^{c}_{i} t^{d}_{l} v^{lk}_{dc}
exprs = [
# permut v and t <=> swapping internal lines, equivalent
# irrespective of symmetries in v
atv(k, l, c, d)*att(c, ii)*att(d, l)*att(aa, k),
atv(l, k, c, d)*att(c, ii)*att(d, k)*att(aa, l),
atv(k, l, d, c)*att(d, ii)*att(c, l)*att(aa, k),
atv(l, k, d, c)*att(d, ii)*att(c, k)*att(aa, l),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_dummy_order_inner_outer_lines_VT1T1T1T1_AT():
ii, jj = symbols('i j', below_fermi=True)
aa, bb = symbols('a b', above_fermi=True)
k, l = symbols('k l', below_fermi=True, cls=Dummy)
c, d = symbols('c d', above_fermi=True, cls=Dummy)
# Coupled-Cluster T2 terms with V*T1*T1*T1*T1
# non-equivalent substitutions (change of sign)
exprs = [
# permut t <=> swapping external lines
atv(k, l, c, d)*att(c, ii)*att(d, jj)*att(aa, k)*att(bb, l),
atv(k, l, c, d)*att(c, jj)*att(d, ii)*att(aa, k)*att(bb, l),
atv(k, l, c, d)*att(c, ii)*att(d, jj)*att(bb, k)*att(aa, l),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) == -substitute_dummies(permut)
# equivalent substitutions
exprs = [
atv(k, l, c, d)*att(c, ii)*att(d, jj)*att(aa, k)*att(bb, l),
# permut t <=> swapping external lines
atv(k, l, c, d)*att(c, jj)*att(d, ii)*att(bb, k)*att(aa, l),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_equivalent_internal_lines_VT1T1_AT():
i, j, k, l = symbols('i j k l', below_fermi=True, cls=Dummy)
a, b, c, d = symbols('a b c d', above_fermi=True, cls=Dummy)
exprs = [ # permute v. Different dummy order. Not equivalent.
atv(i, j, a, b)*att(a, i)*att(b, j),
atv(j, i, a, b)*att(a, i)*att(b, j),
atv(i, j, b, a)*att(a, i)*att(b, j),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) != substitute_dummies(permut)
exprs = [ # permute v. Different dummy order. Equivalent
atv(i, j, a, b)*att(a, i)*att(b, j),
atv(j, i, b, a)*att(a, i)*att(b, j),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
exprs = [ # permute t. Same dummy order, not equivalent.
atv(i, j, a, b)*att(a, i)*att(b, j),
atv(i, j, a, b)*att(b, i)*att(a, j),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) != substitute_dummies(permut)
exprs = [ # permute v and t. Different dummy order, equivalent
atv(i, j, a, b)*att(a, i)*att(b, j),
atv(j, i, a, b)*att(a, j)*att(b, i),
atv(i, j, b, a)*att(b, i)*att(a, j),
atv(j, i, b, a)*att(b, j)*att(a, i),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_equivalent_internal_lines_VT2conjT2_AT():
# this diagram requires special handling in TCE
i, j, k, l, m, n = symbols('i j k l m n', below_fermi=True, cls=Dummy)
a, b, c, d, e, f = symbols('a b c d e f', above_fermi=True, cls=Dummy)
p1, p2, p3, p4 = symbols('p1 p2 p3 p4', above_fermi=True, cls=Dummy)
h1, h2, h3, h4 = symbols('h1 h2 h3 h4', below_fermi=True, cls=Dummy)
from sympy.utilities.iterables import variations
# atv(abcd)att(abij)att(ijcd)
template = atv(p1, p2, p3, p4)*att(p1, p2, i, j)*att(i, j, p3, p4)
permutator = variations([a, b, c, d], 4)
base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4], permut)
expr = template.subs(subslist)
assert substitute_dummies(expr) == substitute_dummies(base)
template = atv(p1, p2, p3, p4)*att(p1, p2, j, i)*att(j, i, p3, p4)
permutator = variations([a, b, c, d], 4)
base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4], permut)
expr = template.subs(subslist)
assert substitute_dummies(expr) == substitute_dummies(base)
# atv(abcd)att(abij)att(jicd)
template = atv(p1, p2, p3, p4)*att(p1, p2, i, j)*att(j, i, p3, p4)
permutator = variations([a, b, c, d], 4)
base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4], permut)
expr = template.subs(subslist)
assert substitute_dummies(expr) == substitute_dummies(base)
template = atv(p1, p2, p3, p4)*att(p1, p2, j, i)*att(i, j, p3, p4)
permutator = variations([a, b, c, d], 4)
base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4], permut)
expr = template.subs(subslist)
assert substitute_dummies(expr) == substitute_dummies(base)
def test_equivalent_internal_lines_VT2conjT2_ambiguous_order_AT():
    # These diagrams invoke _determine_ambiguous() because the
    # dummies cannot be ordered unambiguously by the key alone
i, j, k, l, m, n = symbols('i j k l m n', below_fermi=True, cls=Dummy)
a, b, c, d, e, f = symbols('a b c d e f', above_fermi=True, cls=Dummy)
p1, p2, p3, p4 = symbols('p1 p2 p3 p4', above_fermi=True, cls=Dummy)
h1, h2, h3, h4 = symbols('h1 h2 h3 h4', below_fermi=True, cls=Dummy)
from sympy.utilities.iterables import variations
# atv(abcd)att(abij)att(cdij)
template = atv(p1, p2, p3, p4)*att(p1, p2, i, j)*att(p3, p4, i, j)
permutator = variations([a, b, c, d], 4)
base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4], permut)
expr = template.subs(subslist)
assert substitute_dummies(expr) == substitute_dummies(base)
template = atv(p1, p2, p3, p4)*att(p1, p2, j, i)*att(p3, p4, i, j)
permutator = variations([a, b, c, d], 4)
base = template.subs(zip([p1, p2, p3, p4], next(permutator)))
for permut in permutator:
subslist = zip([p1, p2, p3, p4], permut)
expr = template.subs(subslist)
assert substitute_dummies(expr) == substitute_dummies(base)
def test_equivalent_internal_lines_VT2_AT():
i, j, k, l = symbols('i j k l', below_fermi=True, cls=Dummy)
a, b, c, d = symbols('a b c d', above_fermi=True, cls=Dummy)
exprs = [
# permute v. Same dummy order, not equivalent.
atv(i, j, a, b)*att(a, b, i, j),
atv(j, i, a, b)*att(a, b, i, j),
atv(i, j, b, a)*att(a, b, i, j),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) != substitute_dummies(permut)
exprs = [
# permute t.
atv(i, j, a, b)*att(a, b, i, j),
atv(i, j, a, b)*att(b, a, i, j),
atv(i, j, a, b)*att(a, b, j, i),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) != substitute_dummies(permut)
exprs = [ # permute v and t. Relabelling of dummies should be equivalent.
atv(i, j, a, b)*att(a, b, i, j),
atv(j, i, a, b)*att(a, b, j, i),
atv(i, j, b, a)*att(b, a, i, j),
atv(j, i, b, a)*att(b, a, j, i),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_internal_external_VT2T2_AT():
ii, jj = symbols('i j', below_fermi=True)
aa, bb = symbols('a b', above_fermi=True)
k, l = symbols('k l', below_fermi=True, cls=Dummy)
c, d = symbols('c d', above_fermi=True, cls=Dummy)
exprs = [
atv(k, l, c, d)*att(aa, c, ii, k)*att(bb, d, jj, l),
atv(l, k, c, d)*att(aa, c, ii, l)*att(bb, d, jj, k),
atv(k, l, d, c)*att(aa, d, ii, k)*att(bb, c, jj, l),
atv(l, k, d, c)*att(aa, d, ii, l)*att(bb, c, jj, k),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
exprs = [
atv(k, l, c, d)*att(aa, c, ii, k)*att(d, bb, jj, l),
atv(l, k, c, d)*att(aa, c, ii, l)*att(d, bb, jj, k),
atv(k, l, d, c)*att(aa, d, ii, k)*att(c, bb, jj, l),
atv(l, k, d, c)*att(aa, d, ii, l)*att(c, bb, jj, k),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
exprs = [
atv(k, l, c, d)*att(c, aa, ii, k)*att(bb, d, jj, l),
atv(l, k, c, d)*att(c, aa, ii, l)*att(bb, d, jj, k),
atv(k, l, d, c)*att(d, aa, ii, k)*att(bb, c, jj, l),
atv(l, k, d, c)*att(d, aa, ii, l)*att(bb, c, jj, k),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_internal_external_pqrs_AT():
ii, jj = symbols('i j')
aa, bb = symbols('a b')
k, l = symbols('k l', cls=Dummy)
c, d = symbols('c d', cls=Dummy)
exprs = [
atv(k, l, c, d)*att(aa, c, ii, k)*att(bb, d, jj, l),
atv(l, k, c, d)*att(aa, c, ii, l)*att(bb, d, jj, k),
atv(k, l, d, c)*att(aa, d, ii, k)*att(bb, c, jj, l),
atv(l, k, d, c)*att(aa, d, ii, l)*att(bb, c, jj, k),
]
for permut in exprs[1:]:
assert substitute_dummies(exprs[0]) == substitute_dummies(permut)
def test_canonical_ordering_AntiSymmetricTensor():
v = symbols("v")
virtual_indices = ('c', 'd')
occupied_indices = ('k', 'l')
c, d = symbols(('c','d'), above_fermi=True,
cls=Dummy)
k, l = symbols(('k','l'), below_fermi=True,
cls=Dummy)
# formerly, the left gave either the left or the right
assert AntiSymmetricTensor(v, (k, l), (d, c)
) == -AntiSymmetricTensor(v, (l, k), (d, c))
| 37.736555
| 111
| 0.559319
|
794e083870b7f3ce7ca36dfdc022c176f3d4d05d
| 1,424
|
py
|
Python
|
main2.py
|
rivinduchamath/Open_CV-IM
|
4082d24de659d672d9f90a6a3a2dbd942d183781
|
[
"MIT"
] | null | null | null |
main2.py
|
rivinduchamath/Open_CV-IM
|
4082d24de659d672d9f90a6a3a2dbd942d183781
|
[
"MIT"
] | 1
|
2022-03-24T07:07:25.000Z
|
2022-03-24T07:07:25.000Z
|
main2.py
|
rivinduchamath/Python_Open_CV
|
4082d24de659d672d9f90a6a3a2dbd942d183781
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
# Image negative
img = cv2.imread('C:/Users/wogza/Downloads/New folder (2)/A/rectangle.png', 0)
m, n = img.shape
# To find the maximum grey level
# value in the image
L = img.max()
# Maximum grey level value minus
# the original image gives the
# negative image
img_neg = L - img
# convert the np array img_neg to a png image, written to a separate file so
# the source image is not overwritten (output filename chosen for illustration)
cv2.imwrite('C:/Users/wogza/Downloads/New folder (2)/A/rectangle_negative.png', img_neg)
# Thresholding without background
# Let the threshold be T
# Let pixel value in the original image be denoted by r
# Let pixel value in the new image be denoted by s
# If r < T, s = 0
# If r >= T, s = 255
T = 150
# create an array of zeros (uint8 so cv2.imwrite can encode it)
img_thresh = np.zeros((m, n), dtype=np.uint8)
for i in range(m):
for j in range(n):
if img[i, j] < T:
img_thresh[i, j] = 0
else:
img_thresh[i, j] = 255
# Convert array to png image (separate output file)
cv2.imwrite('C:/Users/wogza/Downloads/New folder (2)/A/rectangle_threshold.png', img_thresh)
# the lower threshold value
T1 = 100
# the upper threshold value
T2 = 180
# create an array of zeros (uint8 so cv2.imwrite can encode it)
img_thresh_back = np.zeros((m, n), dtype=np.uint8)
for i in range(m):
for j in range(n):
if T1 < img[i, j] < T2:
img_thresh_back[i, j] = 255
else:
img_thresh_back[i, j] = img[i, j]
# Convert array to png image (separate output file)
cv2.imwrite('C:/Users/wogza/Downloads/New folder (2)/A/rectangle_threshold_background.png', img_thresh_back)
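# ---------------------------------------------------------------------------
# Hedged note (not part of the original script): the same two threshold maps
# can be computed without explicit Python loops using vectorised NumPy
# operations, which is usually much faster on large images.
img_thresh_vec = np.where(img < T, 0, 255).astype(np.uint8)
img_thresh_back_vec = np.where((img > T1) & (img < T2), 255, img).astype(np.uint8)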
| 21.575758
| 89
| 0.650281
|
794e08731eb41ea2ae1268ed0382c9cf0d06977f
| 3,908
|
py
|
Python
|
multiagent/scenarios/simple_spread_v2.py
|
beipeng/multiagent-particle-envs
|
68d605ee3d649e1b1d6d8564ab1ff89cb1ad68d6
|
[
"MIT"
] | null | null | null |
multiagent/scenarios/simple_spread_v2.py
|
beipeng/multiagent-particle-envs
|
68d605ee3d649e1b1d6d8564ab1ff89cb1ad68d6
|
[
"MIT"
] | null | null | null |
multiagent/scenarios/simple_spread_v2.py
|
beipeng/multiagent-particle-envs
|
68d605ee3d649e1b1d6d8564ab1ff89cb1ad68d6
|
[
"MIT"
] | 1
|
2019-07-23T08:44:49.000Z
|
2019-07-23T08:44:49.000Z
|
import numpy as np
from multiagent.core import World, Agent, Landmark
from multiagent.scenario import BaseScenario
class Scenario(BaseScenario):
def make_world(self):
world = World()
# set any world properties first
world.dim_c = 2
num_agents = 4
num_landmarks = 4
world.collaborative = True
# add agents
world.agents = [Agent() for i in range(num_agents)]
for i, agent in enumerate(world.agents):
agent.name = 'agent %d' % i
agent.collide = True
agent.silent = True
agent.size = 0.15
# add landmarks
world.landmarks = [Landmark() for i in range(num_landmarks)]
for i, landmark in enumerate(world.landmarks):
landmark.name = 'landmark %d' % i
landmark.collide = False
landmark.movable = False
# make initial conditions
self.reset_world(world)
return world
def reset_world(self, world):
# random properties for agents
for i, agent in enumerate(world.agents):
agent.color = np.array([0.35, 0.35, 0.85])
# random properties for landmarks
for i, landmark in enumerate(world.landmarks):
landmark.color = np.array([0.25, 0.25, 0.25])
# set random initial states
for agent in world.agents:
agent.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
agent.state.p_vel = np.zeros(world.dim_p)
agent.state.c = np.zeros(world.dim_c)
for i, landmark in enumerate(world.landmarks):
landmark.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
landmark.state.p_vel = np.zeros(world.dim_p)
def benchmark_data(self, agent, world):
rew = 0
collisions = 0
occupied_landmarks = 0
min_dists = 0
for l in world.landmarks:
dists = [np.sqrt(np.sum(np.square(a.state.p_pos - l.state.p_pos))) for a in world.agents]
min_dists += min(dists)
rew -= min(dists)
if min(dists) < 0.1:
occupied_landmarks += 1
if agent.collide:
for a in world.agents:
if self.is_collision(a, agent):
rew -= 1
collisions += 1
return (rew, collisions, min_dists, occupied_landmarks)
def is_collision(self, agent1, agent2):
delta_pos = agent1.state.p_pos - agent2.state.p_pos
dist = np.sqrt(np.sum(np.square(delta_pos)))
dist_min = agent1.size + agent2.size
return True if dist < dist_min else False
def reward(self, agent, world):
# Agents are rewarded based on minimum agent distance to each landmark, penalized for collisions
rew = 0
for l in world.landmarks:
dists = [np.sqrt(np.sum(np.square(a.state.p_pos - l.state.p_pos))) for a in world.agents]
rew -= min(dists)
if agent.collide:
for a in world.agents:
if self.is_collision(a, agent):
rew -= 1
return rew
def observation(self, agent, world):
# get positions of all entities in this agent's reference frame
entity_pos = []
for entity in world.landmarks: # world.entities:
entity_pos.append(entity.state.p_pos - agent.state.p_pos)
# entity colors
entity_color = []
for entity in world.landmarks: # world.entities:
entity_color.append(entity.color)
# communication of all other agents
comm = []
other_pos = []
for other in world.agents:
if other is agent: continue
comm.append(other.state.c)
other_pos.append(other.state.p_pos - agent.state.p_pos)
return np.concatenate([agent.state.p_vel] + [agent.state.p_pos] + entity_pos + other_pos + comm)
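# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original scenario file): how a scenario
# like this is typically wired into the particle-world environment. The
# MultiAgentEnv import reflects the usual layout of this repository and is an
# assumption here.
if __name__ == '__main__':
    from multiagent.environment import MultiAgentEnv
    scenario = Scenario()
    world = scenario.make_world()
    env = MultiAgentEnv(world, scenario.reset_world, scenario.reward,
                        scenario.observation)
    obs_n = env.reset()
    print('observations for %d agents' % len(obs_n))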
| 38.693069
| 104
| 0.587001
|
794e08d77d7616a1cc0ab5b2c04fa46cca659450
| 37
|
py
|
Python
|
hyperpoints/ppo/__init__.py
|
yngtodd/hyperpoints
|
7e444fa547db55114314bd481373b5f4a5dbca71
|
[
"MIT"
] | 1
|
2020-05-11T00:53:53.000Z
|
2020-05-11T00:53:53.000Z
|
hyperpoints/ppo/__init__.py
|
yngtodd/hyperpoints
|
7e444fa547db55114314bd481373b5f4a5dbca71
|
[
"MIT"
] | null | null | null |
hyperpoints/ppo/__init__.py
|
yngtodd/hyperpoints
|
7e444fa547db55114314bd481373b5f4a5dbca71
|
[
"MIT"
] | null | null | null |
from .objective import ppo_objective
| 18.5
| 36
| 0.864865
|
794e09060340ca9169c3390375d652995d9ebf9a
| 68,971
|
py
|
Python
|
numba/parfors/parfor_lowering.py
|
amosbird/numba
|
f9f4c88c3e5967c3316f9298ac21f795d3f40b96
|
[
"BSD-2-Clause"
] | null | null | null |
numba/parfors/parfor_lowering.py
|
amosbird/numba
|
f9f4c88c3e5967c3316f9298ac21f795d3f40b96
|
[
"BSD-2-Clause"
] | null | null | null |
numba/parfors/parfor_lowering.py
|
amosbird/numba
|
f9f4c88c3e5967c3316f9298ac21f795d3f40b96
|
[
"BSD-2-Clause"
] | null | null | null |
import ast
import copy
from collections import OrderedDict
import linecache
import os
import sys
import operator
import numpy as np
import types as pytypes
import warnings
import llvmlite.llvmpy.core as lc
import llvmlite.ir.values as liv
import numba
from numba.parfors import parfor
from numba.core import types, ir, config, compiler, lowering, sigutils, cgutils
from numba.core.ir_utils import (
    add_offset_to_labels, replace_var_names, remove_dels, legalize_names,
    mk_unique_var, rename_labels, get_name_var_table, visit_vars_inner,
    get_definition, guard, find_callname, get_call_table, is_pure,
    get_np_ufunc_typ, get_unused_var_name, find_potential_aliases,
    is_const_call)
from numba.core.analysis import compute_use_defs, compute_live_map, compute_dead_maps, compute_cfg_from_blocks
from numba.core.typing import signature
from numba.parfors.parfor import print_wrapped, ensure_parallel_support
from numba.core.errors import NumbaParallelSafetyWarning, NotDefinedError
from numba.parfors.parfor_lowering_utils import ParforLoweringBuilder
def _lower_parfor_parallel(lowerer, parfor):
"""Lowerer that handles LLVM code generation for parfor.
This function lowers a parfor IR node to LLVM.
The general approach is as follows:
1) The code from the parfor's init block is lowered normally
in the context of the current function.
2) The body of the parfor is transformed into a gufunc function.
3) Code is inserted into the main function that calls do_scheduling
to divide the iteration space for each thread, allocates
reduction arrays, calls the gufunc function, and then invokes
the reduction function across the reduction arrays to produce
the final reduction values.
"""
from numba.np.ufunc.parallel import get_thread_count
ensure_parallel_support()
typingctx = lowerer.context.typing_context
targetctx = lowerer.context
    # We copy the typemap here because for race-condition variables we'll
    # update their types to arrays so they can be updated by the gufunc.
orig_typemap = lowerer.fndesc.typemap
# replace original typemap with copy and restore the original at the end.
lowerer.fndesc.typemap = copy.copy(orig_typemap)
typemap = lowerer.fndesc.typemap
varmap = lowerer.varmap
if config.DEBUG_ARRAY_OPT:
print("_lower_parfor_parallel")
parfor.dump()
loc = parfor.init_block.loc
scope = parfor.init_block.scope
# produce instructions for init_block
if config.DEBUG_ARRAY_OPT:
print("init_block = ", parfor.init_block, " ", type(parfor.init_block))
for instr in parfor.init_block.body:
if config.DEBUG_ARRAY_OPT:
print("lower init_block instr = ", instr)
lowerer.lower_inst(instr)
for racevar in parfor.races:
if racevar not in varmap:
rvtyp = typemap[racevar]
rv = ir.Var(scope, racevar, loc)
lowerer._alloca_var(rv.name, rvtyp)
alias_map = {}
arg_aliases = {}
numba.parfors.parfor.find_potential_aliases_parfor(parfor, parfor.params, typemap,
lowerer.func_ir, alias_map, arg_aliases)
if config.DEBUG_ARRAY_OPT:
print("alias_map", alias_map)
print("arg_aliases", arg_aliases)
    # Run get_parfor_outputs() and get_parfor_reductions() before gufunc creation,
    # since Jumps are modified and the CFG of the loop_body dict would become invalid.
assert parfor.params is not None
parfor_output_arrays = numba.parfors.parfor.get_parfor_outputs(
parfor, parfor.params)
parfor_redvars, parfor_reddict = numba.parfors.parfor.get_parfor_reductions(
lowerer.func_ir, parfor, parfor.params, lowerer.fndesc.calltypes)
# init reduction array allocation here.
nredvars = len(parfor_redvars)
redarrs = {}
if nredvars > 0:
# reduction arrays outer dimension equal to thread count
thread_count = get_thread_count()
scope = parfor.init_block.scope
loc = parfor.init_block.loc
pfbdr = ParforLoweringBuilder(lowerer=lowerer, scope=scope, loc=loc)
# For each reduction variable...
for i in range(nredvars):
redvar_typ = lowerer.fndesc.typemap[parfor_redvars[i]]
redvar = ir.Var(scope, parfor_redvars[i], loc)
redarrvar_typ = redtyp_to_redarraytype(redvar_typ)
reddtype = redarrvar_typ.dtype
if config.DEBUG_ARRAY_OPT:
print("redvar_typ", redvar_typ, redarrvar_typ, reddtype, types.DType(reddtype))
            # If this is a reduction over an array,
# the reduction array has just one added per-worker dimension.
if isinstance(redvar_typ, types.npytypes.Array):
redarrdim = redvar_typ.ndim + 1
else:
redarrdim = 1
# Reduction array is created and initialized to the initial reduction value.
# First create a var for the numpy empty ufunc.
glbl_np_empty = pfbdr.bind_global_function(
fobj=np.empty,
ftype=get_np_ufunc_typ(np.empty),
args=(
types.UniTuple(types.intp, redarrdim),
types.DType(reddtype),
),
)
# Create var for outer dimension size of reduction array equal to number of threads.
num_threads_var = pfbdr.make_const_variable(
cval=thread_count,
typ=types.intp,
name='num_threads',
)
size_var_list = [num_threads_var]
# If this is a reduction over an array...
if isinstance(redvar_typ, types.npytypes.Array):
# Add code to get the shape of the array being reduced over.
redshape_var = pfbdr.assign(
rhs=ir.Expr.getattr(redvar, "shape", loc),
typ=types.UniTuple(types.intp, redvar_typ.ndim),
name="redarr_shape",
)
                # Add the dimension sizes of the array being reduced over to the tuple of sizes passed to empty.
for j in range(redvar_typ.ndim):
onedimvar = pfbdr.assign(
rhs=ir.Expr.static_getitem(redshape_var, j, None, loc),
typ=types.intp,
name="redshapeonedim",
)
size_var_list.append(onedimvar)
# Empty call takes tuple of sizes. Create here and fill in outer dimension (num threads).
size_var = pfbdr.make_tuple_variable(
size_var_list, name='tuple_size_var',
)
# Add call to empty passing the size var tuple.
empty_call = pfbdr.call(glbl_np_empty, args=[size_var])
redarr_var = pfbdr.assign(
rhs=empty_call, typ=redarrvar_typ, name="redarr",
)
# Remember mapping of original reduction array to the newly created per-worker reduction array.
redarrs[redvar.name] = redarr_var
init_val = parfor_reddict[parfor_redvars[i]][0]
if init_val is not None:
if isinstance(redvar_typ, types.npytypes.Array):
# Create an array of identity values for the reduction.
# First, create a variable for np.full.
full_func_node = pfbdr.bind_global_function(
fobj=np.full,
ftype=get_np_ufunc_typ(np.full),
args=(
types.UniTuple(types.intp, redvar_typ.ndim),
reddtype,
types.DType(reddtype),
),
)
                    # Then create a var with the identity value.
init_val_var = pfbdr.make_const_variable(
cval=init_val,
typ=reddtype,
name="init_val",
)
# Then, call np.full with the shape of the reduction array and the identity value.
full_call = pfbdr.call(
full_func_node, args=[redshape_var, init_val_var],
)
redtoset = pfbdr.assign(
rhs=full_call,
typ=redvar_typ,
name="redtoset",
)
else:
redtoset = pfbdr.make_const_variable(
cval=init_val,
typ=reddtype,
name="redtoset",
)
else:
redtoset = redvar
# For each thread, initialize the per-worker reduction array to the current reduction array value.
for j in range(thread_count):
index_var = pfbdr.make_const_variable(
cval=j, typ=types.uintp, name="index_var",
)
pfbdr.setitem(obj=redarr_var, index=index_var, val=redtoset)
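    # For illustration (hypothetical scalar sum reduction `acc += x` with a
    # thread count of 4), the per-worker storage built above is roughly
    # equivalent to:
    #     redarr = np.empty((4,), dtype=acc_dtype)
    #     for j in range(4):
    #         redarr[j] = acc          # current value of the reduction variable
    # Each worker accumulates into its own slot, and the post-gufunc code later
    # in this function folds redarr[0..3] back into `acc`.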
# compile parfor body as a separate function to be used with GUFuncWrapper
flags = copy.copy(parfor.flags)
flags.set('error_model', 'numpy')
# Can't get here unless flags.set('auto_parallel', ParallelOptions(True))
index_var_typ = typemap[parfor.loop_nests[0].index_variable.name]
# index variables should have the same type, check rest of indices
for l in parfor.loop_nests[1:]:
assert typemap[l.index_variable.name] == index_var_typ
numba.parfors.parfor.sequential_parfor_lowering = True
try:
func, func_args, func_sig, redargstartdim, func_arg_types = _create_gufunc_for_parfor_body(
lowerer, parfor, typemap, typingctx, targetctx, flags, {},
bool(alias_map), index_var_typ, parfor.races)
finally:
numba.parfors.parfor.sequential_parfor_lowering = False
# get the shape signature
func_args = ['sched'] + func_args
num_reductions = len(parfor_redvars)
num_inputs = len(func_args) - len(parfor_output_arrays) - num_reductions
if config.DEBUG_ARRAY_OPT:
print("func_args = ", func_args)
print("num_inputs = ", num_inputs)
print("parfor_outputs = ", parfor_output_arrays)
print("parfor_redvars = ", parfor_redvars)
print("num_reductions = ", num_reductions)
gu_signature = _create_shape_signature(
parfor.get_shape_classes,
num_inputs,
num_reductions,
func_args,
redargstartdim,
func_sig,
parfor.races,
typemap)
if config.DEBUG_ARRAY_OPT:
print("gu_signature = ", gu_signature)
# call the func in parallel by wrapping it with ParallelGUFuncBuilder
loop_ranges = [(l.start, l.stop, l.step) for l in parfor.loop_nests]
if config.DEBUG_ARRAY_OPT:
print("loop_nests = ", parfor.loop_nests)
print("loop_ranges = ", loop_ranges)
call_parallel_gufunc(
lowerer,
func,
gu_signature,
func_sig,
func_args,
func_arg_types,
loop_ranges,
parfor_redvars,
parfor_reddict,
redarrs,
parfor.init_block,
index_var_typ,
parfor.races)
if config.DEBUG_ARRAY_OPT:
sys.stdout.flush()
if nredvars > 0:
# Perform the final reduction across the reduction array created above.
thread_count = get_thread_count()
scope = parfor.init_block.scope
loc = parfor.init_block.loc
# For each reduction variable...
for i in range(nredvars):
name = parfor_redvars[i]
redarr = redarrs[name]
redvar_typ = lowerer.fndesc.typemap[name]
if config.DEBUG_ARRAY_OPT:
print("post-gufunc reduction:", name, redarr, redvar_typ)
if config.DEBUG_ARRAY_OPT_RUNTIME:
res_print_str = "res_print"
strconsttyp = types.StringLiteral(res_print_str)
                lhs = pfbdr.make_const_variable(
cval=res_print_str,
typ=strconsttyp,
name="str_const",
)
res_print = ir.Print(args=[lhs, redarr], vararg=None, loc=loc)
lowerer.fndesc.calltypes[res_print] = signature(types.none,
typemap[lhs.name],
typemap[redarr.name])
print("res_print", res_print)
lowerer.lower_inst(res_print)
# For each element in the reduction array created above.
for j in range(thread_count):
# Create index var to access that element.
index_var = pfbdr.make_const_variable(
cval=j, typ=types.uintp, name="index_var",
)
# Read that element from the array into oneelem.
oneelemgetitem = pfbdr.getitem(
obj=redarr, index=index_var, typ=redvar_typ,
)
oneelem = pfbdr.assign(
rhs=oneelemgetitem,
typ=redvar_typ,
name="redelem",
)
init_var = pfbdr.assign_inplace(
rhs=oneelem, typ=redvar_typ, name=name + "#init",
)
if config.DEBUG_ARRAY_OPT_RUNTIME:
res_print_str = "res_print1 for thread " + str(j) + ":"
strconsttyp = types.StringLiteral(res_print_str)
lhs = pfbdr.make_const_variable(
cval=res_print_str,
typ=strconsttyp,
name="str_const",
)
res_print = ir.Print(args=[lhs, index_var, oneelem, init_var, ir.Var(scope, name, loc)],
vararg=None, loc=loc)
lowerer.fndesc.calltypes[res_print] = signature(types.none,
typemap[lhs.name],
typemap[index_var.name],
typemap[oneelem.name],
typemap[init_var.name],
typemap[name])
print("res_print1", res_print)
lowerer.lower_inst(res_print)
# generate code for combining reduction variable with thread output
for inst in parfor_reddict[name][1]:
# If we have a case where a parfor body has an array reduction like A += B
# and A and B have different data types then the reduction in the parallel
                # region will operate on those differing types. However, here, after the
# parallel region, we are summing across the reduction array and that is
# guaranteed to have the same data type so we need to change the reduction
# nodes so that the right-hand sides have a type equal to the reduction-type
# and therefore the left-hand side.
if isinstance(inst, ir.Assign):
rhs = inst.value
# We probably need to generalize this since it only does substitutions in
# inplace_binops.
if (isinstance(rhs, ir.Expr) and rhs.op == 'inplace_binop' and
rhs.rhs.name == init_var.name):
if config.DEBUG_ARRAY_OPT:
print("Adding call to reduction", rhs)
if rhs.fn == operator.isub:
rhs.fn = operator.iadd
rhs.immutable_fn = operator.add
if rhs.fn == operator.itruediv or rhs.fn == operator.ifloordiv:
rhs.fn = operator.imul
rhs.immutable_fn = operator.mul
if config.DEBUG_ARRAY_OPT:
print("After changing sub to add or div to mul", rhs)
# Get calltype of rhs.
ct = lowerer.fndesc.calltypes[rhs]
assert(len(ct.args) == 2)
                            # Create new arg types, replacing the second arg type with the reduction var type.
ctargs = (ct.args[0], redvar_typ)
# Update the signature of the call.
ct = ct.replace(args=ctargs)
# Remove so we can re-insert since calltypes is unique dict.
lowerer.fndesc.calltypes.pop(rhs)
# Add calltype back in for the expr with updated signature.
lowerer.fndesc.calltypes[rhs] = ct
lowerer.lower_inst(inst)
                    # Only process reduction statements post-gufunc execution
                    # until we see an assignment whose left-hand side is the
                    # reduction variable's name. This fixes problems with
                    # cases where there are multiple assignments to the
                    # reduction variable in the parfor.
if isinstance(inst, ir.Assign):
try:
reduction_var = scope.get_exact(name)
except NotDefinedError:
# Ideally, this shouldn't happen. The redvar name
# missing from scope indicates an error from
# other rewrite passes.
is_same_source_var = name == inst.target.name
else:
# Because of SSA, the redvar and target var of
# the current assignment would be different even
# though they refer to the same source-level var.
redvar_unver_name = reduction_var.unversioned_name
target_unver_name = inst.target.unversioned_name
is_same_source_var = redvar_unver_name == target_unver_name
if is_same_source_var:
# If redvar is different from target var, add an
# assignment to put target var into redvar.
if name != inst.target.name:
pfbdr.assign_inplace(
rhs=inst.target, typ=redvar_typ,
name=name,
)
break
if config.DEBUG_ARRAY_OPT_RUNTIME:
res_print_str = "res_print2 for thread " + str(j) + ":"
strconsttyp = types.StringLiteral(res_print_str)
lhs = pfbdr.make_const_variable(
cval=res_print_str,
typ=strconsttyp,
name="str_const",
)
res_print = ir.Print(args=[lhs, index_var, oneelem, init_var, ir.Var(scope, name, loc)],
vararg=None, loc=loc)
lowerer.fndesc.calltypes[res_print] = signature(types.none,
typemap[lhs.name],
typemap[index_var.name],
typemap[oneelem.name],
typemap[init_var.name],
typemap[name])
print("res_print2", res_print)
lowerer.lower_inst(res_print)
# Cleanup reduction variable
for v in redarrs.values():
lowerer.lower_inst(ir.Del(v.name, loc=loc))
# Restore the original typemap of the function that was replaced temporarily at the
    # beginning of this function.
lowerer.fndesc.typemap = orig_typemap
if config.DEBUG_ARRAY_OPT:
print("_lower_parfor_parallel done")
# A work-around to prevent circular imports
lowering.lower_extensions[parfor.Parfor] = _lower_parfor_parallel
def _create_shape_signature(
get_shape_classes,
num_inputs,
num_reductions,
args,
redargstartdim,
func_sig,
races,
typemap):
'''Create shape signature for GUFunc
'''
if config.DEBUG_ARRAY_OPT:
print("_create_shape_signature", num_inputs, num_reductions, args, redargstartdim)
for i in args[1:]:
print("argument", i, type(i), get_shape_classes(i, typemap=typemap))
num_inouts = len(args) - num_reductions
# maximum class number for array shapes
classes = [get_shape_classes(var, typemap=typemap) if var not in races else (-1,) for var in args[1:]]
class_set = set()
for _class in classes:
if _class:
for i in _class:
class_set.add(i)
max_class = max(class_set) + 1 if class_set else 0
classes.insert(0, (max_class,)) # force set the class of 'sched' argument
class_set.add(max_class)
class_map = {}
# TODO: use prefix + class number instead of single char
alphabet = ord('a')
for n in class_set:
if n >= 0:
class_map[n] = chr(alphabet)
alphabet += 1
alpha_dict = {'latest_alpha' : alphabet}
def bump_alpha(c, class_map):
if c >= 0:
return class_map[c]
else:
alpha_dict['latest_alpha'] += 1
return chr(alpha_dict['latest_alpha'])
gu_sin = []
gu_sout = []
count = 0
syms_sin = ()
if config.DEBUG_ARRAY_OPT:
print("args", args)
print("classes", classes)
for cls, arg in zip(classes, args):
count = count + 1
if cls:
dim_syms = tuple(bump_alpha(c, class_map) for c in cls)
else:
dim_syms = ()
if (count > num_inouts):
# Strip the first symbol corresponding to the number of workers
# so that guvectorize will parallelize across the reduction.
gu_sin.append(dim_syms[redargstartdim[arg]:])
else:
gu_sin.append(dim_syms)
syms_sin += dim_syms
return (gu_sin, gu_sout)
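# A small self-contained sketch (not used by the lowering code) of the idea
# above: arguments whose shape dimensions fall in the same equivalence class
# share a dimension symbol in the gufunc signature. Class numbers below are
# hypothetical.
def _example_shape_symbols(classes):
    symbols = {}
    next_char = ord('a')
    out = []
    for cls in classes:
        dims = []
        for c in cls:
            if c not in symbols:
                symbols[c] = chr(next_char)
                next_char += 1
            dims.append(symbols[c])
        out.append(tuple(dims))
    return out
# _example_shape_symbols([(3,), (3,), (3, 7)]) == [('a',), ('a',), ('a', 'b')]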
def _print_block(block):
for i, inst in enumerate(block.body):
print(" ", i, " ", inst)
def _print_body(body_dict):
'''Pretty-print a set of IR blocks.
'''
for label, block in body_dict.items():
print("label: ", label)
_print_block(block)
def wrap_loop_body(loop_body):
blocks = loop_body.copy() # shallow copy is enough
first_label = min(blocks.keys())
last_label = max(blocks.keys())
loc = blocks[last_label].loc
blocks[last_label].body.append(ir.Jump(first_label, loc))
return blocks
def unwrap_loop_body(loop_body):
last_label = max(loop_body.keys())
loop_body[last_label].body = loop_body[last_label].body[:-1]
def add_to_def_once_sets(a_def, def_once, def_more):
'''If the variable is already defined more than once, do nothing.
Else if defined exactly once previously then transition this
variable to the defined more than once set (remove it from
def_once set and add to def_more set).
Else this must be the first time we've seen this variable defined
so add to def_once set.
'''
if a_def in def_more:
pass
elif a_def in def_once:
def_more.add(a_def)
def_once.remove(a_def)
else:
def_once.add(a_def)
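# A minimal usage sketch of the transition logic above, using plain sets
# (names are hypothetical; this helper is never called by the lowering code):
def _example_def_once_transitions():
    def_once, def_more = set(), set()
    add_to_def_once_sets("x", def_once, def_more)  # first def  -> def_once == {"x"}
    add_to_def_once_sets("x", def_once, def_more)  # second def -> moved to def_more
    add_to_def_once_sets("x", def_once, def_more)  # further defs are no-ops
    return def_once, def_more                      # (set(), {"x"})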
def compute_def_once_block(block, def_once, def_more, getattr_taken, typemap, module_assigns):
'''Effect changes to the set of variables defined once or more than once
for a single block.
block - the block to process
def_once - set of variable names known to be defined exactly once
def_more - set of variable names known to be defined more than once
getattr_taken - dict mapping variable name to tuple of object and attribute taken
module_assigns - dict mapping variable name to the Global that they came from
'''
# The only "defs" occur in assignments, so find such instructions.
assignments = block.find_insts(ir.Assign)
# For each assignment...
for one_assign in assignments:
# Get the LHS/target of the assignment.
a_def = one_assign.target.name
# Add variable to def sets.
add_to_def_once_sets(a_def, def_once, def_more)
rhs = one_assign.value
if isinstance(rhs, ir.Global):
# Remember assignments of the form "a = Global(...)"
# Is this a module?
if isinstance(rhs.value, pytypes.ModuleType):
module_assigns[a_def] = rhs.value.__name__
if isinstance(rhs, ir.Expr) and rhs.op == 'getattr' and rhs.value.name in def_once:
# Remember assignments of the form "a = b.c"
getattr_taken[a_def] = (rhs.value.name, rhs.attr)
if isinstance(rhs, ir.Expr) and rhs.op == 'call' and rhs.func.name in getattr_taken:
# If "a" is being called then lookup the getattr definition of "a"
# as above, getting the module variable "b" (base_obj)
# and the attribute "c" (base_attr).
base_obj, base_attr = getattr_taken[rhs.func.name]
if base_obj in module_assigns:
# If we know the definition of the module variable then get the module
# name from module_assigns.
base_mod_name = module_assigns[base_obj]
if not is_const_call(base_mod_name, base_attr):
# Calling a method on an object could modify the object and is thus
# like a def of that object. We call is_const_call to see if this module/attribute
# combination is known to not modify the module state. If we don't know that
# the combination is safe then we have to assume there could be a modification to
# the module and thus add the module variable as defined more than once.
add_to_def_once_sets(base_obj, def_once, def_more)
else:
# Assume the worst and say that base_obj could be modified by the call.
add_to_def_once_sets(base_obj, def_once, def_more)
if isinstance(rhs, ir.Expr) and rhs.op == 'call':
# If a mutable object is passed to a function, then it may be changed and
# therefore can't be hoisted.
# For each argument to the function...
for argvar in rhs.args:
# Get the argument's type.
if isinstance(argvar, ir.Var):
argvar = argvar.name
avtype = typemap[argvar]
# If that type doesn't have a mutable attribute or it does and it's set to
# not mutable then this usage is safe for hoisting.
if getattr(avtype, 'mutable', False):
# Here we have a mutable variable passed to a function so add this variable
# to the def lists.
add_to_def_once_sets(argvar, def_once, def_more)
def compute_def_once_internal(loop_body, def_once, def_more, getattr_taken, typemap, module_assigns):
'''Compute the set of variables defined exactly once in the given set of blocks
and use the given sets for storing which variables are defined once, more than
once and which have had a getattr call on them.
'''
# For each block...
for label, block in loop_body.items():
# Scan this block and effect changes to def_once, def_more, and getattr_taken
# based on the instructions in that block.
compute_def_once_block(block, def_once, def_more, getattr_taken, typemap, module_assigns)
# Have to recursively process parfors manually here.
for inst in block.body:
if isinstance(inst, parfor.Parfor):
# Recursively compute for the parfor's init block.
compute_def_once_block(inst.init_block, def_once, def_more, getattr_taken, typemap, module_assigns)
# Recursively compute for the parfor's loop body.
compute_def_once_internal(inst.loop_body, def_once, def_more, getattr_taken, typemap, module_assigns)
def compute_def_once(loop_body, typemap):
'''Compute the set of variables defined exactly once in the given set of blocks.
'''
def_once = set() # set to hold variables defined exactly once
def_more = set() # set to hold variables defined more than once
getattr_taken = {}
module_assigns = {}
compute_def_once_internal(loop_body, def_once, def_more, getattr_taken, typemap, module_assigns)
return def_once
def find_vars(var, varset):
assert isinstance(var, ir.Var)
varset.add(var.name)
return var
def _hoist_internal(inst, dep_on_param, call_table, hoisted, not_hoisted,
typemap, stored_arrays):
if inst.target.name in stored_arrays:
not_hoisted.append((inst, "stored array"))
if config.DEBUG_ARRAY_OPT >= 1:
print("Instruction", inst, " could not be hoisted because the created array is stored.")
return False
uses = set()
visit_vars_inner(inst.value, find_vars, uses)
diff = uses.difference(dep_on_param)
if config.DEBUG_ARRAY_OPT >= 1:
print("_hoist_internal:", inst, "uses:", uses, "diff:", diff)
if len(diff) == 0 and is_pure(inst.value, None, call_table):
if config.DEBUG_ARRAY_OPT >= 1:
print("Will hoist instruction", inst, typemap[inst.target.name])
hoisted.append(inst)
if not isinstance(typemap[inst.target.name], types.npytypes.Array):
dep_on_param += [inst.target.name]
return True
else:
if len(diff) > 0:
not_hoisted.append((inst, "dependency"))
if config.DEBUG_ARRAY_OPT >= 1:
print("Instruction", inst, " could not be hoisted because of a dependency.")
else:
not_hoisted.append((inst, "not pure"))
if config.DEBUG_ARRAY_OPT >= 1:
print("Instruction", inst, " could not be hoisted because it isn't pure.")
return False
def find_setitems_block(setitems, itemsset, block, typemap):
for inst in block.body:
if isinstance(inst, ir.StaticSetItem) or isinstance(inst, ir.SetItem):
setitems.add(inst.target.name)
# If we store a non-mutable object into an array then that is safe to hoist.
# If the stored object is mutable and you hoist then multiple entries in the
# outer array could reference the same object and changing one index would then
# change other indices.
if getattr(typemap[inst.value.name], "mutable", False):
itemsset.add(inst.value.name)
elif isinstance(inst, parfor.Parfor):
find_setitems_block(setitems, itemsset, inst.init_block, typemap)
find_setitems_body(setitems, itemsset, inst.loop_body, typemap)
def find_setitems_body(setitems, itemsset, loop_body, typemap):
"""
    Find the arrays that are written into (these go into setitems) and the
    mutable objects (mostly arrays) that are written into other arrays
    (these go into itemsset).
"""
for label, block in loop_body.items():
find_setitems_block(setitems, itemsset, block, typemap)
def hoist(parfor_params, loop_body, typemap, wrapped_blocks):
dep_on_param = copy.copy(parfor_params)
hoisted = []
not_hoisted = []
# Compute the set of variable defined exactly once in the loop body.
def_once = compute_def_once(loop_body, typemap)
(call_table, reverse_call_table) = get_call_table(wrapped_blocks)
setitems = set()
itemsset = set()
find_setitems_body(setitems, itemsset, loop_body, typemap)
dep_on_param = list(set(dep_on_param).difference(setitems))
if config.DEBUG_ARRAY_OPT >= 1:
print("hoist - def_once:", def_once, "setitems:", setitems, "itemsset:", itemsset, "dep_on_param:", dep_on_param, "parfor_params:", parfor_params)
for label, block in loop_body.items():
new_block = []
for inst in block.body:
if isinstance(inst, ir.Assign) and inst.target.name in def_once:
if _hoist_internal(inst, dep_on_param, call_table,
hoisted, not_hoisted, typemap, itemsset):
# don't add this instruction to the block since it is
# hoisted
continue
elif isinstance(inst, parfor.Parfor):
new_init_block = []
if config.DEBUG_ARRAY_OPT >= 1:
print("parfor")
inst.dump()
for ib_inst in inst.init_block.body:
if (isinstance(ib_inst, ir.Assign) and
ib_inst.target.name in def_once):
if _hoist_internal(ib_inst, dep_on_param, call_table,
hoisted, not_hoisted, typemap, itemsset):
                            # don't add this instruction to the block since it is hoisted
continue
new_init_block.append(ib_inst)
inst.init_block.body = new_init_block
new_block.append(inst)
block.body = new_block
return hoisted, not_hoisted
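# The effect of hoist() is classic loop-invariant code motion. A plain-Python
# analogy (no Numba IR involved, names hypothetical) of what the transformation
# achieves:
def _example_loop_invariant_motion(a, b, out):
    # `c = a * b` is pure, depends only on loop-invariant values, and is defined
    # exactly once, so hoist() would move the equivalent IR assignment in front
    # of the loop instead of recomputing it every iteration.
    c = a * b
    for i in range(len(out)):
        out[i] = c * i
    return out
# Assignments that depend on values written in the loop, that target arrays
# which are stored into, or whose right-hand sides are not pure stay in place
# and are reported in `not_hoisted` together with the reason.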
def redtyp_is_scalar(redtype):
return not isinstance(redtype, types.npytypes.Array)
def redtyp_to_redarraytype(redtyp):
"""Go from a reducation variable type to a reduction array type used to hold
per-worker results.
"""
redarrdim = 1
# If the reduction type is an array then allocate reduction array with ndim+1 dimensions.
if isinstance(redtyp, types.npytypes.Array):
redarrdim += redtyp.ndim
    # We don't create an array of arrays but a multi-dimensional reduction array with the same dtype.
redtyp = redtyp.dtype
return types.npytypes.Array(redtyp, redarrdim, "C")
def redarraytype_to_sig(redarraytyp):
"""Given a reduction array type, find the type of the reduction argument to the gufunc.
Scalar and 1D array reduction both end up with 1D gufunc param type since scalars have to
be passed as arrays.
"""
assert isinstance(redarraytyp, types.npytypes.Array)
return types.npytypes.Array(redarraytyp.dtype, max(1, redarraytyp.ndim - 1), redarraytyp.layout)
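# Illustrative expectations for the two helpers above (a sketch relying on
# Numba's structural equality of array types; never called by the lowering code):
def _example_reduction_array_types():
    # A scalar reduction gets a 1-D per-worker array.
    assert redtyp_to_redarraytype(types.float64) == types.npytypes.Array(types.float64, 1, "C")
    # An array reduction gains one leading per-worker dimension.
    assert redtyp_to_redarraytype(types.npytypes.Array(types.float64, 2, "C")) == \
        types.npytypes.Array(types.float64, 3, "C")
    # A 1-D reduction array stays 1-D in the gufunc signature.
    assert redarraytype_to_sig(types.npytypes.Array(types.float64, 1, "C")) == \
        types.npytypes.Array(types.float64, 1, "C")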
def legalize_names_with_typemap(names, typemap):
""" We use ir_utils.legalize_names to replace internal IR variable names
containing illegal characters (e.g. period) with a legal character
(underscore) so as to create legal variable names.
    The original variable names are in the typemap, so we also need to add the
    legalized names to the typemap.
"""
outdict = legalize_names(names)
# For each pair in the dict of legalized names...
for x, y in outdict.items():
# If the name had some legalization change to it...
if x != y:
# Set the type of the new name the same as the type of the old name.
typemap[y] = typemap[x]
return outdict
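# A hypothetical usage sketch (variable names and types made up): internal IR
# names such as "$0.1" are not valid Python identifiers, so they are mapped to
# legalized names and the typemap gains entries for the new names while keeping
# the old ones.
def _example_legalize(typemap):
    mapping = legalize_names_with_typemap(list(typemap.keys()), typemap)
    # Every legalized name is typed identically to its original.
    assert all(typemap[new] == typemap[old] for old, new in mapping.items())
    return mapping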
def to_scalar_from_0d(x):
if isinstance(x, types.ArrayCompatible):
if x.ndim == 0:
return x.dtype
return x
def _create_gufunc_for_parfor_body(
lowerer,
parfor,
typemap,
typingctx,
targetctx,
flags,
locals,
has_aliases,
index_var_typ,
races):
'''
Takes a parfor and creates a gufunc function for its body.
There are two parts to this function.
1) Code to iterate across the iteration space as defined by the schedule.
2) The parfor body that does the work for a single point in the iteration space.
Part 1 is created as Python text for simplicity with a sentinel assignment to mark the point
in the IR where the parfor body should be added.
This Python text is 'exec'ed into existence and its IR retrieved with run_frontend.
The IR is scanned for the sentinel assignment where that basic block is split and the IR
for the parfor body inserted.
'''
if config.DEBUG_ARRAY_OPT >= 1:
print("starting _create_gufunc_for_parfor_body")
loc = parfor.init_block.loc
# The parfor body and the main function body share ir.Var nodes.
# We have to do some replacements of Var names in the parfor body to make them
# legal parameter names. If we don't copy then the Vars in the main function also
# would incorrectly change their name.
loop_body = copy.copy(parfor.loop_body)
remove_dels(loop_body)
parfor_dim = len(parfor.loop_nests)
loop_indices = [l.index_variable.name for l in parfor.loop_nests]
# Get all the parfor params.
parfor_params = parfor.params
# Get just the outputs of the parfor.
parfor_outputs = numba.parfors.parfor.get_parfor_outputs(parfor, parfor_params)
# Get all parfor reduction vars, and operators.
typemap = lowerer.fndesc.typemap
parfor_redvars, parfor_reddict = numba.parfors.parfor.get_parfor_reductions(
lowerer.func_ir, parfor, parfor_params, lowerer.fndesc.calltypes)
# Compute just the parfor inputs as a set difference.
parfor_inputs = sorted(
list(
set(parfor_params) -
set(parfor_outputs) -
set(parfor_redvars)))
if config.DEBUG_ARRAY_OPT >= 1:
print("parfor_params = ", parfor_params, " ", type(parfor_params))
print("parfor_outputs = ", parfor_outputs, " ", type(parfor_outputs))
print("parfor_inputs = ", parfor_inputs, " ", type(parfor_inputs))
print("parfor_redvars = ", parfor_redvars, " ", type(parfor_redvars))
races = races.difference(set(parfor_redvars))
for race in races:
msg = ("Variable %s used in parallel loop may be written "
"to simultaneously by multiple workers and may result "
"in non-deterministic or unintended results." % race)
warnings.warn(NumbaParallelSafetyWarning(msg, loc))
replace_var_with_array(races, loop_body, typemap, lowerer.fndesc.calltypes)
# Reduction variables are represented as arrays, so they go under
# different names.
parfor_redarrs = []
parfor_red_arg_types = []
for var in parfor_redvars:
arr = var + "_arr"
parfor_redarrs.append(arr)
redarraytype = redtyp_to_redarraytype(typemap[var])
parfor_red_arg_types.append(redarraytype)
redarrsig = redarraytype_to_sig(redarraytype)
if arr in typemap:
assert(typemap[arr] == redarrsig)
else:
typemap[arr] = redarrsig
# Reorder all the params so that inputs go first then outputs.
parfor_params = parfor_inputs + parfor_outputs + parfor_redarrs
if config.DEBUG_ARRAY_OPT >= 1:
print("parfor_params = ", parfor_params, " ", type(parfor_params))
print("loop_indices = ", loop_indices, " ", type(loop_indices))
print("loop_body = ", loop_body, " ", type(loop_body))
_print_body(loop_body)
    # Some Vars are not legal parameter names, so create a dict mapping each
    # potentially illegal param name to a guaranteed legal name.
param_dict = legalize_names_with_typemap(parfor_params + parfor_redvars, typemap)
if config.DEBUG_ARRAY_OPT >= 1:
print(
"param_dict = ",
sorted(
param_dict.items()),
" ",
type(param_dict))
    # Some loop_indices are not legal parameter names, so create a dict mapping each
    # potentially illegal loop index to a guaranteed legal name.
ind_dict = legalize_names_with_typemap(loop_indices, typemap)
# Compute a new list of legal loop index names.
legal_loop_indices = [ind_dict[v] for v in loop_indices]
if config.DEBUG_ARRAY_OPT >= 1:
print("ind_dict = ", sorted(ind_dict.items()), " ", type(ind_dict))
print(
"legal_loop_indices = ",
legal_loop_indices,
" ",
type(legal_loop_indices))
for pd in parfor_params:
print("pd = ", pd)
print("pd type = ", typemap[pd], " ", type(typemap[pd]))
# Get the types of each parameter.
param_types = [to_scalar_from_0d(typemap[v]) for v in parfor_params]
# Calculate types of args passed to gufunc.
func_arg_types = [typemap[v] for v in (parfor_inputs + parfor_outputs)] + parfor_red_arg_types
# Replace illegal parameter names in the loop body with legal ones.
replace_var_names(loop_body, param_dict)
    # Remember the names before legalizing as the actual arguments.
parfor_args = parfor_params
# Change parfor_params to be legal names.
parfor_params = [param_dict[v] for v in parfor_params]
parfor_params_orig = parfor_params
parfor_params = []
ascontig = False
for pindex in range(len(parfor_params_orig)):
if (ascontig and
pindex < len(parfor_inputs) and
isinstance(param_types[pindex], types.npytypes.Array)):
parfor_params.append(parfor_params_orig[pindex]+"param")
else:
parfor_params.append(parfor_params_orig[pindex])
# Change parfor body to replace illegal loop index vars with legal ones.
replace_var_names(loop_body, ind_dict)
loop_body_var_table = get_name_var_table(loop_body)
sentinel_name = get_unused_var_name("__sentinel__", loop_body_var_table)
if config.DEBUG_ARRAY_OPT >= 1:
print(
"legal parfor_params = ",
parfor_params,
" ",
type(parfor_params))
# Determine the unique names of the scheduling and gufunc functions.
# sched_func_name = "__numba_parfor_sched_%s" % (hex(hash(parfor)).replace("-", "_"))
gufunc_name = "__numba_parfor_gufunc_%s" % (
hex(hash(parfor)).replace("-", "_"))
if config.DEBUG_ARRAY_OPT:
# print("sched_func_name ", type(sched_func_name), " ", sched_func_name)
print("gufunc_name ", type(gufunc_name), " ", gufunc_name)
gufunc_txt = ""
# Create the gufunc function.
gufunc_txt += "def " + gufunc_name + \
"(sched, " + (", ".join(parfor_params)) + "):\n"
for pindex in range(len(parfor_inputs)):
if ascontig and isinstance(param_types[pindex], types.npytypes.Array):
gufunc_txt += (" " + parfor_params_orig[pindex]
+ " = np.ascontiguousarray(" + parfor_params[pindex] + ")\n")
# Add initialization of reduction variables
for arr, var in zip(parfor_redarrs, parfor_redvars):
# If reduction variable is a scalar then save current value to
# temp and accumulate on that temp to prevent false sharing.
if redtyp_is_scalar(typemap[var]):
gufunc_txt += " " + param_dict[var] + \
"=" + param_dict[arr] + "[0]\n"
else:
# The reduction variable is an array so np.copy it to a temp.
gufunc_txt += " " + param_dict[var] + \
"=np.copy(" + param_dict[arr] + ")\n"
# For each dimension of the parfor, create a for loop in the generated gufunc function.
# Iterate across the proper values extracted from the schedule.
# The form of the schedule is start_dim0, start_dim1, ..., start_dimN, end_dim0,
# end_dim1, ..., end_dimN
for eachdim in range(parfor_dim):
for indent in range(eachdim + 1):
gufunc_txt += " "
sched_dim = eachdim
gufunc_txt += ("for " +
legal_loop_indices[eachdim] +
" in range(sched[" +
str(sched_dim) +
"], sched[" +
str(sched_dim +
parfor_dim) +
"] + np.uint8(1)):\n")
if config.DEBUG_ARRAY_OPT_RUNTIME:
for indent in range(parfor_dim + 1):
gufunc_txt += " "
gufunc_txt += "print("
for eachdim in range(parfor_dim):
gufunc_txt += "\"" + legal_loop_indices[eachdim] + "\"," + legal_loop_indices[eachdim] + ","
gufunc_txt += ")\n"
# Add the sentinel assignment so that we can find the loop body position
# in the IR.
for indent in range(parfor_dim + 1):
gufunc_txt += " "
gufunc_txt += sentinel_name + " = 0\n"
# Add assignments of reduction variables (for returning the value)
redargstartdim = {}
for arr, var in zip(parfor_redarrs, parfor_redvars):
# After the gufunc loops, copy the accumulated temp value back to reduction array.
if redtyp_is_scalar(typemap[var]):
gufunc_txt += " " + param_dict[arr] + \
"[0] = " + param_dict[var] + "\n"
redargstartdim[arr] = 1
else:
# After the gufunc loops, copy the accumulated temp array back to reduction array with ":"
gufunc_txt += " " + param_dict[arr] + \
"[:] = " + param_dict[var] + "[:]\n"
redargstartdim[arr] = 0
gufunc_txt += " return None\n"
if config.DEBUG_ARRAY_OPT:
print("gufunc_txt = ", type(gufunc_txt), "\n", gufunc_txt)
# Force gufunc outline into existence.
globls = {"np": np}
locls = {}
exec(gufunc_txt, globls, locls)
gufunc_func = locls[gufunc_name]
if config.DEBUG_ARRAY_OPT:
print("gufunc_func = ", type(gufunc_func), "\n", gufunc_func)
# Get the IR for the gufunc outline.
gufunc_ir = compiler.run_frontend(gufunc_func)
if config.DEBUG_ARRAY_OPT:
print("gufunc_ir dump ", type(gufunc_ir))
gufunc_ir.dump()
print("loop_body dump ", type(loop_body))
_print_body(loop_body)
# rename all variables in gufunc_ir afresh
var_table = get_name_var_table(gufunc_ir.blocks)
new_var_dict = {}
reserved_names = [sentinel_name] + \
list(param_dict.values()) + legal_loop_indices
for name, var in var_table.items():
if not (name in reserved_names):
new_var_dict[name] = mk_unique_var(name)
replace_var_names(gufunc_ir.blocks, new_var_dict)
if config.DEBUG_ARRAY_OPT:
print("gufunc_ir dump after renaming ")
gufunc_ir.dump()
gufunc_param_types = [types.npytypes.Array(
index_var_typ, 1, "C")] + param_types
if config.DEBUG_ARRAY_OPT:
print(
"gufunc_param_types = ",
type(gufunc_param_types),
"\n",
gufunc_param_types)
gufunc_stub_last_label = max(gufunc_ir.blocks.keys()) + 1
# Add gufunc stub last label to each parfor.loop_body label to prevent
# label conflicts.
loop_body = add_offset_to_labels(loop_body, gufunc_stub_last_label)
# new label for splitting sentinel block
new_label = max(loop_body.keys()) + 1
# If enabled, add a print statement after every assignment.
if config.DEBUG_ARRAY_OPT_RUNTIME:
for label, block in loop_body.items():
new_block = block.copy()
new_block.clear()
loc = block.loc
scope = block.scope
for inst in block.body:
new_block.append(inst)
# Append print after assignment
if isinstance(inst, ir.Assign):
# Only apply to numbers
if typemap[inst.target.name] not in types.number_domain:
continue
# Make constant string
strval = "{} =".format(inst.target.name)
strconsttyp = types.StringLiteral(strval)
lhs = ir.Var(scope, mk_unique_var("str_const"), loc)
assign_lhs = ir.Assign(value=ir.Const(value=strval, loc=loc),
target=lhs, loc=loc)
typemap[lhs.name] = strconsttyp
new_block.append(assign_lhs)
# Make print node
print_node = ir.Print(args=[lhs, inst.target], vararg=None, loc=loc)
new_block.append(print_node)
                    sig = signature(types.none,
                                    typemap[lhs.name],
                                    typemap[inst.target.name])
lowerer.fndesc.calltypes[print_node] = sig
loop_body[label] = new_block
if config.DEBUG_ARRAY_OPT:
print("parfor loop body")
_print_body(loop_body)
wrapped_blocks = wrap_loop_body(loop_body)
hoisted, not_hoisted = hoist(parfor_params, loop_body, typemap, wrapped_blocks)
start_block = gufunc_ir.blocks[min(gufunc_ir.blocks.keys())]
start_block.body = start_block.body[:-1] + hoisted + [start_block.body[-1]]
unwrap_loop_body(loop_body)
# store hoisted into diagnostics
diagnostics = lowerer.metadata['parfor_diagnostics']
diagnostics.hoist_info[parfor.id] = {'hoisted': hoisted,
'not_hoisted': not_hoisted}
if config.DEBUG_ARRAY_OPT:
print("After hoisting")
_print_body(loop_body)
    # Search all the blocks in the gufunc outline for the sentinel assignment.
for label, block in gufunc_ir.blocks.items():
for i, inst in enumerate(block.body):
if isinstance(
inst,
ir.Assign) and inst.target.name == sentinel_name:
# We found the sentinel assignment.
loc = inst.loc
scope = block.scope
# split block across __sentinel__
# A new block is allocated for the statements prior to the sentinel
# but the new block maintains the current block label.
prev_block = ir.Block(scope, loc)
prev_block.body = block.body[:i]
# The current block is used for statements after the sentinel.
block.body = block.body[i + 1:]
# But the current block gets a new label.
body_first_label = min(loop_body.keys())
# The previous block jumps to the minimum labelled block of the
# parfor body.
prev_block.append(ir.Jump(body_first_label, loc))
# Add all the parfor loop body blocks to the gufunc function's
# IR.
for (l, b) in loop_body.items():
gufunc_ir.blocks[l] = b
body_last_label = max(loop_body.keys())
gufunc_ir.blocks[new_label] = block
gufunc_ir.blocks[label] = prev_block
# Add a jump from the last parfor body block to the block containing
# statements after the sentinel.
gufunc_ir.blocks[body_last_label].append(
ir.Jump(new_label, loc))
break
else:
continue
break
if config.DEBUG_ARRAY_OPT:
print("gufunc_ir last dump before renaming")
gufunc_ir.dump()
gufunc_ir.blocks = rename_labels(gufunc_ir.blocks)
remove_dels(gufunc_ir.blocks)
if config.DEBUG_ARRAY_OPT:
print("gufunc_ir last dump")
gufunc_ir.dump()
print("flags", flags)
print("typemap", typemap)
old_alias = flags.noalias
if not has_aliases:
if config.DEBUG_ARRAY_OPT:
print("No aliases found so adding noalias flag.")
flags.noalias = True
kernel_func = compiler.compile_ir(
typingctx,
targetctx,
gufunc_ir,
gufunc_param_types,
types.none,
flags,
locals)
flags.noalias = old_alias
kernel_sig = signature(types.none, *gufunc_param_types)
if config.DEBUG_ARRAY_OPT:
print("finished create_gufunc_for_parfor_body. kernel_sig = ", kernel_sig)
return kernel_func, parfor_args, kernel_sig, redargstartdim, func_arg_types
def replace_var_with_array_in_block(vars, block, typemap, calltypes):
new_block = []
for inst in block.body:
if isinstance(inst, ir.Assign) and inst.target.name in vars:
const_node = ir.Const(0, inst.loc)
const_var = ir.Var(inst.target.scope, mk_unique_var("$const_ind_0"), inst.loc)
typemap[const_var.name] = types.uintp
const_assign = ir.Assign(const_node, const_var, inst.loc)
new_block.append(const_assign)
setitem_node = ir.SetItem(inst.target, const_var, inst.value, inst.loc)
calltypes[setitem_node] = signature(
types.none, types.npytypes.Array(typemap[inst.target.name], 1, "C"), types.intp, typemap[inst.target.name])
new_block.append(setitem_node)
continue
elif isinstance(inst, parfor.Parfor):
replace_var_with_array_internal(vars, {0: inst.init_block}, typemap, calltypes)
replace_var_with_array_internal(vars, inst.loop_body, typemap, calltypes)
new_block.append(inst)
return new_block
def replace_var_with_array_internal(vars, loop_body, typemap, calltypes):
for label, block in loop_body.items():
block.body = replace_var_with_array_in_block(vars, block, typemap, calltypes)
def replace_var_with_array(vars, loop_body, typemap, calltypes):
replace_var_with_array_internal(vars, loop_body, typemap, calltypes)
for v in vars:
el_typ = typemap[v]
typemap.pop(v, None)
typemap[v] = types.npytypes.Array(el_typ, 1, "C")
def call_parallel_gufunc(lowerer, cres, gu_signature, outer_sig, expr_args, expr_arg_types,
loop_ranges, redvars, reddict, redarrdict, init_block, index_var_typ, races):
'''
Adds the call to the gufunc function from the main function.
'''
context = lowerer.context
builder = lowerer.builder
from numba.np.ufunc.parallel import (build_gufunc_wrapper,
get_thread_count,
_launch_threads)
if config.DEBUG_ARRAY_OPT:
print("make_parallel_loop")
print("args = ", expr_args)
print("outer_sig = ", outer_sig.args, outer_sig.return_type,
outer_sig.recvr, outer_sig.pysig)
print("loop_ranges = ", loop_ranges)
print("expr_args", expr_args)
print("expr_arg_types", expr_arg_types)
print("gu_signature", gu_signature)
# Build the wrapper for GUFunc
args, return_type = sigutils.normalize_signature(outer_sig)
llvm_func = cres.library.get_function(cres.fndesc.llvm_func_name)
sin, sout = gu_signature
# These are necessary for build_gufunc_wrapper to find external symbols
_launch_threads()
info = build_gufunc_wrapper(llvm_func, cres, sin, sout,
cache=False, is_parfors=True)
wrapper_name = info.name
cres.library._ensure_finalized()
if config.DEBUG_ARRAY_OPT:
print("parallel function = ", wrapper_name, cres)
# loadvars for loop_ranges
def load_range(v):
if isinstance(v, ir.Var):
return lowerer.loadvar(v.name)
else:
return context.get_constant(types.uintp, v)
num_dim = len(loop_ranges)
for i in range(num_dim):
start, stop, step = loop_ranges[i]
start = load_range(start)
stop = load_range(stop)
assert(step == 1) # We do not support loop steps other than 1
step = load_range(step)
loop_ranges[i] = (start, stop, step)
if config.DEBUG_ARRAY_OPT:
print("call_parallel_gufunc loop_ranges[{}] = ".format(i), start,
stop, step)
cgutils.printf(builder, "loop range[{}]: %d %d (%d)\n".format(i),
start, stop, step)
# Commonly used LLVM types and constants
byte_t = lc.Type.int(8)
byte_ptr_t = lc.Type.pointer(byte_t)
byte_ptr_ptr_t = lc.Type.pointer(byte_ptr_t)
intp_t = context.get_value_type(types.intp)
uintp_t = context.get_value_type(types.uintp)
intp_ptr_t = lc.Type.pointer(intp_t)
uintp_ptr_t = lc.Type.pointer(uintp_t)
zero = context.get_constant(types.uintp, 0)
one = context.get_constant(types.uintp, 1)
one_type = one.type
sizeof_intp = context.get_abi_sizeof(intp_t)
# Prepare sched, first pop it out of expr_args, outer_sig, and gu_signature
expr_args.pop(0)
sched_sig = sin.pop(0)
if config.DEBUG_ARRAY_OPT:
print("Parfor has potentially negative start", index_var_typ.signed)
if index_var_typ.signed:
sched_type = intp_t
sched_ptr_type = intp_ptr_t
else:
sched_type = uintp_t
sched_ptr_type = uintp_ptr_t
# Call do_scheduling with appropriate arguments
dim_starts = cgutils.alloca_once(
builder, sched_type, size=context.get_constant(
types.uintp, num_dim), name="dims")
dim_stops = cgutils.alloca_once(
builder, sched_type, size=context.get_constant(
types.uintp, num_dim), name="dims")
for i in range(num_dim):
start, stop, step = loop_ranges[i]
if start.type != one_type:
start = builder.sext(start, one_type)
if stop.type != one_type:
stop = builder.sext(stop, one_type)
if step.type != one_type:
step = builder.sext(step, one_type)
        # subtract 1 because do_scheduling takes inclusive ranges
stop = builder.sub(stop, one)
builder.store(
start, builder.gep(
dim_starts, [
context.get_constant(
types.uintp, i)]))
builder.store(stop, builder.gep(dim_stops,
[context.get_constant(types.uintp, i)]))
sched_size = get_thread_count() * num_dim * 2
sched = cgutils.alloca_once(
builder, sched_type, size=context.get_constant(
types.uintp, sched_size), name="sched")
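    # For illustration (hypothetical numbers): with 2 threads and a single 1-D
    # loop over 100 iterations, do_scheduling fills sched with per-thread
    # [start..., end...] chunks, e.g. roughly [0, 49, 50, 99], and each gufunc
    # invocation reads its own 2*num_dim slice of this array.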
debug_flag = 1 if config.DEBUG_ARRAY_OPT else 0
scheduling_fnty = lc.Type.function(
intp_ptr_t, [uintp_t, sched_ptr_type, sched_ptr_type, uintp_t, sched_ptr_type, intp_t])
if index_var_typ.signed:
do_scheduling = builder.module.get_or_insert_function(scheduling_fnty,
name="do_scheduling_signed")
else:
do_scheduling = builder.module.get_or_insert_function(scheduling_fnty,
name="do_scheduling_unsigned")
get_num_threads = builder.module.get_or_insert_function(
lc.Type.function(lc.Type.int(types.intp.bitwidth), []),
name="get_num_threads")
num_threads = builder.call(get_num_threads, [])
with cgutils.if_unlikely(builder, builder.icmp_signed('<=', num_threads,
num_threads.type(0))):
cgutils.printf(builder, "num_threads: %d\n", num_threads)
context.call_conv.return_user_exc(builder, RuntimeError,
("Invalid number of threads. "
"This likely indicates a bug in Numba.",))
builder.call(
do_scheduling, [
context.get_constant(
types.uintp, num_dim), dim_starts, dim_stops, num_threads,
sched, context.get_constant(
types.intp, debug_flag)])
# Get the LLVM vars for the Numba IR reduction array vars.
redarrs = [lowerer.loadvar(redarrdict[x].name) for x in redvars]
nredvars = len(redvars)
ninouts = len(expr_args) - nredvars
if config.DEBUG_ARRAY_OPT:
for i in range(get_thread_count()):
cgutils.printf(builder, "sched[" + str(i) + "] = ")
for j in range(num_dim * 2):
cgutils.printf(
builder, "%d ", builder.load(
builder.gep(
sched, [
context.get_constant(
types.intp, i * num_dim * 2 + j)])))
cgutils.printf(builder, "\n")
# ----------------------------------------------------------------------------
# Prepare arguments: args, shapes, steps, data
all_args = [lowerer.loadvar(x) for x in expr_args[:ninouts]] + redarrs
num_args = len(all_args)
num_inps = len(sin) + 1
args = cgutils.alloca_once(
builder,
byte_ptr_t,
size=context.get_constant(
types.intp,
1 + num_args),
name="pargs")
array_strides = []
# sched goes first
builder.store(builder.bitcast(sched, byte_ptr_t), args)
array_strides.append(context.get_constant(types.intp, sizeof_intp))
red_shapes = {}
rv_to_arg_dict = {}
# followed by other arguments
for i in range(num_args):
arg = all_args[i]
var = expr_args[i]
aty = expr_arg_types[i]
dst = builder.gep(args, [context.get_constant(types.intp, i + 1)])
if i >= ninouts: # reduction variables
ary = context.make_array(aty)(context, builder, arg)
strides = cgutils.unpack_tuple(builder, ary.strides, aty.ndim)
ary_shapes = cgutils.unpack_tuple(builder, ary.shape, aty.ndim)
# Start from 1 because we skip the first dimension of length num_threads just like sched.
for j in range(1, len(strides)):
array_strides.append(strides[j])
red_shapes[i] = ary_shapes[1:]
builder.store(builder.bitcast(ary.data, byte_ptr_t), dst)
elif isinstance(aty, types.ArrayCompatible):
if var in races:
typ = context.get_data_type(
aty.dtype) if aty.dtype != types.boolean else lc.Type.int(1)
rv_arg = cgutils.alloca_once(builder, typ)
builder.store(arg, rv_arg)
builder.store(builder.bitcast(rv_arg, byte_ptr_t), dst)
rv_to_arg_dict[var] = (arg, rv_arg)
array_strides.append(context.get_constant(types.intp, context.get_abi_sizeof(typ)))
else:
ary = context.make_array(aty)(context, builder, arg)
strides = cgutils.unpack_tuple(builder, ary.strides, aty.ndim)
for j in range(len(strides)):
array_strides.append(strides[j])
builder.store(builder.bitcast(ary.data, byte_ptr_t), dst)
else:
if i < num_inps:
# Scalar input, need to store the value in an array of size 1
typ = context.get_data_type(
aty) if aty != types.boolean else lc.Type.int(1)
ptr = cgutils.alloca_once(builder, typ)
builder.store(arg, ptr)
else:
# Scalar output, must allocate
typ = context.get_data_type(
aty) if aty != types.boolean else lc.Type.int(1)
ptr = cgutils.alloca_once(builder, typ)
builder.store(builder.bitcast(ptr, byte_ptr_t), dst)
# ----------------------------------------------------------------------------
# Next, we prepare the individual dimension info recorded in gu_signature
sig_dim_dict = {}
    occurances = [sched_sig[0]]
sig_dim_dict[sched_sig[0]] = context.get_constant(types.intp, 2 * num_dim)
assert len(expr_args) == len(all_args)
assert len(expr_args) == len(expr_arg_types)
assert len(expr_args) == len(sin + sout)
assert len(expr_args) == len(outer_sig.args[1:])
for var, arg, aty, gu_sig in zip(expr_args, all_args,
expr_arg_types, sin + sout):
if isinstance(aty, types.npytypes.Array):
i = aty.ndim - len(gu_sig)
else:
i = 0
if config.DEBUG_ARRAY_OPT:
print("var =", var, "gu_sig =", gu_sig, "type =", aty, "i =", i)
for dim_sym in gu_sig:
if config.DEBUG_ARRAY_OPT:
print("var = ", var, " type = ", aty)
if var in races:
sig_dim_dict[dim_sym] = context.get_constant(types.intp, 1)
else:
ary = context.make_array(aty)(context, builder, arg)
shapes = cgutils.unpack_tuple(builder, ary.shape, aty.ndim)
sig_dim_dict[dim_sym] = shapes[i]
if not (dim_sym in occurances):
if config.DEBUG_ARRAY_OPT:
print("dim_sym = ", dim_sym, ", i = ", i)
cgutils.printf(builder, dim_sym + " = %d\n", sig_dim_dict[dim_sym])
occurances.append(dim_sym)
i = i + 1
# ----------------------------------------------------------------------------
# Prepare shapes, which is a single number (outer loop size), followed by
# the size of individual shape variables.
nshapes = len(sig_dim_dict) + 1
shapes = cgutils.alloca_once(builder, intp_t, size=nshapes, name="pshape")
# For now, outer loop size is the same as number of threads
builder.store(num_threads, shapes)
# Individual shape variables go next
i = 1
for dim_sym in occurances:
if config.DEBUG_ARRAY_OPT:
cgutils.printf(builder, dim_sym + " = %d\n", sig_dim_dict[dim_sym])
builder.store(
sig_dim_dict[dim_sym], builder.gep(
shapes, [
context.get_constant(
types.intp, i)]))
i = i + 1
# ----------------------------------------------------------------------------
# Prepare steps for each argument. Note that all steps are counted in
# bytes.
num_steps = num_args + 1 + len(array_strides)
steps = cgutils.alloca_once(
builder, intp_t, size=context.get_constant(
types.intp, num_steps), name="psteps")
# First goes the step size for sched, which is 2 * num_dim
builder.store(context.get_constant(types.intp, 2 * num_dim * sizeof_intp),
steps)
# The steps for all others are 0, except for reduction results.
for i in range(num_args):
if i >= ninouts: # steps for reduction vars are abi_sizeof(typ)
j = i - ninouts
# Get the base dtype of the reduction array.
redtyp = lowerer.fndesc.typemap[redvars[j]]
red_stride = None
if isinstance(redtyp, types.npytypes.Array):
redtyp = redtyp.dtype
red_stride = red_shapes[i]
typ = context.get_value_type(redtyp)
sizeof = context.get_abi_sizeof(typ)
# Set stepsize to the size of that dtype.
stepsize = context.get_constant(types.intp, sizeof)
if red_stride is not None:
for rs in red_stride:
stepsize = builder.mul(stepsize, rs)
else:
# steps are strides
stepsize = zero
dst = builder.gep(steps, [context.get_constant(types.intp, 1 + i)])
builder.store(stepsize, dst)
for j in range(len(array_strides)):
dst = builder.gep(
steps, [
context.get_constant(
types.intp, 1 + num_args + j)])
builder.store(array_strides[j], dst)
# ----------------------------------------------------------------------------
# prepare data
data = cgutils.get_null_value(byte_ptr_t)
fnty = lc.Type.function(lc.Type.void(), [byte_ptr_ptr_t, intp_ptr_t,
intp_ptr_t, byte_ptr_t])
fn = builder.module.get_or_insert_function(fnty, name=wrapper_name)
context.active_code_library.add_linking_library(info.library)
if config.DEBUG_ARRAY_OPT:
cgutils.printf(builder, "before calling kernel %p\n", fn)
builder.call(fn, [args, shapes, steps, data])
if config.DEBUG_ARRAY_OPT:
cgutils.printf(builder, "after calling kernel %p\n", fn)
for k, v in rv_to_arg_dict.items():
arg, rv_arg = v
only_elem_ptr = builder.gep(rv_arg, [context.get_constant(types.intp, 0)])
builder.store(builder.load(only_elem_ptr), lowerer.getvar(k))
context.active_code_library.add_linking_library(cres.library)
| 43.597345
| 309
| 0.594583
|
794e090caa2cab0073b8095b13a811d3eb13d036
| 1,418
|
py
|
Python
|
examples/plot-layer-types.py
|
lukasz-migas/napari-1d
|
b0f081a8711ae941b3e4b5c58c3aea56bd0e3277
|
[
"BSD-3-Clause"
] | 13
|
2021-08-27T23:01:09.000Z
|
2022-03-22T13:51:35.000Z
|
examples/plot-layer-types.py
|
lukasz-migas/napari-1d
|
b0f081a8711ae941b3e4b5c58c3aea56bd0e3277
|
[
"BSD-3-Clause"
] | 71
|
2021-08-28T13:29:17.000Z
|
2022-03-28T21:22:12.000Z
|
examples/plot-layer-types.py
|
lukasz-migas/napari-1d
|
b0f081a8711ae941b3e4b5c58c3aea56bd0e3277
|
[
"BSD-3-Clause"
] | null | null | null |
"""Display image and 1d plot."""
import numpy as np
import napari_plot
N_POINTS = 1000
N_MIN = 0
N_MAX = 300
def add_line():
"""Line plot"""
x = np.arange(N_POINTS)
y = np.random.randint(N_MIN, N_MAX, N_POINTS)
viewer1d.add_line(np.c_[x, y], name="Line", visible=True)
def add_centroids():
"""Centroids plot"""
x = np.arange(N_POINTS)
y = np.random.randint(N_MIN, N_MAX, N_POINTS)
viewer1d.add_centroids(np.c_[x, y], color=(1.0, 0.0, 1.0, 1.0), name="Centroids", visible=True)
def add_scatter():
"""Centroids plot"""
x = np.random.randint(N_MIN, N_MAX, N_POINTS // 2)
y = np.random.randint(N_MIN, N_POINTS, N_POINTS // 2)
viewer1d.add_scatter(np.c_[x, y], size=5, name="Scatter", visible=True)
def add_region():
"""Region plot"""
regions = [
([25, 50], "vertical"),
([50, 400], "horizontal"),
([80, 90], "vertical"),
]
viewer1d.add_region(regions, face_color=["red", "green", "cyan"], opacity=0.5, name="Spans", visible=True)
def add_infline():
"""Inf line plot"""
viewer1d.add_inf_line(
[50, 15, 250],
orientation=["vertical", "vertical", "horizontal"],
width=3,
color=["red", "orange", "green"],
name="Infinite Line",
visible=True,
)
viewer1d = napari_plot.Viewer()
add_line()
add_centroids()
add_region()
add_scatter()
add_infline()
napari_plot.run()
| 22.507937
| 110
| 0.609309
|
794e0a2e88ffb415c67a84465a0d63dbb175f543
| 32,375
|
py
|
Python
|
aragwas_server/gwasdb/elastic.py
|
grimmlab/AraGWAS
|
447db9bff377085a65aea05c0f09545e9405cc6c
|
[
"MIT"
] | 1
|
2020-04-07T12:46:17.000Z
|
2020-04-07T12:46:17.000Z
|
aragwas_server/gwasdb/elastic.py
|
grimmlab/AraGWAS
|
447db9bff377085a65aea05c0f09545e9405cc6c
|
[
"MIT"
] | null | null | null |
aragwas_server/gwasdb/elastic.py
|
grimmlab/AraGWAS
|
447db9bff377085a65aea05c0f09545e9405cc6c
|
[
"MIT"
] | null | null | null |
from elasticsearch import Elasticsearch, helpers
from elasticsearch_dsl import Search, Q, A
from aragwas.settings import ES_HOST
from gwasdb import serializers
from .parsers import parse_snpeff, parse_lastel
import logging
import json
import os
import numpy as np
import re
import operator
import datetime
from collections import defaultdict
GENE_ID_PATTERN = re.compile('^[a-z]{2}([\\d]{1})G\\w+$', re.IGNORECASE)
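# A quick illustrative helper (hypothetical gene id, not used elsewhere):
# Arabidopsis-style identifiers such as "AT1G01010" match the pattern above,
# and group(1) is the chromosome digit used later to pick the per-chromosome
# index (e.g. "geno_chr1").
def _example_gene_id_chrom(gene_id="AT1G01010"):
    m = GENE_ID_PATTERN.match(gene_id)
    return m.group(1) if m else None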
es_logger = logging.getLogger('elasticsearch')
es_logger.setLevel(logging.WARNING)
# Get an instance of a logger
logger = logging.getLogger(__name__)
es = Elasticsearch([ES_HOST],timeout=60)
BULK_INDEX_COUNT = 1000
ES_TEMPLATE_PATH = os.path.join(os.path.join(os.path.dirname(__file__),'es_templates'))
def check_server():
"""Check if server is running"""
return es.ping()
def check_genotype_data():
"""Checks if the genotype data is fully indexed"""
GENE_COUNT_TO_CHECK = 33341
SNP_COUNT_TO_CHECK = 10700998
gene_count = es.count('genotype',doc_type='genes')['count']
snps_count = es.count('genotype',doc_type='snps')['count']
if gene_count != GENE_COUNT_TO_CHECK:
raise Exception('Only %s instead of %s genes found' % (gene_count,GENE_COUNT_TO_CHECK))
if snps_count != SNP_COUNT_TO_CHECK:
raise Exception('Only %s instead of %s SNPs found' % (snps_count,SNP_COUNT_TO_CHECK))
def check_indices():
"""Initializes the ES indices"""
# check if index exists
indices_exists = es.indices.exists(
'aragwas') or es.indices.exists('geno_*', allow_no_indices=False)
if indices_exists:
raise Exception('Indices already exist. Delete before you continue')
# create the indices
with open(os.path.join(ES_TEMPLATE_PATH, 'es_aragwas.json'), 'r') as fh:
aragwas_settings = json.load(fh)
with open(os.path.join(ES_TEMPLATE_PATH,'es_genotype.json'), 'r') as fh:
genotype_settings = json.load(fh)
# put index template
es.indices.put_template('aragwas', aragwas_settings)
# put index template
es.indices.put_template('geno_*', genotype_settings)
def load_snps_by_region(chrom, start, end):
"""Retrieve snp information by region"""
index = _get_index_from_chr(chrom)
search_snps = Search().using(es).doc_type('snps').index(index).filter("range", position={"lte": end, "gte":start})
return {snp.position: snp.to_dict() for snp in search_snps.scan() }
def load_snps(chrom, positions):
"""Retrieve snp information"""
index = _get_index_from_chr(chrom)
if isinstance(positions, np.ndarray):
pos = positions.tolist()
else:
pos = positions
if len(pos) == 0:
return {}
resp = es.mget(body={'ids':pos}, index=index, doc_type='snps')
return {doc['_id']: doc['_source'] if doc['found'] else {} for doc in resp['docs'] }
def autocomplete_genes(term):
"""For autocomplete searches"""
resp = es.search(index='genotype',doc_type='genes',_source=["suggest", "positions","strand","chr","type"],
body={"suggest": {
"gene-suggest": {
"prefix":term,
"completion": {
"field": "suggest"
}
}
}})
return [{'id':option['_id'], 'name': option['text'],'strand': option['_source']['strand'], 'chr': option['_source']['chr'], 'type': option['_source']['type'], 'positions':option['_source']['positions']} for option in resp['suggest']['gene-suggest'][0]['options']]
def load_gene_by_id(id):
"""Retrive genes by id"""
matches = GENE_ID_PATTERN.match(id)
if not matches:
raise Exception('Wrong Gene ID %s' % id)
chrom = matches.group(1)
doc = es.get('geno_chr%s' % chrom, id, doc_type='genes', _source=['name','chr','positions','type','strand', 'isoforms'], realtime=False)
if not doc['found']:
raise Exception('Gene with ID %s not found' %id)
gene = doc['_source']
gene['id'] = doc['_id']
# # Potentially, load KO associated phenos if any
# if return_KOs:
# # check if gene has any associated phenotypes
# pass
return gene
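# Illustrative sketch (added, not part of the original module): how GENE_ID_PATTERN
# maps a gene identifier to its per-chromosome index. The example ID 'AT1G01010'
# is an assumption used for demonstration only.
def _example_gene_index(gene_id='AT1G01010'):
    matches = GENE_ID_PATTERN.match(gene_id)
    if not matches:
        raise Exception('Wrong Gene ID %s' % gene_id)
    # group(1) is the chromosome number, e.g. '1' -> index 'geno_chr1'
    return 'geno_chr%s' % matches.group(1)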
def load_associations_by_id(id):
"""Retrieve an association by id"""
    doc = es.get('aragwas', id, doc_type='associations', _source=['overFDR','overBonferroni','overPermutation','maf','mac','score', 'snp', 'study'], realtime=False)
if not doc['found']:
raise Exception('Associations with ID %s not found' %id)
association = doc['_source']
association['id'] = doc['_id']
return association
def load_ko_associations_by_id(id):
"""Retrieve a KO mutation by id"""
doc = es.get('aragwas', id, doc_type='ko_associations', _source=['overPermutation','overBonferroni','maf','mac','score', 'gene', 'study'], realtime=False)
if not doc['found']:
raise Exception('Associations with ID %s not found' %id)
association = doc['_source']
association['id'] = doc['_id']
return association
def load_gene_associations(id):
"""Retrive associations by neighboring gene id"""
matches = GENE_ID_PATTERN.match(id)
if not matches:
raise Exception('Wrong Gene ID %s' % id)
chrom = matches.group(1)
asso_search = Search(using=es).doc_type('snps').source(exclude=['isoforms','GO'])
q = Q({'nested':{'path':'snps.annotations', 'query':{'match':{'snps.annotations.gene_name':id}}}})
asso_search = asso_search.filter(q).sort('score')
results = asso_search[0:min(500, asso_search.count())].execute()
associations = results.to_dict()['hits']['hits']
    return [{association['_id']: association['_source']} for association in associations]
def load_gene_snps(id):
"""Retrive associations by neighboring gene id"""
snp_search = Search(using=es).doc_type('snps')
q = Q({'nested':{'path':'annotations', 'query':{'match':{'annotations.gene_name':id}}}})
snp_search = snp_search.filter(q).sort('position')
results = snp_search[0:min(500, snp_search.count())].execute()
associations = results.to_dict()['hits']['hits']
return [{association['_id']: association['_source']} for association in associations]
def get_top_genes():
"""Retrieve top genes"""
s = Search(using=es, doc_type='associations')
s = s.filter('term', overPermutation='T')
s = s.filter(Q('range', mac={'gte': 6}))
agg = A("terms", field="snp.gene_name")
s.aggs.bucket('gene_count', agg)
agg_results = s.execute().aggregations.gene_count.buckets
return agg_results
def load_filtered_top_genes(filters, start=0, size=50):
"""Retrieves top genes and filter them through the tickable options"""
# First aggregate over associations
s = Search(using=es, doc_type='associations')
if 'chr' in filters and len(filters['chr']) > 0 and len(filters['chr']) < 5:
s = s.filter(Q('bool', should=[Q('term', snp__chr=chrom if len(chrom) > 3 else 'chr%s' % chrom) for chrom in
filters['chr']]))
if 'significant' in filters:
s = s.filter(Q('range', mac={'gte': 6}))
if filters['significant'][0] == "b":
s = s.filter('term', overBonferroni='T')
elif filters['significant'][0] == "p":
s = s.filter('term', overPermutation='T')
agg = A("terms", field="snp.gene_name", size="33341") # Need to check ALL GENES for further lists
s.aggs.bucket('gene_count', agg)
top_genes = s.execute().aggregations.gene_count.buckets
genes = []
for top in top_genes[start:start+size]:
id = top['key']
matches = GENE_ID_PATTERN.match(id)
if not matches:
continue
gene = load_gene_by_id(top['key'])
gene['n_hits'] = top['doc_count']
genes.append(gene)
return genes, len(top_genes)
def load_filtered_top_ko_mutations_genes(filters, start=0, size=50):
"""Retrieves top genes according to number of KO mutations and filter them through the tickable options"""
# First aggregate over associations
s = Search(using=es, doc_type='ko_associations')
if 'chr' in filters and len(filters['chr']) > 0 and len(filters['chr']) < 5:
s = s.filter(Q('bool', should=[Q({'nested':{'path':'gene', 'query':{'match':{'gene.chr':chrom if len(chrom) > 3 else 'chr%s' % chrom}}}}) for chrom in
filters['chr']]))
if 'significant' in filters:
s = s.filter(Q('range', mac={'gte': 6}))
s = s.filter('term', overBonferroni='T') # TODO: change this to permutation once the new indexed scores are in.
agg = A("terms", field="gene.id", size='33341') # Need to check ALL GENES for further lists
s.aggs.bucket('genes', 'nested', path='gene').bucket('gene_count', agg) # Need to have a NESTED query
top_genes = s.execute().aggregations.genes.gene_count.buckets
# The KO associations are already retrieved, just need to assign them to the right gene.
association_dict = defaultdict(list)
for asso in s[0:s.count()].execute().to_dict()['hits']['hits']:
association_dict[asso['_source']['gene']['name']].append(asso['_source'])
genes = []
for top in top_genes[start:start+size]:
id = top['key']
matches = GENE_ID_PATTERN.match(id)
if not matches:
continue
gene = load_gene_by_id(top['key'])
gene['n_hits'] = top['doc_count']
gene['ko_associations'] = association_dict[top['key']]
genes.append(gene)
return genes, len(top_genes)
def get_top_genes_aggregated_filtered_statistics(filters):
s = Search(using=es, doc_type='genes')
if 'chr' in filters and len(filters['chr']) > 0 and len(filters['chr']) < 5:
s = s.filter(Q('bool', should=[Q('term', chr=chrom if len(chrom) > 3 else 'chr%s' % chrom) for chrom in
filters['chr']]))
agg_chr = A("terms", field="chr")
s.aggs.bucket('chr_count', agg_chr)
agg_results = s.execute().aggregations
return agg_results.chr_count.buckets
def get_top_genes_and_snp_type_for_study(study_id):
"""Retrive associations by neighboring gene id"""
s = Search(using=es, doc_type='associations')
s = s.filter(Q('bool', should=[Q('term', study__id=study_id)]))
s = s.filter('term', overPermutation='T')
s = s.filter(Q('range', mac={'gte': 6}))
agg_genes = A("terms", field="snp.gene_name")
# agg_go_terms = A("terms", field="snp.") NOT DOABLE WITH CURRENT FIELDS IN ES
agg_snp_type = A("terms", field="snp.coding")
agg_impact = A(
{"nested": {"path": "snp.annotations"}, "aggs": {"annotations": {"terms": {"field": "snp.annotations.impact"}}}})
agg_annotation = A(
{"nested": {"path": "snp.annotations"},
"aggs": {"annotations": {"terms": {"field": "snp.annotations.effect"}}}})
s.aggs.bucket('gene_count', agg_genes)
s.aggs.bucket('snp_type_count', agg_snp_type)
s.aggs.bucket('impact_count', agg_impact)
s.aggs.bucket('annotation_count', agg_annotation)
s.aggs.bucket('pvalue_hist', 'histogram', field='score', interval='1')
s.aggs.bucket('maf_hist', 'histogram', field='maf', interval='0.1')
agg_results = s.execute().aggregations
results = {'gene_count': agg_results.gene_count.buckets, 'snp_type_count': agg_results.snp_type_count.buckets,
'impact_count': agg_results.impact_count.annotations.buckets, 'annotation_count': agg_results.annotation_count.annotations.buckets,
'pvalue_hist': agg_results.pvalue_hist.buckets, 'maf_hist': agg_results.maf_hist.buckets}
return results
def load_genes_by_region(chrom, start, end, features):
"""Retrieve genes by region"""
index = _get_index_from_chr(chrom)
search_genes = Search().using(es).doc_type('genes').index(index).filter("range", positions={"lte": end, "gte":start})
if not features:
        search_genes = search_genes.source(exclude=['isoforms'])
genes = [gene.to_dict() for gene in search_genes.scan() ]
for gene in genes:
gene['ko_associations'] = load_gene_ko_associations(gene['name'], return_only_significant=True)
return genes
def filter_association_search(s, filters):
if 'score' in filters:
        s = s.filter('range', score={'gte': filters['score']})
if 'chr' in filters and len(filters['chr']) > 0 and len(filters['chr']) < 5:
s = s.filter(Q('bool', should=[Q('term', snp__chr=chrom if len(chrom) > 3 else 'chr%s' % chrom) for chrom in filters['chr']]))
if 'maf' in filters and len(filters['maf']) > 0 and len(filters['maf']) < 4:
maf_filters = []
for maf in filters['maf']:
maf = maf.split('-')
if len(maf) > 1:
maf_filters.append(Q('range', maf={'lte': float(maf[1])/100,'gte':float(maf[0])/100}))
else:
if maf[0] == '1':
maf_filters.append(Q('range', maf={'lt':float(maf[0])/100}))
else:
maf_filters.append(Q('range', maf={'gt':float(maf[0])/100}))
s = s.filter(Q('bool',should = maf_filters))
if 'mac' in filters and len(filters['mac']) == 1:
if filters['mac'][0] == '0':
s = s.filter('range', mac={'lte': 5})
else:
s = s.filter('range', mac={'gt': 5})
if 'annotation' in filters and len(filters['annotation']) > 0 and len(filters['annotation']) < 4:
annot_filter = [Q('term', snp__annotations__effect=anno) for anno in filters['annotation']]
s = s.filter(Q('nested', path='snp.annotations', query=Q('bool', should=annot_filter)))
if 'type' in filters and len(filters['type'])==1:
s = s.filter('term', snp__coding='T' if filters['type'][0] == 'genic' else 'F')
if 'study_id' in filters and len(filters['study_id']) > 0:
s = s.filter(Q('bool', should=[Q('term',study__id = study_id) for study_id in filters['study_id']]))
if 'phenotype_id' in filters and len(filters['phenotype_id']) > 0:
s = s.filter(Q('bool', should=[Q('term',study__phenotype__id = phenotype_id) for phenotype_id in filters['phenotype_id']]))
if 'genotype_id' in filters and len(filters['genotype_id']) > 0:
s = s.filter(Q('bool', should=[Q('term',study__genotype__id = genotype_id) for genotype_id in filters['genotype_id']]))
if 'gene_id' in filters and len(filters['gene_id']) > 0:
        s = s.filter(Q({'nested': {'path': 'snp.annotations', 'query': {'match': {'snp.annotations.gene_name': filters['gene_id']}}}}) | (Q('range', snp__position={'gte': int(filters['start'])}) & Q('range', snp__position={'lte': int(filters['end'])})))
if 'start' in filters:
s = s.filter('range', snp__position={'gte': int(filters['start'])})
if 'end' in filters:
s = s.filter('range', snp__position={'lte': int(filters['end'])})
if 'significant' in filters and len(filters['significant'])>0:
if filters['significant'][0] == "b":
s = s.filter('term', overBonferroni='T')
elif filters['significant'][0] == "p":
s = s.filter('term', overPermutation='T')
return s
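# Hedged usage sketch (added, not in the original source): a hypothetical filters
# dict wired through filter_association_search. The keys are the ones the function
# above actually reads; the concrete values are illustrative assumptions only.
def _example_filtered_search():
    filters = {
        'chr': ['1', '2'],                    # chromosomes, with or without the 'chr' prefix
        'maf': ['1-5'],                       # minor allele frequency band in percent
        'mac': ['1'],                         # '1' keeps mac > 5, '0' keeps mac <= 5
        'annotation': ['missense_variant'],   # illustrative snpEff effect name
        'significant': ['p'],                 # 'p' = over permutation, 'b' = over Bonferroni
    }
    s = Search(using=es, doc_type='associations').sort('-score')
    return filter_association_search(s, filters)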
def get_aggregated_filtered_statistics(filters):
s = Search(using=es, doc_type='associations')
s = filter_association_search(s, filters)
agg_chr = A("terms", field="snp.chr")
agg_type = A("terms", field="snp.coding")
agg_annotation = A(
{"nested": {"path": "snp.annotations"}, "aggs": {"annotations": {"terms": {"field": "snp.annotations.effect"}}}})
agg_maf = A("range", field="maf",
ranges=[{"to": 0.01}, {"from": 0.01, "to": 0.05001}, {"from": 0.05001, "to": 0.1001}, {"from": 0.1001}])
agg_mac = A("range", field="mac",
ranges=[{"to": 6}, {"from": 6}])
s.aggs.bucket('maf_count', agg_maf)
s.aggs.bucket('mac_count', agg_mac)
s.aggs.bucket('chr_count', agg_chr)
s.aggs.bucket('type_count', agg_type)
s.aggs.bucket('annotation_count', agg_annotation)
agg_results = s.execute().aggregations
return agg_results.chr_count.buckets, agg_results.maf_count.buckets, agg_results.mac_count.buckets, agg_results.type_count.buckets, agg_results.annotation_count.annotations.buckets
def index_associations(study, associations, thresholds):
"""indexes associations"""
with_permutations = 'permutation_threshold' in thresholds.keys() and thresholds['permutation_threshold']
thresholds_study = [{'name': key, 'value': val} for key, val in thresholds.items() ]
# first filter by chr to fetch annotations
associations.sort(order = 'chr')
annotations = {}
for chrom in range(1, 6):
chrom_pos = associations['position'][np.where(associations['chr'] == str(chrom))]
annotations[str(chrom)] = load_snps(str(chrom),chrom_pos)
documents = []
for assoc in associations:
_id = '%s_%s_%s' % (study.pk, assoc['chr'], assoc['position'])
study_data = serializers.EsStudySerializer(study).data
study_data['thresholds'] = thresholds_study
_source = {'mac': int(assoc['mac']), 'maf': float(assoc['maf']), 'score': float(assoc['score']), 'created': datetime.datetime.now(),'study':study_data, 'overFDR': bool(assoc['score'] > thresholds['bh_threshold'])}
_source['overBonferroni'] = bool(assoc['score'] > thresholds['bonferroni_threshold05'])
if with_permutations:
_source['overPermutation'] = bool(assoc['score'] > thresholds['permutation_threshold'])
snp = annotations[assoc['chr']].get(str(assoc['position']), None)
if snp:
_source['snp'] = snp
documents.append({'_index':'aragwas','_type':'associations','_id': _id, '_source': _source })
if len(documents) == 0:
return 0,0
success, errors = helpers.bulk(es,documents, chunk_size=1000, stats_only=True)
return success, errors
def load_filtered_top_associations(filters, start=0, size=50):
"""Retrieves top associations and filter them through the tickable options"""
s = Search(using=es, doc_type='associations')
s = s.sort('-score')
s = filter_association_search(s, filters)
s = s[start:start+size]
print(json.dumps(s.to_dict()))
result = s.execute()
associations = result['hits']['hits']
return [association['_source'].to_dict() for association in associations], result['hits']['total']
def load_filtered_top_associations_search_after(filters, search_after = ''):
"""Retrieves top associations and filter them through the tickable options"""
s = Search(using=es, doc_type='associations')
s = s.sort('-score', '_uid')
s = filter_association_search(s, filters)
if search_after != '':
search_after = parse_lastel(search_after)
print(search_after)
s = s.extra(search_after=search_after)
s = s[0:25]
print(json.dumps(s.to_dict()))
result = s.execute()
associations = result['hits']['hits']
last_el = ('','')
if len(associations) > 0:
last_el = result['hits']['hits'][-1]['sort']
        # Transformation needed to safeguard URL transmission
last_el[1] = "-".join(last_el[1].split('#'))
return [association['_source'].to_dict() for association in associations], result['hits']['total'], last_el
def load_filtered_top_ko_associations_search_after(filters, search_after = '', size=50):
"""Retrieves top associations and filter them through the tickable options"""
s = Search(using=es, doc_type='ko_associations')
s = s.sort('-score', '_uid')
# By default, leave out associations with no gene
s = s.filter(Q({'nested':{'path':'gene', 'query':{'exists':{'field':'gene.chr'}}}}))
# # Only need to filter by chromosome, maf or mac
if 'chr' in filters and len(filters['chr']) > 0 and len(filters['chr']) < 5:
s = s.filter(Q('bool', should=[Q({'nested':{'path':'gene', 'query':{'match':{'gene.chr':chrom if len(chrom) > 3 else 'chr%s' % chrom}}}}) for chrom in
filters['chr']]))
if 'significant' in filters:
s = s.filter(Q('range', mac={'gte': 6}))
s = s.filter('term', overBonferroni='T') # TODO: change this to permutation once the new indexed scores are in.
if search_after != '':
search_after = parse_lastel(search_after)
print(search_after)
s = s.extra(search_after=search_after)
s = s[0:size]
result = s.execute()
associations = result['hits']['hits']
    last_el = ('','')
    if len(associations) > 0:
        last_el = result['hits']['hits'][-1]['sort']
        # Transformation needed to safeguard URL transmission
        last_el[1] = "-".join(last_el[1].split('#'))
return [association['_source'].to_dict() for association in associations], result['hits']['total'], last_el
def get_gwas_overview_bins_data(filters):
"""Collect the data used to plot the gwas heatmap histograms"""
# Check missing filters
filters = check_missing_filters(filters)
# Params: chromosome (list or individual), region (optional, only considered if taking 1 chr), filters?
region_bins = get_bins_for_chr_regions(filters)
combined_data= []
keys = list(region_bins)
chromosome_sizes = {'chr1': 30427671, 'chr2': 19698289, 'chr3': 23459830,'chr4': 18585056, 'chr5': 26975502}
keys.sort()
for key in keys:
if filters['region'][0] == '':
region_final = [0, chromosome_sizes[key]]
else:
region_final = [filters['region'][0],filters['region'][1]]
bin_sze = filters['region_width']
combined_data.append({'chr': key, 'region':region_final, 'bin_sze': bin_sze, 'bins': region_bins[key]})
# Get study list
return {"type":"top", "data":combined_data}
def get_gwas_overview_heatmap_data(filters, num_studies):
"""Collect the data used to plot the gwas heatmap"""
# Check missing filters
filters = check_missing_filters(filters)
# Params: chromosome (list or individual), region (optional, only considered if taking 1 chr), filters?
max_score = dict()
data = dict()
if filters['chrom']=='all':
for i in range(1,6):
chr = 'chr' + str(i)
filters['chrom'] = chr
max_score_temp, data_temp = get_top_hits_for_all_studies(filters, num_studies) # TODO: link parameters from rest endpoint
max_score[chr]=max_score_temp[chr]
data[chr] = data_temp[chr]
# Aggregate over chromosomes
combined_data = combine_data(max_score, data) # For testing: change to data_bis to get faster but more localized points (looks bad)
else:
chr = 'chr'+str(filters['chrom'][-1])
max_score_temp, data_temp = get_top_hits_for_all_studies(filters, num_studies) # TODO: link parameters from rest endpoint
max_score[chr] = max_score_temp[chr]
data[chr] = data_temp[chr]
combined_data = combine_data(max_score, data, region=filters['region'],region_width=filters['region_width'])
# Get study list
return {"type":"top", "scoreRange": [0, max(max_score.values())], "data":combined_data}
def check_missing_filters(filters):
if 'chrom' not in filters.keys():
filters['chrom'] = 'all'
if 'region_width' not in filters.keys():
filters['region_width'] = 250000
if 'threshold' not in filters.keys():
filters['threshold'] = ''
if 'region' not in filters.keys():
filters['region'] = ('','')
if 'maf' not in filters.keys():
filters['maf'] = 0
if 'mac' not in filters.keys():
filters['mac'] = 6
return filters
def get_top_hits_for_all_studies(filters, num_studies):
s = Search(using=es, doc_type='associations')
s = filter_heatmap_search(s, filters)
# First aggregate for studies
s.aggs.bucket('per_chrom', 'terms', field='snp.chr')
# Keep track of the maximum value for each study
s.aggs['per_chrom'].metric('max', 'max', field='score')
# Then aggregate for chromosomes
s.aggs['per_chrom'].bucket('per_study', 'terms', field='study.id', order={'_term':'asc'}, size=num_studies,min_doc_count='0') #TODO: automatically check number of studies
s.aggs['per_chrom']['per_study'].metric('top_N', 'top_hits', size='25', sort={'score':'desc'}, _source=['-score','snp.position'])
# Then for regions (to avoid too many overlapping hits)
s.aggs['per_chrom']['per_study'].bucket('per_region', 'histogram', field='snp.position', interval=str(filters['region_width']))
# Then state what info we want from top_hits (position and score)
s.aggs['per_chrom']['per_study']['per_region'].metric('top', 'top_hits', size='1', sort={'score':'desc'}, _source=['score','snp.position'])
# Aggregate results
agg_results = s.execute().aggregations
    # Find the max score for each chromosome
max_score = dict()
data = dict()
data_bis = dict()
for bucket in agg_results.per_chrom.buckets:
max_score[bucket.key] = bucket.max.value
data[bucket.key] = []
# data_bis[bucket.key] = []
for element in bucket.per_study.buckets:
# Combine results and get top 25 per chrom per study:
data[bucket.key].append(get_top_N_per_study(element, 25))
# study_data = []
# for top in element.top_N.hits.hits:
# study_data.append({'pos': top['_source']['snp']['position'],
# 'score': top['_source']['score']})
# data_bis[bucket.key].append(study_data)
return max_score, data #, data_bis
def get_top_N_per_study(bucket, N=25):
hits = []
for element in bucket.per_region.buckets:
if element.top.hits.hits:
hits.append({'pos': element.top.hits.hits[0]['_source']['snp']['position'],'score':element.top.hits.hits[0]['_source']['score']})
hits.sort(key=lambda tup: -tup['score'])
return hits[:N]
def filter_heatmap_search(s, filters):
if filters['chrom'] != 'all':
s = s.filter(Q('bool', should=[Q('term', snp__chr=filters['chrom'] if len(filters['chrom']) > 3 else 'chr%s' % filters['chrom'])]))
if filters['threshold'] == 'FDR':
s = s.filter('term', overFDR='T')
elif filters['threshold'] == 'Bonferroni':
s = s.filter('term', overBonferroni='T')
elif filters['threshold'] == 'permutation':
s = s.filter('term', overPermutation='T')
if filters['maf'] > 0:
s = s.filter(Q('range', maf={'gte':filters['maf']}))
if filters['mac'] > 0:
s = s.filter(Q('range', mac={'gte': filters['mac']}))
if filters['region'][0] != '':
s = s.filter('range', snp__position={'gte': int(filters['region'][0])})
s = s.filter('range', snp__position={'lte': int(filters['region'][1])})
return s
def get_bins_for_chr_regions(filters):
"""Usage:
chrom = indicate the chromosome(s) of interest ('all' or any chromosome),
region_width = indicate the size of the bins for which to count hits,
threshold = indicate the type of threshold to look at (FDR, Bonferroni, permutation or none)
region = indicate a specific window in which to aggregate for, default = ('','') looks at entire chromosome
maf = indicate a minimum maf
mac = indicate a minimum mac
"""
s = Search(using=es, doc_type='associations')
s = filter_heatmap_search(s, filters)
s.aggs.bucket('per_chrom', 'terms', field='snp.chr').bucket('per_region', 'histogram', field='snp.position', interval=str(filters['region_width']))
agg_results = s.execute().aggregations
bin_dict = dict()
for buckets in agg_results.per_chrom.buckets:
bin_dict[buckets['key']] = convert_to_bin_format(buckets['per_region'].buckets)
return bin_dict
def convert_to_bin_format(buckets):
bins = []
for bin in buckets:
bins.append(bin['doc_count'])
return bins
def combine_data(max_scores, data, region=('',''), region_width=10000):
if len(data) != len(max_scores):
raise ValueError('Problem with the size of the dictionaries')
final_data = []
keys = list(data)
chromosome_sizes = {'chr1': 30427671, 'chr2': 19698289, 'chr3': 23459830,'chr4': 18585056, 'chr5': 26975502}
keys.sort()
for key in keys:
scoreRange = [0,max_scores[key]]
if region[0] == '':
region_final = [0, chromosome_sizes[key]]
else:
region_final = [region[0],region[1]]
bin_sze = region_width
final_data.append({'scoreRange': scoreRange, 'chr': key, 'region':region_final, 'bin_sze': bin_sze,
'data':data[key]})
return final_data
def index_genes(genes):
"""Indexes the genes"""
num_genes = len(genes)
documents = [{'_index':'geno_%s' % gene['chr'].lower(),'_type':'genes','_id':gene_id,'_source':gene} for gene_id, gene in genes.items()]
success, errors = helpers.bulk(es,documents,chunk_size=10000,stats_only=True,request_timeout=300)
return success, errors
def index_snps(snpeff_file):
"""indexes the snps"""
success, errors = helpers.bulk(es,_get_snps_document(snpeff_file), stats_only=True, chunk_size=10000)
return success, errors
def _get_association_document(study, top_associations):
for assoc in top_associations:
source = assoc.copy()
source['study'] = study
yield {
'_index':'aragwas','_type':'associations','_id': '%s_%s_%s' % (study.pk, assoc['chr'], assoc['position']),'_source':source
}
def _get_snps_document(snpeff_file):
with open(snpeff_file,'r') as content:
is_custom_snp_eff = True
for row in content:
if row[0] == '#':
is_custom_snp_eff = False
continue
fields = row.split("\t")
snp = parse_snpeff(fields, is_custom_snp_eff)
action = {'_index':'geno_%s' % snp['chr'].lower(),'_type':'snps','_id':snp['position'],'_source':snp}
yield action
def _get_index_from_chr(chrom):
index = 'geno_%s'
if len(chrom) > 3:
index = index % chrom
else:
        index = index % ('chr' + chrom)
return index
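# Small sanity sketch (added for illustration only): the helper accepts the
# chromosome with or without the 'chr' prefix and maps both to the same index.
def _example_index_names():
    assert _get_index_from_chr('1') == 'geno_chr1'
    assert _get_index_from_chr('chr1') == 'geno_chr1'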
# Need to index KO gene associations differently.
def index_ko_associations(study, associations, thresholds):
"""
indexes gene knockout associations
    They are stored differently because they represent associations between genes and phenotypes
"""
with_permutations = 'permutation_threshold' in thresholds.keys() and thresholds['permutation_threshold'] # This will always be FALSE
thresholds_study = [{'name': key, 'value': val} for key, val in thresholds.items() ]
annotations = {}
documents = []
for assoc in associations:
_id = '%s_%s' % (study.pk, assoc['gene'])
study_data = serializers.EsStudySerializer(study).data
study_data['thresholds'] = thresholds_study
_source = {'mac': int(assoc['mac']), 'maf': float(assoc['maf']), 'score': float(assoc['score']), 'beta': float(assoc['beta']),
'se_beta': float(assoc['se_beta']), 'created': datetime.datetime.now(),'study':study_data}
_source['overBonferroni'] = bool(assoc['score'] > thresholds['bonferroni_threshold05'])
if with_permutations:
_source['overPermutation'] = bool(assoc['score'] > thresholds['permutation_threshold'])
try:
gene = load_gene_by_id(assoc['gene'])
        except Exception:
gene = {'name': assoc['gene']}
_source['gene'] = gene
documents.append({'_index':'aragwas','_type':'ko_associations','_id': _id, '_source': _source })
if len(documents) == 0:
return 0,0
success, errors = helpers.bulk(es,documents, chunk_size=1000, stats_only=True)
return success, errors
def load_gene_ko_associations(id, return_only_significant=False):
"""Retrieve KO associations by gene id"""
matches = GENE_ID_PATTERN.match(id)
if not matches:
raise Exception('Wrong Gene ID %s' % id)
chrom = matches.group(1)
asso_search = Search(using=es).doc_type('ko_associations')
if return_only_significant:
asso_search = asso_search.filter('term', overBonferroni='T')
# q = Q('bool', should=Q('term',gene__name = id))
q = Q({'nested':{'path':'gene', 'query':{'match':{'gene.name':id}}}})
asso_search = asso_search.filter(q).sort('-score').source(exclude=['gene'])
results = asso_search[0:min(500, asso_search.count())].execute()
ko_associations = results.to_dict()['hits']['hits']
return [association['_source'] for association in ko_associations]
| 47.962963
| 267
| 0.639598
|
794e0b0904bd622ca3d3d71fd2a337d01502688a
| 2,115
|
py
|
Python
|
mixbox/dates.py
|
6un9-h0-Dan/mixbox
|
cbdfc6afab6a9bcb8306686d2c4c34ac496c9271
|
[
"BSD-3-Clause"
] | 8
|
2015-02-24T21:44:45.000Z
|
2020-09-23T14:18:14.000Z
|
mixbox/dates.py
|
6un9-h0-Dan/mixbox
|
cbdfc6afab6a9bcb8306686d2c4c34ac496c9271
|
[
"BSD-3-Clause"
] | 43
|
2015-02-21T00:37:48.000Z
|
2022-03-31T12:35:09.000Z
|
mixbox/dates.py
|
6un9-h0-Dan/mixbox
|
cbdfc6afab6a9bcb8306686d2c4c34ac496c9271
|
[
"BSD-3-Clause"
] | 15
|
2015-07-31T20:26:49.000Z
|
2021-07-23T17:01:48.000Z
|
# Copyright (c) 2015, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
# stdlib
import datetime
# external
import dateutil
import dateutil.parser
import dateutil.tz
def parse_datetime(value):
"""Attempts to parse `value` into an instance of ``datetime.datetime``. If
`value` is ``None``, this function will return ``None``.
Args:
value: A timestamp. This can be a string or datetime.datetime value.
"""
if not value:
return None
elif isinstance(value, datetime.datetime):
return value
return dateutil.parser.parse(value)
def serialize_datetime(value):
"""Attempts to convert `value` into an ISO8601-compliant timestamp string.
If `value` is ``None``, ``None`` will be returned.
Args:
value: A datetime.datetime value.
Returns:
An ISO8601 formatted timestamp string.
"""
if not value:
return None
return value.isoformat()
def parse_date(value):
"""Attempts to parse `value` into an instance of ``datetime.date``. If
`value` is ``None``, this function will return ``None``.
Args:
value: A timestamp. This can be a string, datetime.date, or
datetime.datetime value.
"""
if not value:
return None
if isinstance(value, datetime.date):
return value
return parse_datetime(value).date()
def serialize_date(value):
"""Attempts to convert `value` into an ``xs:date`` string. If `value` is
``None``, ``None`` will be returned.
Args:
value: A date value. This can be a string, datetime.date, or
datetime.datetime object.
Returns:
An ``xs:date`` formatted timestamp string.
"""
if not value:
return None
elif isinstance(value, datetime.datetime):
return value.date().isoformat()
elif isinstance(value, datetime.date):
return value.isoformat()
else:
return parse_date(value).isoformat()
def now():
"""Returns the current UTC datetime.datetime."""
return datetime.datetime.now(tz=dateutil.tz.tzutc())
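# Hedged usage sketch (added, not part of the original module): round-tripping a
# timestamp through parse_datetime/serialize_datetime and a date through
# parse_date/serialize_date. The literal values are illustrative only.
def _example_round_trip():
    dt = parse_datetime("2015-02-24T21:44:45+00:00")    # -> timezone-aware datetime.datetime
    assert serialize_datetime(dt) == "2015-02-24T21:44:45+00:00"
    d = parse_date("2015-02-24")                        # -> datetime.date
    assert serialize_date(d) == "2015-02-24"
    assert parse_datetime(None) is None and serialize_date(None) is None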
| 24.310345
| 78
| 0.645863
|
794e0b7d9f38f9748d9838e9e67ec21c3bfd6373
| 8,499
|
py
|
Python
|
scripts/emod3d_scripts/check_emod3d_subdomains.py
|
ucgmsim/slurm_gm_workflow
|
6fd7e11f3c3163dbd219b6783c32fa8085db5d35
|
[
"MIT"
] | null | null | null |
scripts/emod3d_scripts/check_emod3d_subdomains.py
|
ucgmsim/slurm_gm_workflow
|
6fd7e11f3c3163dbd219b6783c32fa8085db5d35
|
[
"MIT"
] | 114
|
2018-10-11T02:49:32.000Z
|
2022-03-30T01:28:21.000Z
|
scripts/emod3d_scripts/check_emod3d_subdomains.py
|
ucgmsim/slurm_gm_workflow
|
6fd7e11f3c3163dbd219b6783c32fa8085db5d35
|
[
"MIT"
] | 2
|
2021-10-05T07:10:20.000Z
|
2022-03-16T23:26:51.000Z
|
"""
Contains functions related to the calculation of emod3d subdomain boundaries.
Functions ported from C contain a number of np.int32 and np.float32 calls to emulate single precision integer and floating point behaviour.
Code ported from emod3d v3.0.8 misc.c. This is consistent with v3.0.7.
While v3.0.4 uses long doubles in place of floats, this does not seem to practically increase the accuracy of calculation.
This check is stricter than necessary, as only missed rows/columns that contain stations will cause issues when extracting the station waveforms.
"""
import argparse
import numpy as np
def get_start_boundary(n_grid_points, n_subdomains, index_subdomain):
"""
Calculates the starting boundary of the subdomain for a given subdomain index along a velocity model axis
Should have an overlap of 4 with the previous subdomains ending boundary
Does not account for the first subdomain
:param n_grid_points: The number of grid points along the axis
:param n_subdomains: The number of subdomains along the axis
:param index_subdomain: The index of the subdomain being tested. May be an integer or array of integers
    :return: The first grid point(s) covered by the given subdomain index (or indices)
"""
fslice = np.float32(
np.float32(n_grid_points + (n_subdomains - 1.0) * 4.0)
/ np.float32(n_subdomains)
- 1.0
)
fn1 = np.float32(index_subdomain * (fslice - 3.0))
nx1 = np.int32(fn1 + 0.5)
return nx1
def get_end_boundary(n_grid_points, n_subdomains, index_subdomain):
"""
Calculates the ending boundary of the subdomain for a given subdomain index along a velocity model axis
Should have an overlap of 4 with the next subdomains starting boundary
Does not account for the last subdomain points
:param n_grid_points: The number of grid points along the axis
:param n_subdomains: The number of subdomains along the axis
:param index_subdomain: The index of the subdomain being tested. May be an integer or array of integers
    :return: The last grid point(s) covered by the given subdomain index (or indices)
"""
fslice = np.float32(
np.float32(n_grid_points + (n_subdomains - 1.0) * 4.0)
/ np.float32(n_subdomains)
- 1.0
)
fn1 = np.float32(index_subdomain * (fslice - 3.0))
fn1 = np.float32(fn1 + fslice)
nx2 = np.int32(fn1 + 0.5)
nx2 = np.int32(nx2 + 1)
return nx2
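# Illustrative check (added sketch; the 100-point axis and 4 subdomains are assumed
# values): consecutive subdomains overlap by the 4 grid points the docstrings describe.
def _example_overlap():
    end_0 = get_end_boundary(100, 4, 0)      # -> 28
    start_1 = get_start_boundary(100, 4, 1)  # -> 24
    assert end_0 - start_1 == 4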
def get_nproc(
nproc: np.int32,
globnx: np.int32,
globny: np.int32,
globnz: np.int32,
min_nproc: np.int32 = np.int32(1),
nproc_x: np.int32 = np.int32(-1),
nproc_z: np.int32 = np.int32(-1),
):
"""
Ported from the source of emod3d. Casting enforces C like behaviour.
Calculates the number of processes to use along each axis of a velocity model.
The argument min_nproc sets the unit size, allowing for blocks of grid points to be assigned to subdomains, instead of individual points
    The nproc_x/z arguments are available to mimic options available in the C code. Not normally used.
:param nproc: The number of processes to be used.
:param globnx: The number of velocity model grid points along the x axis.
:param globny: The number of velocity model grid points along the y axis.
:param globnz: The number of velocity model grid points along the z axis.
:param min_nproc: Multiplier to perform calculations using cubes of min_nproc, defaults to 1.
:param nproc_x: The number of processes to use in the x direction. Set value above -1 to specify the number to use. Defaults to -1.
    :param nproc_z: The number of processes to use in the z direction. Set value above -1 to specify the number to use. Defaults to -1.
:return: A tuple containing:
The number of processes along the x axis
The number of processes along the y axis
The number of processes along the z axis
"""
inv_fmp = np.float32(1.0 / min_nproc)
fnp = np.float32(nproc)
fnx = np.float32(globnx)
fny = np.float32(globny)
fnz = np.float32(globnz)
if nproc_z < 0:
nproc_z = np.int32(
inv_fmp * fnz * np.exp(np.log(fnp / (fnx * fny * fnz)) / 3.0) + 0.5
)
if nproc_z < 1:
nproc_z = np.int32(1)
nproc_z = np.int32(min_nproc * nproc_z)
if nproc_x < 0:
nproc_x = np.int32(
inv_fmp * fnx * np.exp(np.log(fnp / (fnx * fny * nproc_z)) / 2.0) + 0.5
)
if nproc_x < 1:
nproc_x = np.int32(1)
nproc_x = np.int32(min_nproc * nproc_x)
nproc_y = np.int32(
inv_fmp * fnp / (np.float32(nproc_x) * np.float32(nproc_z)) + 0.5
)
if nproc_y < 1:
nproc_y = np.int32(1)
nproc_y = np.int32(min_nproc * nproc_y)
nproc_c = nproc_x * nproc_y * nproc_z
if nproc_c != nproc:
# Alternate method of calculating the processes distribution
ip3 = np.int32(np.exp(np.log(fnp) / 3.0) + 0.5)
ipt = np.int32(1)
while 2 * ipt <= ip3 and nproc % ipt == 0 and (nproc / ipt) % 2 == 0:
ipt = np.int32(2 * ipt)
nproc_z = ipt
np2 = np.int32(nproc / nproc_z)
ip2 = np.int32(np.exp(np.log(1.0 * np2) / 2.0) + 0.5)
ipt = np.int32(1)
while 2 * ipt <= ip2 and np2 % ipt == 0 and (np2 / ipt) % 2 == 0:
ipt = np.int32(2 * ipt)
nproc_x = np.int32(ipt)
nproc_y = np.int32(np2 / nproc_x)
return nproc_x, nproc_y, nproc_z
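# Illustrative call (added sketch; the grid sizes and core count are made-up values):
# get_nproc splits the requested core count across the three axes, and the product
# nproc_x * nproc_y * nproc_z is what the simulation will actually use.
def _example_nproc(n_cores=160, nx=1400, ny=1200, nz=460):
    return get_nproc(np.int32(n_cores), np.int32(nx), np.int32(ny), np.int32(nz))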
def test_domain(nx, ny, nz, nc):
"""
    Tests a given domain size and core count to check for grid points that won't be assigned to any subdomain
:param nx: The number of grid points in the x direction
:param ny: The number of grid points in the y direction
:param nz: The number of grid points in the z direction
:param nc: The number of cores to be used to perform the simulation
:return: Three arrays with the index of any unassigned grid lines. If all three are empty then the simulation will work as expected
"""
nproc_x, nproc_y, nproc_z = get_nproc(nc, nx, ny, nz)
x_indicies = np.arange(nproc_x - 1)
x_n1 = get_start_boundary(nx, nproc_x, x_indicies + 1)
x_n2 = get_end_boundary(nx, nproc_x, x_indicies)
y_indicies = np.arange(nproc_y - 1)
y_n1 = get_start_boundary(ny, nproc_y, y_indicies + 1)
y_n2 = get_end_boundary(ny, nproc_y, y_indicies)
z_indicies = np.arange(nproc_z - 1)
z_n1 = get_start_boundary(nz, nproc_z, z_indicies + 1)
z_n2 = get_end_boundary(nz, nproc_z, z_indicies)
x_mask = np.where(x_n1 + 2 != x_n2 - 2)[0]
y_mask = np.where(y_n1 + 2 != y_n2 - 2)[0]
z_mask = np.where(z_n1 + 2 != z_n2 - 2)[0]
return x_mask, y_mask, z_mask
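# Hedged usage sketch (added for illustration; the domain size and core count are
# assumed values): empty x and y results mean every surface grid line is assigned
# to some subdomain, so station waveform extraction should work as expected.
def _example_domain_check(nx=1400, ny=1200, nz=460, n_cores=160):
    x_missed, y_missed, z_missed = test_domain(nx, ny, nz, n_cores)
    return x_missed.size == 0 and y_missed.size == 0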
def load_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"n_cores", type=int, help="The number of cores used to perform the simulation"
)
parser.add_argument(
"nx", type=int, help="The number of grid points along the x axis"
)
parser.add_argument(
"ny", type=int, help="The number of grid points along the y axis"
)
parser.add_argument(
"nz", type=int, help="The number of grid points along the z axis"
)
return parser.parse_args()
def main():
"""
Uses the command line arguments provided to determine if the simulation will have any grid lines that are not associated with a subdomain.
    If any x or y gridlines are not associated they are printed to stdout, and the script exits with an exit code of 1.
    Otherwise the script exits with an exit code of 0.
    z gridlines are reported if any x or y gridlines are not associated; however, on their own they are not enough to cause failure.
"""
args = load_args()
x, y, z = test_domain(args.nx, args.ny, args.nz, args.n_cores)
if x.size + y.size > 0:
# We only care if there are issues on the surface layer
message_parts = []
if x.size > 0:
message_parts.append("Missed x axis indicies:")
message_parts.append(", ".join(x.astype(str)))
if y.size > 0:
message_parts.append("Missed y axis indicies:")
message_parts.append(", ".join(y.astype(str)))
if z.size > 0:
message_parts.append("Missed z axis indicies:")
message_parts.append(", ".join(z.astype(str)))
print(". ".join(message_parts))
return_code = 1
else:
return_code = 0
exit(return_code)
if __name__ == "__main__":
main()
| 39.714953
| 148
| 0.661019
|
794e0b91c7d4e89e92ab1c32d213aca185c2f386
| 142
|
py
|
Python
|
zeus/api/schemas/token.py
|
conrad-kronos/zeus
|
ddb6bc313e51fb22222b30822b82d76f37dbbd35
|
[
"Apache-2.0"
] | 221
|
2017-07-03T17:29:21.000Z
|
2021-12-07T19:56:59.000Z
|
zeus/api/schemas/token.py
|
conrad-kronos/zeus
|
ddb6bc313e51fb22222b30822b82d76f37dbbd35
|
[
"Apache-2.0"
] | 298
|
2017-07-04T18:08:14.000Z
|
2022-03-03T22:24:51.000Z
|
zeus/api/schemas/token.py
|
conrad-kronos/zeus
|
ddb6bc313e51fb22222b30822b82d76f37dbbd35
|
[
"Apache-2.0"
] | 24
|
2017-07-15T13:46:45.000Z
|
2020-08-16T16:14:45.000Z
|
from marshmallow import Schema, fields
class TokenSchema(Schema):
id = fields.UUID(dump_only=True)
key = fields.Str(dump_only=True)
| 20.285714
| 38
| 0.739437
|
794e0bcfa8aa7f3ba98a1ffab018817125a89e55
| 4,494
|
py
|
Python
|
built-in/ACL_TensorFlow/Official/cv/YOLOv2_for_ACL/scripts/yolov2_postprocess/script/Main.py
|
Ascend/modelzoo
|
f018cfed33dbb1cc2110b9ea2e233333f71cc509
|
[
"Apache-2.0"
] | 12
|
2020-12-13T08:34:24.000Z
|
2022-03-20T15:17:17.000Z
|
built-in/ACL_TensorFlow/Official/cv/YOLOv2_for_ACL/scripts/yolov2_postprocess/script/Main.py
|
Ascend/modelzoo
|
f018cfed33dbb1cc2110b9ea2e233333f71cc509
|
[
"Apache-2.0"
] | 1
|
2022-01-20T03:11:05.000Z
|
2022-01-20T06:53:39.000Z
|
built-in/ACL_TensorFlow/Official/cv/YOLOv2_for_ACL/scripts/yolov2_postprocess/script/Main.py
|
Ascend/modelzoo
|
f018cfed33dbb1cc2110b9ea2e233333f71cc509
|
[
"Apache-2.0"
] | 2
|
2021-07-10T12:40:46.000Z
|
2021-12-17T07:55:15.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
import cv2
from PIL import Image
from model_darknet19 import darknet
from decode import decode
from utils import preprocess_image, postprocess, draw_detection
from config import anchors, class_names
import os
def main():
labels_to_names={
0: 'person', 1: 'bicycle', 2: 'car', 3: 'motorbike', 4: 'aeroplane', 5: 'bus', 6: 'train', 7: 'truck',8: 'boat', 9:'traffic light', 10:'fire hydrant', 11:'stop sign',12:'parking meter', 13:'bench', 14:'bird',
15:'cat',16: 'dog',17:'horse', 18:'sheep',19:'cow',20:'elephant',21:'bear',22:'zebra',23:'giraffe',24:'backpack',25:'umbrella',26:'bandbag',27:'tie',28:'suitcase',29:'frisbee',30:'skis',31:'snowboard',32:'sports ball',
33:'kite',34:'baseball bat',35:'baseball glove',36:'skateboard',37:'surfboard',38:'tennis racket',39:'bottle',40:'wine glass',41:'cup',42:'fork',43:'knife',44:'spoon',45:'bowl',46:'banana',47:'apple',48:'sandwich',
49:'orange',50:'broccoli',51:'carrot',52:'hot dog',53:'pizza',54:'donut',55:'cake',56:'chair',57:'couch',58:'pottedplant',59:'bed',60:'diningtable',61:'toilet',62:'tv',63:'laptop',64:'mouse',65:'remote',66:'keyboard',
67:'cellphone',68:'microwave',69:'oven',70:'toaster',71:'sink',72:'refrigerator',73:'book',74:'clock',75:'vase',76:'scissors',77:'teddy bear',78:'hair direr',79:'toothbrush'}
img_dir = "./data/pascal_voc/VOCdevkit/VOC2007_test/JPEGImages"
for filename in os.listdir(img_dir):
input_size = (416,416)
image = cv2.imread(img_dir + '/' + filename)
        image_shape = image.shape[:2] # keep only the spatial dimensions (h, w); drop the channel (=3)
        # copy, resize to 416*416, normalise, and add a batch-size dimension at axis 0
        image_cp = preprocess_image(image,input_size)
        tf.reset_default_graph() # needed or the second image raises an error; clears and resets the default graph
        # [1] Run the image through the darknet19 network to get feature maps, then decode them into
        #     bounding boxes (xmin/xmax form), confidences and class probabilities
        tf_image = tf.placeholder(tf.float32,[1,input_size[0],input_size[1],3])
        model_output = darknet(tf_image) # feature map output by the darknet19 network
        output_sizes = input_size[0]//32, input_size[1]//32 # the feature map is the input downsampled by a factor of 32
        output_decoded = decode(model_output=model_output,output_sizes=output_sizes,
                                num_class=len(class_names),anchors=anchors) # decode
model_path = "./yolov2_model/checkpoint_dir/yolo2_coco.ckpt"
saver = tf.train.Saver()
with tf.Session() as sess:
saver.restore(sess,model_path)
bboxes,obj_probs,class_probs = sess.run(output_decoded,feed_dict={tf_image:image_cp})
            # [2] Filter the decoded regression bounding boxes with NMS (post-processing)
bboxes,scores,class_max_index = postprocess(bboxes,obj_probs,class_probs,image_shape=image_shape)
label_path_txt = "./map_mul/detections_npu/"
with open(os.path.join(label_path_txt + filename.split('.')[0] + '.txt'), 'a+') as f:
for i in range(len(scores)):
if " " in labels_to_names[class_max_index[i]]:
labels_to_name = labels_to_names[class_max_index[i]].split(' ')[0] + labels_to_names[class_max_index[i]].split(' ')[1]
f.write(labels_to_name + " " + str(scores[i]) + " " + str(bboxes[i][0])+ " " + str(bboxes[i][1])+ " " + str(bboxes[i][2])+ " " + str(bboxes[i][3]) + '\n')
else:
f.write(labels_to_names[class_max_index[i]] + " " + str(scores[i]) + " " + str(bboxes[i][0])+ " " + str(bboxes[i][1])+ " " + str(bboxes[i][2])+ " " + str(bboxes[i][3]) + '\n')
            # [3] Draw the filtered bounding boxes
#print('-----',filename)
#img_detection = draw_detection(image, bboxes, scores, class_max_index, class_names)
#cv2.imwrite(f"./VOC2007_jpeg_demo/" + filename.split('.')[0]+'_' + "detection.jpg", img_detection)
print('YOLO_v2 detection has done!')
#cv2.imshow("detection_results", img_detection)
#cv2.waitKey(0)
if __name__ == '__main__':
main()
| 56.886076
| 222
| 0.650868
|
794e0c3c779d4934b68b718e8aafb84432f2eea0
| 485
|
py
|
Python
|
get_target_ip.py
|
indigo-dc/dodas-docker-img_sshtunnel
|
29326c7673b47079a776d78fe03be628391f13d6
|
[
"Apache-2.0"
] | 2
|
2018-07-17T14:53:35.000Z
|
2018-07-19T09:53:36.000Z
|
get_target_ip.py
|
DODAS-TS/docker-img_sshtunnel
|
29326c7673b47079a776d78fe03be628391f13d6
|
[
"Apache-2.0"
] | null | null | null |
get_target_ip.py
|
DODAS-TS/docker-img_sshtunnel
|
29326c7673b47079a776d78fe03be628391f13d6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import print_function
from sys import argv, exit
def get_target_host(string):
"""Print and return the first address of the target.
Example:
"10.10.42.197 10.10.42.203 10.10.42.204" -> "10.10.42.197"
"10.10.42.203" -> "10.10.42.203"
"""
tmp = string.split(" ")[0]
print(tmp, end="")
return tmp
def main():
get_target_host(argv[1])
if __name__ == '__main__':
exit(main())
| 22.045455
| 66
| 0.606186
|
794e0ce7e1db1d3f9bd3a40ed9d3e49242ebb873
| 7,788
|
py
|
Python
|
src/pallas/proxies.py
|
tomasbedrich/pallas
|
cd414f0ed0c4b5a39da21061b08f9e7a5b674c8b
|
[
"Apache-2.0"
] | null | null | null |
src/pallas/proxies.py
|
tomasbedrich/pallas
|
cd414f0ed0c4b5a39da21061b08f9e7a5b674c8b
|
[
"Apache-2.0"
] | null | null | null |
src/pallas/proxies.py
|
tomasbedrich/pallas
|
cd414f0ed0c4b5a39da21061b08f9e7a5b674c8b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 Akamai Technologies, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Proxies to AWS Athena APIs.
The proxies are internal classes used by the :class:`.Athena` client
to issue requests to AWS.
"""
from __future__ import annotations
import logging
from abc import ABCMeta, abstractmethod
from typing import Any, Dict, Mapping, Optional, Sequence, cast
import boto3
from pallas.csv import read_csv
from pallas.info import QueryInfo
from pallas.results import QueryResults
from pallas.storage.s3 import s3_parse_uri, s3_wrap_body
from pallas.utils import truncate_str
logger = logging.getLogger("pallas")
ColumnNames = Sequence[str]
ColumnTypes = Sequence[str]
Row = Sequence[Optional[str]]
class AthenaProxy(metaclass=ABCMeta):
"""
Proxy to AWS Athena.
This is an internal interface that is used by the :class:`.Athena` client.
The :class:`.Boto3Proxy` implementation will be used in most cases,
but it can be replaced by :class:`.FakeProxy` for testing.
"""
@abstractmethod
def start_query_execution(
self,
sql: str,
*,
database: Optional[str] = None,
workgroup: Optional[str] = None,
output_location: Optional[str] = None,
) -> str:
"""
Submit a query.
:param sql: an SQL query to be executed
:param database: a name of Athena database to be queried
:param workgroup: a name of Athena workgroup
:param output_location: URI of output location on S3
:return: execution_id
"""
@abstractmethod
def get_query_execution(self, execution_id: str) -> QueryInfo:
"""
Retrieve information about a query execution.
Returns a status of the query with other information.
"""
@abstractmethod
def get_query_results(self, info: QueryInfo) -> QueryResults:
"""
Retrieve results of a query execution.
Waits until the query execution finishes and downloads results.
"""
@abstractmethod
def stop_query_execution(self, execution_id: str) -> None:
"""
Kill a query execution.
"""
class Boto3Proxy(AthenaProxy):
"""
Proxy to AWS Athena using the boto3 library.
This is an internal class that is used by the :class:`.Athena` client.
It can be replaced by :class:`.FakeProxy` for testing.
"""
_athena_client: Any # boto3 Athena client
_s3_client: Any # boto3 S3 client
def __init__(
self,
*,
region: Optional[str] = None,
athena_client: Optional[Any] = None,
s3_client: Optional[Any] = None,
) -> None:
if athena_client is None:
athena_client = boto3.client("athena", region_name=region)
if s3_client is None:
s3_client = boto3.client("s3")
self._athena_client = athena_client
self._s3_client = s3_client
def start_query_execution(
self,
sql: str,
database: Optional[str] = None,
workgroup: Optional[str] = None,
output_location: Optional[str] = None,
) -> str:
params: Dict[str, Any] = dict(QueryString=sql)
if database is not None:
params.update(QueryExecutionContext={"Database": database})
if workgroup is not None:
params.update(WorkGroup=workgroup)
if output_location is not None:
params.update(ResultConfiguration={"OutputLocation": output_location})
logger.info(f"Athena StartQueryExecution: QueryString={truncate_str(sql)!r}")
response = self._athena_client.start_query_execution(**params)
execution_id = cast(str, response["QueryExecutionId"])
logger.info(f"Athena QueryExecutionId={execution_id!r} started.")
return execution_id
def get_query_execution(self, execution_id: str) -> QueryInfo:
logger.info(f"Athena GetQueryExecution: QueryExecutionId={execution_id!r}")
response = self._athena_client.get_query_execution(
QueryExecutionId=execution_id
)
info = QueryInfo(response["QueryExecution"])
logger.info(f"Athena QueryExecution: {info}")
return info
def get_query_results(self, info: QueryInfo) -> QueryResults:
execution_id = info.execution_id
params = dict(QueryExecutionId=execution_id)
logger.info(f"Athena GetQueryResults: QueryExecutionId={execution_id!r}")
response = self._athena_client.get_query_results(**params)
column_names = _read_column_names(response)
column_types = _read_column_types(response)
if response.get("NextToken"):
logger.info("Athena ResultSet paginated. Will download from S3.")
data = self._download_data(info)
else:
data = _read_data(response)
logger.info(
f"Athena ResultSet complete: {len(data)} rows (including header)"
)
fixed_data = _fix_data(column_names, data)
return QueryResults(column_names, column_types, fixed_data)
def stop_query_execution(self, execution_id: str) -> None:
logger.info(f"Athena StopQueryExecution: QueryExecutionId={execution_id!r}")
self._athena_client.stop_query_execution(QueryExecutionId=execution_id)
def _download_data(self, info: QueryInfo) -> Sequence[Row]:
output_location = info.output_location
bucket, key = s3_parse_uri(output_location)
params = dict(Bucket=bucket, Key=key)
logger.info(f"S3 GetObject:" f" Bucket={bucket!r} Key={key!r}")
response = self._s3_client.get_object(**params)
with s3_wrap_body(response["Body"]) as stream:
data = list(read_csv(stream))
logger.info(f"S3 Body downloaded: {len(data)} rows (including header)")
return data
def _fix_data(column_names: ColumnNames, data: Sequence[Row]) -> Sequence[Row]:
"""
Fix malformed data returned from Athena.
Queries executed by Presto (typically queries with SELECT)
repeat column names in the first row of data,
so we have to remove them.
Queries by Hive (typically queries with DESCRIBE)
do not repeat column names, but all columns are combined to one.
Try to fix both of the above problems here.
"""
if data and data[0] == column_names:
# DQL, SELECT statements executed by Presto
data = data[1:]
elif all(len(row) == 1 for row in data) and len(column_names) > 1:
# DCL, DESCRIBE statements executed by Hive
values = (row[0] for row in data if row[0] is not None)
data = [v.split("\t", maxsplit=len(column_names) - 1) for v in values]
return data
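# Worked illustration (added sketch, not part of the original module) of the two
# fix-ups described in the docstring above, using made-up rows.
def _example_fix_data():
    cols = ("col_name", "data_type")
    # Presto-style SELECT output repeats the header row in the data.
    presto_rows = [("col_name", "data_type"), ("id", "bigint")]
    assert _fix_data(cols, presto_rows) == [("id", "bigint")]
    # Hive-style DESCRIBE output packs all columns into one tab-separated value.
    hive_rows = [("id\tbigint",)]
    assert _fix_data(cols, hive_rows) == [["id", "bigint"]]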
def _read_column_names(response: Mapping[str, Any]) -> ColumnNames:
column_info = response["ResultSet"]["ResultSetMetadata"]["ColumnInfo"]
return tuple(column["Name"] for column in column_info)
def _read_column_types(response: Mapping[str, Any]) -> ColumnTypes:
column_info = response["ResultSet"]["ResultSetMetadata"]["ColumnInfo"]
return tuple(column["Type"] for column in column_info)
def _read_data(response: Mapping[str, Any]) -> Sequence[Row]:
rows = response["ResultSet"]["Rows"]
return [tuple(item.get("VarCharValue") for item in row["Data"]) for row in rows]
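# Hedged usage sketch (added, not part of the original module): driving the proxy
# interface directly with only the methods defined above. The SQL, database name,
# region and output location are illustrative assumptions; the real Athena client
# adds waiting and caching on top of these calls.
def _example_proxy_usage():
    proxy = Boto3Proxy(region="us-east-1")
    execution_id = proxy.start_query_execution(
        "SELECT 1", database="example_db", output_location="s3://example-bucket/results/"
    )
    info = proxy.get_query_execution(execution_id)  # status and metadata for the execution
    return proxy.get_query_results(info)            # downloads the result rows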
| 35.561644
| 85
| 0.673986
|
794e0d28a1303bad42132ea0116c960a95b98724
| 1,964
|
py
|
Python
|
patrole_tempest_plugin/tests/api/compute/test_config_drive_rbac.py
|
TaruniSurampally/testpatrolev
|
7c7fe1cad2967e0be84ca74b9a200ae2fde356db
|
[
"Apache-2.0"
] | null | null | null |
patrole_tempest_plugin/tests/api/compute/test_config_drive_rbac.py
|
TaruniSurampally/testpatrolev
|
7c7fe1cad2967e0be84ca74b9a200ae2fde356db
|
[
"Apache-2.0"
] | null | null | null |
patrole_tempest_plugin/tests/api/compute/test_config_drive_rbac.py
|
TaruniSurampally/testpatrolev
|
7c7fe1cad2967e0be84ca74b9a200ae2fde356db
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 AT&T Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from tempest import test
from patrole_tempest_plugin import rbac_rule_validation
from patrole_tempest_plugin.tests.api.compute import rbac_base
class ConfigDriveRbacTest(rbac_base.BaseV2ComputeRbacTest):
@classmethod
def setup_clients(cls):
super(ConfigDriveRbacTest, cls).setup_clients()
cls.client = cls.servers_client
@classmethod
def skip_checks(cls):
super(ConfigDriveRbacTest, cls).skip_checks()
if not test.is_extension_enabled('os-config-drive', 'compute'):
msg = "%s skipped as os-config-drive extension not enabled." \
% cls.__name__
raise cls.skipException(msg)
@decorators.idempotent_id('55c62ef7-b72b-4970-acc6-05b0a4316e5d')
@rbac_rule_validation.action(
service="nova",
rule="os_compute_api:os-config-drive")
def test_create_test_server_with_config_drive(self):
self.rbac_utils.switch_role(self, toggle_rbac_role=True)
# NOTE(felipemonteiro): This policy action is always enforced,
# regardless whether the config_drive flag is set to true or false.
# However, it has been explicitly set to true below, in case that this
# behavior ever changes in the future.
self.create_test_server(config_drive=True)
| 40.081633
| 78
| 0.717923
|
794e0d4504b8b81638ca4fffa8f5630e42bc53a9
| 5,042
|
py
|
Python
|
python/hostconfig/machines/alberto.py
|
AvciRecep/chaste_2019
|
1d46cdac647820d5c5030f8a9ea3a1019f6651c1
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 1
|
2020-04-05T12:11:54.000Z
|
2020-04-05T12:11:54.000Z
|
python/hostconfig/machines/alberto.py
|
AvciRecep/chaste_2019
|
1d46cdac647820d5c5030f8a9ea3a1019f6651c1
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
python/hostconfig/machines/alberto.py
|
AvciRecep/chaste_2019
|
1d46cdac647820d5c5030f8a9ea3a1019f6651c1
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 2
|
2020-04-05T14:26:13.000Z
|
2021-03-09T08:18:17.000Z
|
# Configuration
"""Copyright (c) 2005-2019, University of Oxford.
All rights reserved.
University of Oxford means the Chancellor, Masters and Scholars of the
University of Oxford, having an administrative office at Wellington
Square, Oxford OX1 2JD, UK.
This file is part of Chaste.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the University of Oxford nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import glob
import os
# Check which version of Ubuntu this is
fp = open('/etc/issue')
ubuntu_ver = list(map(int, fp.read().split()[1].split('.')))  # e.g. "11.04" -> [11, 4], so the version comparisons below work
fp.close()
petsc_ver = 3.1
petsc_path = '/usr/lib/petscdir/3.1/'
petsc_2_2_path = ''
petsc_build_name = 'linux-gnu-c-debug'
petsc_build_name_profile = petsc_build_name
petsc_build_name_optimized = 'linux-gnu-c-opt'
dealii_path = None
intel_path = None
icpc = 'icpc'
other_includepaths = ['/usr/include/metis/']
other_libpaths = []
libs_for_petsc = ['petsccontrib', 'X11',
'HYPRE', 'spooles', 'superlu',
'umfpack', 'amd' # Both for Umfpack
]
#Fixes (possibly temporary) for Natty
if ubuntu_ver >= [11, 4]:
    libs_for_petsc.extend(['HYPRE_utilities',
                           'HYPRE_struct_mv', 'HYPRE_struct_ls',
                           'HYPRE_sstruct_mv', 'HYPRE_sstruct_ls',
                           'HYPRE_IJ_mv', 'HYPRE_parcsr_ls', 'dmumps'])
if petsc_ver >= 3:
libs_for_petsc.append('scotch')
else:
libs_for_petsc.append('sidl')
if petsc_ver >= 3.1:
libs_for_petsc.remove('petsccontrib')
boost_libs = ['boost_serialization', 'boost_filesystem']
if ubuntu_ver >= [10,10]:
boost_libs.append('boost_system')
if ubuntu_ver >= [9,10]:
    boost_libs = [l + '-mt' for l in boost_libs]  # keep it a list so it can be concatenated below
other_libraries = libs_for_petsc + boost_libs + \
['xerces-c',
'hdf5', 'z',
'parmetis', 'metis']
# Figure out which lapack/blas packages are actually installed!
if os.path.exists('/usr/lib/liblapack-3.so'):
blas_lapack = ['lapack-3', 'blas-3']
else:
blas_lapack = ['lapack', 'blas']
tools = {'xsd': '/usr/bin/xsdcxx',
'mpirun': '/usr/bin/mpirun.openmpi',
'mpicxx': '/usr/bin/mpic++.openmpi'}
def Configure(prefs, build):
"""Set up the build configuring.
prefs can specify which version of various libraries we should use, and which optional libraries.
VTK and CVODE support default on if they are installed.
build is an instance of BuildTypes.BuildType.
"""
global use_cvode
global use_vtk
# Extra libraries for VTK output
    vtk_include_path = list(filter(os.path.isdir, glob.glob('/usr/include/vtk-5*')))
use_vtk = int(prefs.get('use-vtk', True))
use_vtk = use_vtk and bool(vtk_include_path)
if use_vtk:
        # Note: 10.10 uses VTK 5.4, 10.04 uses 5.2, and earlier releases use 5.0
other_includepaths.extend(vtk_include_path)
other_libraries.extend(['vtkIO', 'vtkCommon', 'vtkGraphics', 'z'])
if ubuntu_ver >= [11,10]: # 11.10 uses VTK 5.6
other_libraries.extend(['vtkFiltering'])
# Is CVODE installed?
use_cvode = int(prefs.get('use-cvode', True))
use_cvode = use_cvode and os.path.exists('/usr/lib/libsundials_cvode.so')
    if ubuntu_ver <= [9, 4]:
# We don't support CVODE 2.4
use_cvode = False
if use_cvode:
DetermineCvodeVersion('/usr/include')
other_libraries.extend(['sundials_cvode', 'sundials_nvecserial'])
# Is Aquila installed?
use_aquila = os.path.exists('/usr/local/lib/libAquila.a')
if use_aquila:
other_libpaths.extend([os.getcwd()+'/projects/alberto/aquila/aquila-build/lib'])
other_libraries.extend(['Aquila', 'Ooura_fft'])
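
# --- Illustrative usage sketch (not part of the original hostconfig) ----------------
# In a real build, Chaste's SCons machinery imports this module and calls
# Configure(prefs, build) itself, passing the user's preferences and a BuildTypes
# build object.  The guard below is only a hedged smoke test with assumed minimal
# inputs (VTK and CVODE disabled so no optional libraries are probed); 'build' is not
# used by this particular Configure(), so None is passed for it.
if __name__ == '__main__':
    Configure({'use-vtk': 0, 'use-cvode': 0}, None)
    print('other_libraries = %s' % other_libraries)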
| 37.909774
| 101
| 0.692781
|
794e0d6859301cd169654a327d1cf6f080803eaf
| 1,029
|
py
|
Python
|
pointnet2/models/pvd/modules/functional/backend.py
|
ZhaoyangLyu/Point_Diffusion_Refinement
|
857fcd176dcc9c1a93a9fec27390502fa6c9e29d
|
[
"Apache-2.0"
] | 24
|
2021-12-29T11:28:34.000Z
|
2022-03-27T15:20:46.000Z
|
pointnet2/models/pvd/modules/functional/backend.py
|
ZhaoyangLyu/Point_Diffusion_Refinement
|
857fcd176dcc9c1a93a9fec27390502fa6c9e29d
|
[
"Apache-2.0"
] | 1
|
2021-11-03T08:53:26.000Z
|
2021-11-03T08:53:26.000Z
|
modules/functional/backend.py
|
alexzhou907/PVD
|
9747265a5f141e5546fd4f862bfa66aa59f1bd33
|
[
"MIT"
] | 4
|
2021-11-02T02:17:29.000Z
|
2022-03-20T11:58:29.000Z
|
import os
from torch.utils.cpp_extension import load
_src_path = os.path.dirname(os.path.abspath(__file__))
_backend = load(name='_pvcnn_backend',
extra_cflags=['-O3', '-std=c++17'],
extra_cuda_cflags=['--compiler-bindir=/usr/bin/gcc-8'],
sources=[os.path.join(_src_path,'src', f) for f in [
'ball_query/ball_query.cpp',
'ball_query/ball_query.cu',
'grouping/grouping.cpp',
'grouping/grouping.cu',
'interpolate/neighbor_interpolate.cpp',
'interpolate/neighbor_interpolate.cu',
'interpolate/trilinear_devox.cpp',
'interpolate/trilinear_devox.cu',
'sampling/sampling.cpp',
'sampling/sampling.cu',
'voxelization/vox.cpp',
'voxelization/vox.cu',
'bindings.cpp',
]]
)
__all__ = ['_backend']
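
# --- Hedged usage sketch (not part of the original module) --------------------------
# Importing this module triggers torch.utils.cpp_extension.load(), which JIT-compiles
# the C++/CUDA sources listed above on first use (this assumes nvcc, gcc-8 and a
# writable torch extension cache are available).  The callables exposed on _backend
# are defined in bindings.cpp and are not repeated here, so this sketch only lists
# whatever the compiled extension actually exports.
if __name__ == '__main__':
    print([name for name in dir(_backend) if not name.startswith('_')])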
| 38.111111
| 71
| 0.504373
|
794e0dc338ab6f223907301e4ce95b9d1befda6c
| 821
|
py
|
Python
|
gui/helpers.py
|
tfrere/music-to-led-server
|
55f64269ce012833508d574ad29e42b0486884f0
|
[
"MIT"
] | 1
|
2021-05-12T23:52:02.000Z
|
2021-05-12T23:52:02.000Z
|
gui/helpers.py
|
tfrere/music-to-led-server
|
55f64269ce012833508d574ad29e42b0486884f0
|
[
"MIT"
] | null | null | null |
gui/helpers.py
|
tfrere/music-to-led-server
|
55f64269ce012833508d574ad29e42b0486884f0
|
[
"MIT"
] | null | null | null |
import os, psutil, math
def clearTerminal():
os.system('cls' if os.name == 'nt' else 'clear')
def rgbToAnsi256(r, g, b):
if (r == g and g == b):
if (r < 8):
return 16
if (r > 248):
return 231
return round(((r - 8) / 247) * 24) + 232
ansi = 16 + (36 * round(r / 255 * 5)) + (6 * round(g / 255 * 5)) + round(b / 255 * 5)
return ansi
def getCpuInPercent():
return psutil.cpu_percent()
def getVirtualMemoryConsumtion():
"""
# {
# "total": 8589934592,
# "available": 2707013632,
# "percent": 68.5,
# "used": 4336054272,
# "free": 39534592,
# "active": 2670501888,
# "inactive": 2652979200,
# "wired": 1665552384
# }
"""
return dict(psutil.virtual_memory()._asdict())
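
# --- Illustrative demo (not part of the original helpers) ---------------------------
# rgbToAnsi256() maps an RGB triple onto the xterm 256-colour palette; the expected
# values below follow directly from the formula above (greyscale extremes map to 16
# and 231, pure red maps to 196).
if __name__ == '__main__':
    print(rgbToAnsi256(0, 0, 0))        # 16  -> black
    print(rgbToAnsi256(255, 255, 255))  # 231 -> white
    print(rgbToAnsi256(255, 0, 0))      # 196 -> red
    print(getCpuInPercent())
    print(getVirtualMemoryConsumtion())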
| 24.878788
| 89
| 0.504263
|
794e0e04efc61201f27f5cb5188839358329d0d1
| 2,979
|
py
|
Python
|
tools/dummymaker/dummymaker/__main__.py
|
yamanalab/SecureSideEffectSearch
|
e223ad0f8cc5b5097af5a6da841f128e4783bd26
|
[
"Apache-2.0"
] | null | null | null |
tools/dummymaker/dummymaker/__main__.py
|
yamanalab/SecureSideEffectSearch
|
e223ad0f8cc5b5097af5a6da841f128e4783bd26
|
[
"Apache-2.0"
] | 3
|
2021-11-02T11:56:25.000Z
|
2021-11-03T10:06:39.000Z
|
tools/dummymaker/dummymaker/__main__.py
|
yamanalab/SecureSideEffectSearch
|
e223ad0f8cc5b5097af5a6da841f128e4783bd26
|
[
"Apache-2.0"
] | null | null | null |
from dummymaker import config
from dummymaker import population_stats
import json
import csv
from datetime import datetime
import numpy as np
class DummyMaker(object):
def __init__(self):
self.config = config.Config()
self.popstats = population_stats.PopulationStats()
def _run_impl(self):
dummy = {'records': []}
# id,medicine_id,symptom_id,symptom_orthographical_variant_id,hit,evaluation,shop_id,patient_id,age,gender,note,created_at
for rec_id in range(self.config.required_count):
record = {'id': rec_id + 1}
record['medicine_id'] = self.config.med_dist.get_problist(20)
record['patient_id'], gender_age = self.popstats.get_res_line()
record['gender'] = 1 if gender_age.gender == "male" else 2
record['age'] = gender_age.age
record['symptom_id'] = self.config.side_dist.get_problist(5)
dummy['records'].append(record)
if self.config.write_method == 'csv':
with open('data.csv', 'w+') as f:
w = csv.writer(f, delimiter=",")
headers = [
"id", "medicine_id", "symptom_id",
"symptom_orthographical_variant_id", "hit", "evaluation",
"shop_id", "patient_id", "age", "gender", "note",
"created_at"
]
w.writerow(headers)
for rec in dummy['records']:
for medicine_id in rec['medicine_id']:
for symptom_id in rec['symptom_id']:
now = datetime.now()
w.writerow([
str(x) for x in [
rec['id'],
medicine_id + 1,
symptom_id + 1,
"",
# TODO(musaprg): Stop using rand function, and consider based on the frequency of the side-effect
np.random.randint(0, 2),
"",
"myownshop",
rec['patient_id'] + 1,
rec['age'],
rec['gender'],
"",
now.strftime("%Y-%m-%d %H:%M:%S")
]
])
elif self.config.write_method == 'json':
with open('data.json', 'w+') as f:
json.dump(dummy, f, indent=2)
else:
raise AttributeError(
                'Please set a correct output method: "{}" is not among ["csv", "json"].'
.format(self.config.write_method))
def _main():
dummy_maker = DummyMaker()
dummy_maker._run_impl()
if __name__ == '__main__':
_main()
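
# --- Hedged usage note (not part of the original tool) ------------------------------
# Because this is the package's __main__ module, the intended entry point is
#     python -m dummymaker
# run from tools/dummymaker/.  Depending on config.Config().write_method it writes
# either data.csv or data.json into the current directory; an equivalent in-process
# call (assuming Config() can be constructed with its defaults) would be:
#     from dummymaker.__main__ import DummyMaker
#     DummyMaker()._run_impl()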
| 39.72
| 133
| 0.461564
|
794e0faae7afe5e5491f6f0f3f25d4508817db69
| 81,116
|
py
|
Python
|
django/db/models/query.py
|
louiseGrandjonc/django
|
54ea290e5bbd19d87bd8dba807738eeeaf01a362
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 1
|
2019-11-22T07:10:33.000Z
|
2019-11-22T07:10:33.000Z
|
django/db/models/query.py
|
louiseGrandjonc/django
|
54ea290e5bbd19d87bd8dba807738eeeaf01a362
|
[
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null |
django/db/models/query.py
|
louiseGrandjonc/django
|
54ea290e5bbd19d87bd8dba807738eeeaf01a362
|
[
"PSF-2.0",
"BSD-3-Clause"
] | 1
|
2019-11-09T18:53:01.000Z
|
2019-11-09T18:53:01.000Z
|
"""
The main QuerySet implementation. This provides the public API for the ORM.
"""
import copy
import operator
import warnings
from collections import namedtuple
from functools import lru_cache
from itertools import chain
from django.conf import settings
from django.core import exceptions
from django.db import (
DJANGO_VERSION_PICKLE_KEY, IntegrityError, connections, router,
transaction,
)
from django.db.models import DateField, DateTimeField, sql
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.db.models.expressions import Case, Expression, F, Value, When
from django.db.models.fields import AutoField
from django.db.models.functions import Cast, Trunc
from django.db.models.query_utils import FilteredRelation, InvalidQuery, Q
from django.db.models.sql.constants import CURSOR, GET_ITERATOR_CHUNK_SIZE
from django.db.utils import NotSupportedError
from django.utils import timezone
from django.utils.functional import cached_property, partition
from django.utils.version import get_version
# The maximum number of results to fetch in a get() query.
MAX_GET_RESULTS = 21
# The maximum number of items to display in a QuerySet.__repr__
REPR_OUTPUT_SIZE = 20
class BaseIterable:
def __init__(self, queryset, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE):
self.queryset = queryset
self.chunked_fetch = chunked_fetch
self.chunk_size = chunk_size
class ModelIterable(BaseIterable):
"""Iterable that yields a model instance for each row."""
def __iter__(self):
queryset = self.queryset
db = queryset.db
compiler = queryset.query.get_compiler(using=db)
# Execute the query. This will also fill compiler.select, klass_info,
# and annotations.
results = compiler.execute_sql(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size)
select, klass_info, annotation_col_map = (compiler.select, compiler.klass_info,
compiler.annotation_col_map)
model_cls = klass_info['model']
select_fields = klass_info['select_fields']
model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1
init_list = [f[0].target.attname
for f in select[model_fields_start:model_fields_end]]
related_populators = get_related_populators(klass_info, select, db)
known_related_objects = [
(field, related_objs, operator.attrgetter(*[
field.attname
if from_field == 'self' else
queryset.model._meta.get_field(from_field).attname
for from_field in field.from_fields
])) for field, related_objs in queryset._known_related_objects.items()
]
for row in compiler.results_iter(results):
obj = model_cls.from_db(db, init_list, row[model_fields_start:model_fields_end])
for rel_populator in related_populators:
rel_populator.populate(row, obj)
if annotation_col_map:
for attr_name, col_pos in annotation_col_map.items():
setattr(obj, attr_name, row[col_pos])
# Add the known related objects to the model.
for field, rel_objs, rel_getter in known_related_objects:
# Avoid overwriting objects loaded by, e.g., select_related().
if field.is_cached(obj):
continue
rel_obj_id = rel_getter(obj)
try:
rel_obj = rel_objs[rel_obj_id]
except KeyError:
pass # May happen in qs1 | qs2 scenarios.
else:
setattr(obj, field.name, rel_obj)
yield obj
class ValuesIterable(BaseIterable):
"""
Iterable returned by QuerySet.values() that yields a dict for each row.
"""
def __iter__(self):
queryset = self.queryset
query = queryset.query
compiler = query.get_compiler(queryset.db)
# extra(select=...) cols are always at the start of the row.
names = [
*query.extra_select,
*query.values_select,
*query.annotation_select,
]
indexes = range(len(names))
for row in compiler.results_iter(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size):
yield {names[i]: row[i] for i in indexes}
class ValuesListIterable(BaseIterable):
"""
Iterable returned by QuerySet.values_list(flat=False) that yields a tuple
for each row.
"""
def __iter__(self):
queryset = self.queryset
query = queryset.query
compiler = query.get_compiler(queryset.db)
if queryset._fields:
# extra(select=...) cols are always at the start of the row.
names = [
*query.extra_select,
*query.values_select,
*query.annotation_select,
]
fields = [*queryset._fields, *(f for f in query.annotation_select if f not in queryset._fields)]
if fields != names:
# Reorder according to fields.
index_map = {name: idx for idx, name in enumerate(names)}
rowfactory = operator.itemgetter(*[index_map[f] for f in fields])
return map(
rowfactory,
compiler.results_iter(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size)
)
return compiler.results_iter(tuple_expected=True, chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size)
class NamedValuesListIterable(ValuesListIterable):
"""
Iterable returned by QuerySet.values_list(named=True) that yields a
namedtuple for each row.
"""
@staticmethod
@lru_cache()
def create_namedtuple_class(*names):
# Cache namedtuple() with @lru_cache() since it's too slow to be
# called for every QuerySet evaluation.
return namedtuple('Row', names)
def __iter__(self):
queryset = self.queryset
if queryset._fields:
names = queryset._fields
else:
query = queryset.query
names = [*query.extra_select, *query.values_select, *query.annotation_select]
tuple_class = self.create_namedtuple_class(*names)
new = tuple.__new__
for row in super().__iter__():
yield new(tuple_class, row)
class FlatValuesListIterable(BaseIterable):
"""
Iterable returned by QuerySet.values_list(flat=True) that yields single
values.
"""
def __iter__(self):
queryset = self.queryset
compiler = queryset.query.get_compiler(queryset.db)
for row in compiler.results_iter(chunked_fetch=self.chunked_fetch, chunk_size=self.chunk_size):
yield row[0]
class QuerySet:
"""Represent a lazy database lookup for a set of objects."""
def __init__(self, model=None, query=None, using=None, hints=None):
self.model = model
self._db = using
self._hints = hints or {}
self.query = query or sql.Query(self.model)
self._result_cache = None
self._sticky_filter = False
self._for_write = False
self._prefetch_related_lookups = ()
self._prefetch_done = False
self._known_related_objects = {} # {rel_field: {pk: rel_obj}}
self._iterable_class = ModelIterable
self._fields = None
def as_manager(cls):
# Address the circular dependency between `Queryset` and `Manager`.
from django.db.models.manager import Manager
manager = Manager.from_queryset(cls)()
manager._built_with_as_manager = True
return manager
as_manager.queryset_only = True
as_manager = classmethod(as_manager)
########################
# PYTHON MAGIC METHODS #
########################
def __deepcopy__(self, memo):
"""Don't populate the QuerySet's cache."""
obj = self.__class__()
for k, v in self.__dict__.items():
if k == '_result_cache':
obj.__dict__[k] = None
else:
obj.__dict__[k] = copy.deepcopy(v, memo)
return obj
def __getstate__(self):
# Force the cache to be fully populated.
self._fetch_all()
return {**self.__dict__, DJANGO_VERSION_PICKLE_KEY: get_version()}
def __setstate__(self, state):
msg = None
pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
if pickled_version:
current_version = get_version()
if current_version != pickled_version:
msg = (
"Pickled queryset instance's Django version %s does not "
"match the current version %s." % (pickled_version, current_version)
)
else:
msg = "Pickled queryset instance's Django version is not specified."
if msg:
warnings.warn(msg, RuntimeWarning, stacklevel=2)
self.__dict__.update(state)
def __repr__(self):
data = list(self[:REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return '<%s %r>' % (self.__class__.__name__, data)
def __len__(self):
self._fetch_all()
return len(self._result_cache)
def __iter__(self):
"""
The queryset iterator protocol uses three nested iterators in the
default case:
1. sql.compiler.execute_sql()
- Returns 100 rows at time (constants.GET_ITERATOR_CHUNK_SIZE)
using cursor.fetchmany(). This part is responsible for
doing some column masking, and returning the rows in chunks.
2. sql.compiler.results_iter()
- Returns one row at time. At this point the rows are still just
tuples. In some cases the return values are converted to
Python values at this location.
3. self.iterator()
- Responsible for turning the rows into model objects.
"""
self._fetch_all()
return iter(self._result_cache)
def __bool__(self):
self._fetch_all()
return bool(self._result_cache)
def __getitem__(self, k):
"""Retrieve an item or slice from the set of results."""
if not isinstance(k, (int, slice)):
raise TypeError(
'QuerySet indices must be integers or slices, not %s.'
% type(k).__name__
)
assert ((not isinstance(k, slice) and (k >= 0)) or
(isinstance(k, slice) and (k.start is None or k.start >= 0) and
(k.stop is None or k.stop >= 0))), \
"Negative indexing is not supported."
if self._result_cache is not None:
return self._result_cache[k]
if isinstance(k, slice):
qs = self._chain()
if k.start is not None:
start = int(k.start)
else:
start = None
if k.stop is not None:
stop = int(k.stop)
else:
stop = None
qs.query.set_limits(start, stop)
return list(qs)[::k.step] if k.step else qs
qs = self._chain()
qs.query.set_limits(k, k + 1)
qs._fetch_all()
return qs._result_cache[0]
def __and__(self, other):
self._merge_sanity_check(other)
if isinstance(other, EmptyQuerySet):
return other
if isinstance(self, EmptyQuerySet):
return self
combined = self._chain()
combined._merge_known_related_objects(other)
combined.query.combine(other.query, sql.AND)
return combined
def __or__(self, other):
self._merge_sanity_check(other)
if isinstance(self, EmptyQuerySet):
return other
if isinstance(other, EmptyQuerySet):
return self
query = self if self.query.can_filter() else self.model._base_manager.filter(pk__in=self.values('pk'))
combined = query._chain()
combined._merge_known_related_objects(other)
if not other.query.can_filter():
other = other.model._base_manager.filter(pk__in=other.values('pk'))
combined.query.combine(other.query, sql.OR)
return combined
####################################
# METHODS THAT DO DATABASE QUERIES #
####################################
def _iterator(self, use_chunked_fetch, chunk_size):
yield from self._iterable_class(self, chunked_fetch=use_chunked_fetch, chunk_size=chunk_size)
def iterator(self, chunk_size=2000):
"""
An iterator over the results from applying this QuerySet to the
database.
"""
if chunk_size <= 0:
raise ValueError('Chunk size must be strictly positive.')
use_chunked_fetch = not connections[self.db].settings_dict.get('DISABLE_SERVER_SIDE_CURSORS')
return self._iterator(use_chunked_fetch, chunk_size)
def aggregate(self, *args, **kwargs):
"""
Return a dictionary containing the calculations (aggregation)
over the current queryset.
If args is present the expression is passed as a kwarg using
the Aggregate object's default alias.
"""
if self.query.distinct_fields:
raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
self._validate_values_are_expressions((*args, *kwargs.values()), method_name='aggregate')
for arg in args:
# The default_alias property raises TypeError if default_alias
# can't be set automatically or AttributeError if it isn't an
# attribute.
try:
arg.default_alias
except (AttributeError, TypeError):
raise TypeError("Complex aggregates require an alias")
kwargs[arg.default_alias] = arg
query = self.query.chain()
for (alias, aggregate_expr) in kwargs.items():
query.add_annotation(aggregate_expr, alias, is_summary=True)
if not query.annotations[alias].contains_aggregate:
raise TypeError("%s is not an aggregate expression" % alias)
return query.get_aggregation(self.db, kwargs)
def count(self):
"""
Perform a SELECT COUNT() and return the number of records as an
integer.
If the QuerySet is already fully cached, return the length of the
cached results set to avoid multiple SELECT COUNT(*) calls.
"""
if self._result_cache is not None:
return len(self._result_cache)
return self.query.get_count(using=self.db)
def get(self, *args, **kwargs):
"""
Perform the query and return a single object matching the given
keyword arguments.
"""
clone = self._chain() if self.query.combinator else self.filter(*args, **kwargs)
if self.query.can_filter() and not self.query.distinct_fields:
clone = clone.order_by()
limit = None
if not clone.query.select_for_update or connections[clone.db].features.supports_select_for_update_with_limit:
limit = MAX_GET_RESULTS
clone.query.set_limits(high=limit)
num = len(clone)
if num == 1:
return clone._result_cache[0]
if not num:
raise self.model.DoesNotExist(
"%s matching query does not exist." %
self.model._meta.object_name
)
raise self.model.MultipleObjectsReturned(
'get() returned more than one %s -- it returned %s!' % (
self.model._meta.object_name,
num if not limit or num < limit else 'more than %s' % (limit - 1),
)
)
def create(self, **kwargs):
"""
Create a new object with the given kwargs, saving it to the database
and returning the created object.
"""
obj = self.model(**kwargs)
self._for_write = True
obj.save(force_insert=True, using=self.db)
return obj
def _populate_pk_values(self, objs):
for obj in objs:
if obj.pk is None:
obj.pk = obj._meta.pk.get_pk_value_on_save(obj)
def bulk_create(self, objs, batch_size=None, ignore_conflicts=False):
"""
Insert each of the instances into the database. Do *not* call
save() on each of the instances, do not send any pre/post_save
signals, and do not set the primary key attribute if it is an
autoincrement field (except if features.can_return_rows_from_bulk_insert=True).
Multi-table models are not supported.
"""
# When you bulk insert you don't get the primary keys back (if it's an
# autoincrement, except if can_return_rows_from_bulk_insert=True), so
# you can't insert into the child tables which references this. There
# are two workarounds:
# 1) This could be implemented if you didn't have an autoincrement pk
# 2) You could do it by doing O(n) normal inserts into the parent
# tables to get the primary keys back and then doing a single bulk
# insert into the childmost table.
# We currently set the primary keys on the objects when using
# PostgreSQL via the RETURNING ID clause. It should be possible for
# Oracle as well, but the semantics for extracting the primary keys is
# trickier so it's not done yet.
assert batch_size is None or batch_size > 0
# Check that the parents share the same concrete model with the our
# model to detect the inheritance pattern ConcreteGrandParent ->
# MultiTableParent -> ProxyChild. Simply checking self.model._meta.proxy
# would not identify that case as involving multiple tables.
for parent in self.model._meta.get_parent_list():
if parent._meta.concrete_model is not self.model._meta.concrete_model:
raise ValueError("Can't bulk create a multi-table inherited model")
if not objs:
return objs
self._for_write = True
connection = connections[self.db]
opts = self.model._meta
fields = opts.concrete_fields
objs = list(objs)
self._populate_pk_values(objs)
with transaction.atomic(using=self.db, savepoint=False):
objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs)
if objs_with_pk:
returned_columns = self._batched_insert(
objs_with_pk, fields, batch_size, ignore_conflicts=ignore_conflicts,
)
for obj_with_pk, results in zip(objs_with_pk, returned_columns):
for result, field in zip(results, opts.db_returning_fields):
if field != opts.pk:
setattr(obj_with_pk, field.attname, result)
for obj_with_pk in objs_with_pk:
obj_with_pk._state.adding = False
obj_with_pk._state.db = self.db
if objs_without_pk:
fields = [f for f in fields if not isinstance(f, AutoField)]
returned_columns = self._batched_insert(
objs_without_pk, fields, batch_size, ignore_conflicts=ignore_conflicts,
)
if connection.features.can_return_rows_from_bulk_insert and not ignore_conflicts:
assert len(returned_columns) == len(objs_without_pk)
for obj_without_pk, results in zip(objs_without_pk, returned_columns):
for result, field in zip(results, opts.db_returning_fields):
setattr(obj_without_pk, field.attname, result)
obj_without_pk._state.adding = False
obj_without_pk._state.db = self.db
return objs
def bulk_update(self, objs, fields, batch_size=None):
"""
Update the given fields in each of the given objects in the database.
"""
if batch_size is not None and batch_size < 0:
raise ValueError('Batch size must be a positive integer.')
if not fields:
raise ValueError('Field names must be given to bulk_update().')
objs = tuple(objs)
if any(obj.pk is None for obj in objs):
raise ValueError('All bulk_update() objects must have a primary key set.')
fields = [self.model._meta.get_field(name) for name in fields]
if any(not f.concrete or f.many_to_many for f in fields):
raise ValueError('bulk_update() can only be used with concrete fields.')
if any(f.primary_key for f in fields):
raise ValueError('bulk_update() cannot be used with primary key fields.')
if not objs:
return
# PK is used twice in the resulting update query, once in the filter
# and once in the WHEN. Each field will also have one CAST.
max_batch_size = connections[self.db].ops.bulk_batch_size(['pk', 'pk'] + fields, objs)
batch_size = min(batch_size, max_batch_size) if batch_size else max_batch_size
requires_casting = connections[self.db].features.requires_casted_case_in_updates
batches = (objs[i:i + batch_size] for i in range(0, len(objs), batch_size))
updates = []
for batch_objs in batches:
update_kwargs = {}
for field in fields:
when_statements = []
for obj in batch_objs:
attr = getattr(obj, field.attname)
if not isinstance(attr, Expression):
attr = Value(attr, output_field=field)
when_statements.append(When(pk=obj.pk, then=attr))
case_statement = Case(*when_statements, output_field=field)
if requires_casting:
case_statement = Cast(case_statement, output_field=field)
update_kwargs[field.attname] = case_statement
updates.append(([obj.pk for obj in batch_objs], update_kwargs))
with transaction.atomic(using=self.db, savepoint=False):
for pks, update_kwargs in updates:
self.filter(pk__in=pks).update(**update_kwargs)
bulk_update.alters_data = True
def get_or_create(self, defaults=None, **kwargs):
"""
Look up an object with the given kwargs, creating one if necessary.
Return a tuple of (object, created), where created is a boolean
specifying whether an object was created.
"""
# The get() needs to be targeted at the write database in order
# to avoid potential transaction consistency problems.
self._for_write = True
try:
return self.get(**kwargs), False
except self.model.DoesNotExist:
params = self._extract_model_params(defaults, **kwargs)
return self._create_object_from_params(kwargs, params)
def update_or_create(self, defaults=None, **kwargs):
"""
Look up an object with the given kwargs, updating one with defaults
if it exists, otherwise create a new one.
Return a tuple (object, created), where created is a boolean
specifying whether an object was created.
"""
defaults = defaults or {}
self._for_write = True
with transaction.atomic(using=self.db):
try:
obj = self.select_for_update().get(**kwargs)
except self.model.DoesNotExist:
params = self._extract_model_params(defaults, **kwargs)
# Lock the row so that a concurrent update is blocked until
# after update_or_create() has performed its save.
obj, created = self._create_object_from_params(kwargs, params, lock=True)
if created:
return obj, created
for k, v in defaults.items():
setattr(obj, k, v() if callable(v) else v)
obj.save(using=self.db)
return obj, False
def _create_object_from_params(self, lookup, params, lock=False):
"""
Try to create an object using passed params. Used by get_or_create()
and update_or_create().
"""
try:
with transaction.atomic(using=self.db):
params = {k: v() if callable(v) else v for k, v in params.items()}
obj = self.create(**params)
return obj, True
except IntegrityError as e:
try:
qs = self.select_for_update() if lock else self
return qs.get(**lookup), False
except self.model.DoesNotExist:
pass
raise e
def _extract_model_params(self, defaults, **kwargs):
"""
Prepare `params` for creating a model instance based on the given
kwargs; for use by get_or_create() and update_or_create().
"""
defaults = defaults or {}
params = {k: v for k, v in kwargs.items() if LOOKUP_SEP not in k}
params.update(defaults)
property_names = self.model._meta._property_names
invalid_params = []
for param in params:
try:
self.model._meta.get_field(param)
except exceptions.FieldDoesNotExist:
# It's okay to use a model's property if it has a setter.
if not (param in property_names and getattr(self.model, param).fset):
invalid_params.append(param)
if invalid_params:
raise exceptions.FieldError(
"Invalid field name(s) for model %s: '%s'." % (
self.model._meta.object_name,
"', '".join(sorted(invalid_params)),
))
return params
def _earliest(self, *fields):
"""
Return the earliest object according to fields (if given) or by the
model's Meta.get_latest_by.
"""
if fields:
order_by = fields
else:
order_by = getattr(self.model._meta, 'get_latest_by')
if order_by and not isinstance(order_by, (tuple, list)):
order_by = (order_by,)
if order_by is None:
raise ValueError(
"earliest() and latest() require either fields as positional "
"arguments or 'get_latest_by' in the model's Meta."
)
assert not self.query.is_sliced, \
"Cannot change a query once a slice has been taken."
obj = self._chain()
obj.query.set_limits(high=1)
obj.query.clear_ordering(force_empty=True)
obj.query.add_ordering(*order_by)
return obj.get()
def earliest(self, *fields):
return self._earliest(*fields)
def latest(self, *fields):
return self.reverse()._earliest(*fields)
def first(self):
"""Return the first object of a query or None if no match is found."""
for obj in (self if self.ordered else self.order_by('pk'))[:1]:
return obj
def last(self):
"""Return the last object of a query or None if no match is found."""
for obj in (self.reverse() if self.ordered else self.order_by('-pk'))[:1]:
return obj
def in_bulk(self, id_list=None, *, field_name='pk'):
"""
Return a dictionary mapping each of the given IDs to the object with
that ID. If `id_list` isn't provided, evaluate the entire QuerySet.
"""
assert not self.query.is_sliced, \
"Cannot use 'limit' or 'offset' with in_bulk"
if field_name != 'pk' and not self.model._meta.get_field(field_name).unique:
raise ValueError("in_bulk()'s field_name must be a unique field but %r isn't." % field_name)
if id_list is not None:
if not id_list:
return {}
filter_key = '{}__in'.format(field_name)
batch_size = connections[self.db].features.max_query_params
id_list = tuple(id_list)
# If the database has a limit on the number of query parameters
# (e.g. SQLite), retrieve objects in batches if necessary.
if batch_size and batch_size < len(id_list):
qs = ()
for offset in range(0, len(id_list), batch_size):
batch = id_list[offset:offset + batch_size]
qs += tuple(self.filter(**{filter_key: batch}).order_by())
else:
qs = self.filter(**{filter_key: id_list}).order_by()
else:
qs = self._chain()
return {getattr(obj, field_name): obj for obj in qs}
def delete(self):
"""Delete the records in the current QuerySet."""
assert not self.query.is_sliced, \
"Cannot use 'limit' or 'offset' with delete."
if self._fields is not None:
raise TypeError("Cannot call delete() after .values() or .values_list()")
del_query = self._chain()
# The delete is actually 2 queries - one to find related objects,
# and one to delete. Make sure that the discovery of related
# objects is performed on the same database as the deletion.
del_query._for_write = True
# Disable non-supported fields.
del_query.query.select_for_update = False
del_query.query.select_related = False
del_query.query.clear_ordering(force_empty=True)
collector = Collector(using=del_query.db)
collector.collect(del_query)
deleted, _rows_count = collector.delete()
# Clear the result cache, in case this QuerySet gets reused.
self._result_cache = None
return deleted, _rows_count
delete.alters_data = True
delete.queryset_only = True
def _raw_delete(self, using):
"""
Delete objects found from the given queryset in single direct SQL
query. No signals are sent and there is no protection for cascades.
"""
return sql.DeleteQuery(self.model).delete_qs(self, using)
_raw_delete.alters_data = True
def update(self, **kwargs):
"""
Update all elements in the current QuerySet, setting all the given
fields to the appropriate values.
"""
assert not self.query.is_sliced, \
"Cannot update a query once a slice has been taken."
self._for_write = True
query = self.query.chain(sql.UpdateQuery)
query.add_update_values(kwargs)
# Clear any annotations so that they won't be present in subqueries.
query.annotations = {}
with transaction.mark_for_rollback_on_error(using=self.db):
rows = query.get_compiler(self.db).execute_sql(CURSOR)
self._result_cache = None
return rows
update.alters_data = True
def _update(self, values):
"""
A version of update() that accepts field objects instead of field names.
Used primarily for model saving and not intended for use by general
code (it requires too much poking around at model internals to be
useful at that level).
"""
assert not self.query.is_sliced, \
"Cannot update a query once a slice has been taken."
query = self.query.chain(sql.UpdateQuery)
query.add_update_fields(values)
# Clear any annotations so that they won't be present in subqueries.
query.annotations = {}
self._result_cache = None
return query.get_compiler(self.db).execute_sql(CURSOR)
_update.alters_data = True
_update.queryset_only = False
def exists(self):
if self._result_cache is None:
return self.query.has_results(using=self.db)
return bool(self._result_cache)
def _prefetch_related_objects(self):
# This method can only be called once the result cache has been filled.
prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups)
self._prefetch_done = True
def explain(self, *, format=None, **options):
return self.query.explain(using=self.db, format=format, **options)
##################################################
# PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
##################################################
def raw(self, raw_query, params=None, translations=None, using=None):
if using is None:
using = self.db
qs = RawQuerySet(raw_query, model=self.model, params=params, translations=translations, using=using)
qs._prefetch_related_lookups = self._prefetch_related_lookups[:]
return qs
def _values(self, *fields, **expressions):
clone = self._chain()
if expressions:
clone = clone.annotate(**expressions)
clone._fields = fields
clone.query.set_values(fields)
return clone
def values(self, *fields, **expressions):
fields += tuple(expressions)
clone = self._values(*fields, **expressions)
clone._iterable_class = ValuesIterable
return clone
def values_list(self, *fields, flat=False, named=False):
if flat and named:
raise TypeError("'flat' and 'named' can't be used together.")
if flat and len(fields) > 1:
raise TypeError("'flat' is not valid when values_list is called with more than one field.")
field_names = {f for f in fields if not hasattr(f, 'resolve_expression')}
_fields = []
expressions = {}
counter = 1
for field in fields:
if hasattr(field, 'resolve_expression'):
field_id_prefix = getattr(field, 'default_alias', field.__class__.__name__.lower())
while True:
field_id = field_id_prefix + str(counter)
counter += 1
if field_id not in field_names:
break
expressions[field_id] = field
_fields.append(field_id)
else:
_fields.append(field)
clone = self._values(*_fields, **expressions)
clone._iterable_class = (
NamedValuesListIterable if named
else FlatValuesListIterable if flat
else ValuesListIterable
)
return clone
def dates(self, field_name, kind, order='ASC'):
"""
Return a list of date objects representing all available dates for
the given field_name, scoped to 'kind'.
"""
assert kind in ('year', 'month', 'week', 'day'), \
"'kind' must be one of 'year', 'month', 'week', or 'day'."
assert order in ('ASC', 'DESC'), \
"'order' must be either 'ASC' or 'DESC'."
return self.annotate(
datefield=Trunc(field_name, kind, output_field=DateField()),
plain_field=F(field_name)
).values_list(
'datefield', flat=True
).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datefield')
def datetimes(self, field_name, kind, order='ASC', tzinfo=None):
"""
Return a list of datetime objects representing all available
datetimes for the given field_name, scoped to 'kind'.
"""
assert kind in ('year', 'month', 'week', 'day', 'hour', 'minute', 'second'), \
"'kind' must be one of 'year', 'month', 'week', 'day', 'hour', 'minute', or 'second'."
assert order in ('ASC', 'DESC'), \
"'order' must be either 'ASC' or 'DESC'."
if settings.USE_TZ:
if tzinfo is None:
tzinfo = timezone.get_current_timezone()
else:
tzinfo = None
return self.annotate(
datetimefield=Trunc(field_name, kind, output_field=DateTimeField(), tzinfo=tzinfo),
plain_field=F(field_name)
).values_list(
'datetimefield', flat=True
).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datetimefield')
def none(self):
"""Return an empty QuerySet."""
clone = self._chain()
clone.query.set_empty()
return clone
##################################################################
# PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
##################################################################
def all(self):
"""
Return a new QuerySet that is a copy of the current one. This allows a
QuerySet to proxy for a model manager in some cases.
"""
return self._chain()
def filter(self, *args, **kwargs):
"""
Return a new QuerySet instance with the args ANDed to the existing
set.
"""
self._not_support_combined_queries('filter')
return self._filter_or_exclude(False, *args, **kwargs)
def exclude(self, *args, **kwargs):
"""
Return a new QuerySet instance with NOT (args) ANDed to the existing
set.
"""
self._not_support_combined_queries('exclude')
return self._filter_or_exclude(True, *args, **kwargs)
def _filter_or_exclude(self, negate, *args, **kwargs):
if args or kwargs:
assert not self.query.is_sliced, \
"Cannot filter a query once a slice has been taken."
clone = self._chain()
if negate:
clone.query.add_q(~Q(*args, **kwargs))
else:
clone.query.add_q(Q(*args, **kwargs))
return clone
def complex_filter(self, filter_obj):
"""
Return a new QuerySet instance with filter_obj added to the filters.
filter_obj can be a Q object or a dictionary of keyword lookup
arguments.
This exists to support framework features such as 'limit_choices_to',
and usually it will be more natural to use other methods.
"""
if isinstance(filter_obj, Q):
clone = self._chain()
clone.query.add_q(filter_obj)
return clone
else:
return self._filter_or_exclude(None, **filter_obj)
def _combinator_query(self, combinator, *other_qs, all=False):
# Clone the query to inherit the select list and everything
clone = self._chain()
# Clear limits and ordering so they can be reapplied
clone.query.clear_ordering(True)
clone.query.clear_limits()
clone.query.combined_queries = (self.query,) + tuple(qs.query for qs in other_qs)
clone.query.combinator = combinator
clone.query.combinator_all = all
return clone
def union(self, *other_qs, all=False):
# If the query is an EmptyQuerySet, combine all nonempty querysets.
if isinstance(self, EmptyQuerySet):
qs = [q for q in other_qs if not isinstance(q, EmptyQuerySet)]
return qs[0]._combinator_query('union', *qs[1:], all=all) if qs else self
return self._combinator_query('union', *other_qs, all=all)
def intersection(self, *other_qs):
# If any query is an EmptyQuerySet, return it.
if isinstance(self, EmptyQuerySet):
return self
for other in other_qs:
if isinstance(other, EmptyQuerySet):
return other
return self._combinator_query('intersection', *other_qs)
def difference(self, *other_qs):
# If the query is an EmptyQuerySet, return it.
if isinstance(self, EmptyQuerySet):
return self
return self._combinator_query('difference', *other_qs)
def select_for_update(self, nowait=False, skip_locked=False, of=()):
"""
Return a new QuerySet instance that will select objects with a
FOR UPDATE lock.
"""
if nowait and skip_locked:
raise ValueError('The nowait option cannot be used with skip_locked.')
obj = self._chain()
obj._for_write = True
obj.query.select_for_update = True
obj.query.select_for_update_nowait = nowait
obj.query.select_for_update_skip_locked = skip_locked
obj.query.select_for_update_of = of
return obj
def select_related(self, *fields):
"""
Return a new QuerySet instance that will select related objects.
If fields are specified, they must be ForeignKey fields and only those
related objects are included in the selection.
If select_related(None) is called, clear the list.
"""
self._not_support_combined_queries('select_related')
if self._fields is not None:
raise TypeError("Cannot call select_related() after .values() or .values_list()")
obj = self._chain()
if fields == (None,):
obj.query.select_related = False
elif fields:
obj.query.add_select_related(fields)
else:
obj.query.select_related = True
return obj
def prefetch_related(self, *lookups):
"""
Return a new QuerySet instance that will prefetch the specified
Many-To-One and Many-To-Many related objects when the QuerySet is
evaluated.
When prefetch_related() is called more than once, append to the list of
prefetch lookups. If prefetch_related(None) is called, clear the list.
"""
self._not_support_combined_queries('prefetch_related')
clone = self._chain()
if lookups == (None,):
clone._prefetch_related_lookups = ()
else:
for lookup in lookups:
if isinstance(lookup, Prefetch):
lookup = lookup.prefetch_to
lookup = lookup.split(LOOKUP_SEP, 1)[0]
if lookup in self.query._filtered_relations:
raise ValueError('prefetch_related() is not supported with FilteredRelation.')
clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups
return clone
def annotate(self, *args, **kwargs):
"""
Return a query set in which the returned objects have been annotated
with extra data or aggregations.
"""
self._not_support_combined_queries('annotate')
self._validate_values_are_expressions(args + tuple(kwargs.values()), method_name='annotate')
annotations = {}
for arg in args:
# The default_alias property may raise a TypeError.
try:
if arg.default_alias in kwargs:
raise ValueError("The named annotation '%s' conflicts with the "
"default name for another annotation."
% arg.default_alias)
except TypeError:
raise TypeError("Complex annotations require an alias")
annotations[arg.default_alias] = arg
annotations.update(kwargs)
clone = self._chain()
names = self._fields
if names is None:
names = set(chain.from_iterable(
(field.name, field.attname) if hasattr(field, 'attname') else (field.name,)
for field in self.model._meta.get_fields()
))
for alias, annotation in annotations.items():
if alias in names:
raise ValueError("The annotation '%s' conflicts with a field on "
"the model." % alias)
if isinstance(annotation, FilteredRelation):
clone.query.add_filtered_relation(annotation, alias)
else:
clone.query.add_annotation(annotation, alias, is_summary=False)
for alias, annotation in clone.query.annotations.items():
if alias in annotations and annotation.contains_aggregate:
if clone._fields is None:
clone.query.group_by = True
else:
clone.query.set_group_by()
break
return clone
def order_by(self, *field_names):
"""Return a new QuerySet instance with the ordering changed."""
assert not self.query.is_sliced, \
"Cannot reorder a query once a slice has been taken."
obj = self._chain()
obj.query.clear_ordering(force_empty=False)
obj.query.add_ordering(*field_names)
return obj
def distinct(self, *field_names):
"""
Return a new QuerySet instance that will select only distinct results.
"""
assert not self.query.is_sliced, \
"Cannot create distinct fields once a slice has been taken."
obj = self._chain()
obj.query.add_distinct_fields(*field_names)
return obj
def extra(self, select=None, where=None, params=None, tables=None,
order_by=None, select_params=None):
"""Add extra SQL fragments to the query."""
self._not_support_combined_queries('extra')
assert not self.query.is_sliced, \
"Cannot change a query once a slice has been taken"
clone = self._chain()
clone.query.add_extra(select, select_params, where, params, tables, order_by)
return clone
def reverse(self):
"""Reverse the ordering of the QuerySet."""
if self.query.is_sliced:
raise TypeError('Cannot reverse a query once a slice has been taken.')
clone = self._chain()
clone.query.standard_ordering = not clone.query.standard_ordering
return clone
def defer(self, *fields):
"""
Defer the loading of data for certain fields until they are accessed.
Add the set of deferred fields to any existing set of deferred fields.
The only exception to this is if None is passed in as the only
        parameter, in which case all deferrals are removed.
"""
self._not_support_combined_queries('defer')
if self._fields is not None:
raise TypeError("Cannot call defer() after .values() or .values_list()")
clone = self._chain()
if fields == (None,):
clone.query.clear_deferred_loading()
else:
clone.query.add_deferred_loading(fields)
return clone
def only(self, *fields):
"""
Essentially, the opposite of defer(). Only the fields passed into this
method and that are not already specified as deferred are loaded
immediately when the queryset is evaluated.
"""
self._not_support_combined_queries('only')
if self._fields is not None:
raise TypeError("Cannot call only() after .values() or .values_list()")
if fields == (None,):
# Can only pass None to defer(), not only(), as the rest option.
# That won't stop people trying to do this, so let's be explicit.
raise TypeError("Cannot pass None as an argument to only().")
for field in fields:
field = field.split(LOOKUP_SEP, 1)[0]
if field in self.query._filtered_relations:
raise ValueError('only() is not supported with FilteredRelation.')
clone = self._chain()
clone.query.add_immediate_loading(fields)
return clone
def using(self, alias):
"""Select which database this QuerySet should execute against."""
clone = self._chain()
clone._db = alias
return clone
###################################
# PUBLIC INTROSPECTION ATTRIBUTES #
###################################
@property
def ordered(self):
"""
Return True if the QuerySet is ordered -- i.e. has an order_by()
clause or a default ordering on the model (or is empty).
"""
if isinstance(self, EmptyQuerySet):
return True
if self.query.extra_order_by or self.query.order_by:
return True
elif self.query.default_ordering and self.query.get_meta().ordering:
return True
else:
return False
@property
def db(self):
"""Return the database used if this query is executed now."""
if self._for_write:
return self._db or router.db_for_write(self.model, **self._hints)
return self._db or router.db_for_read(self.model, **self._hints)
###################
# PRIVATE METHODS #
###################
def _insert(self, objs, fields, returning_fields=None, raw=False, using=None, ignore_conflicts=False):
"""
Insert a new record for the given model. This provides an interface to
the InsertQuery class and is how Model.save() is implemented.
"""
self._for_write = True
if using is None:
using = self.db
query = sql.InsertQuery(self.model, ignore_conflicts=ignore_conflicts)
query.insert_values(fields, objs, raw=raw)
return query.get_compiler(using=using).execute_sql(returning_fields)
_insert.alters_data = True
_insert.queryset_only = False
def _batched_insert(self, objs, fields, batch_size, ignore_conflicts=False):
"""
Helper method for bulk_create() to insert objs one batch at a time.
"""
if ignore_conflicts and not connections[self.db].features.supports_ignore_conflicts:
raise NotSupportedError('This database backend does not support ignoring conflicts.')
ops = connections[self.db].ops
batch_size = (batch_size or max(ops.bulk_batch_size(fields, objs), 1))
inserted_rows = []
bulk_return = connections[self.db].features.can_return_rows_from_bulk_insert
for item in [objs[i:i + batch_size] for i in range(0, len(objs), batch_size)]:
if bulk_return and not ignore_conflicts:
inserted_columns = self._insert(
item, fields=fields, using=self.db,
returning_fields=self.model._meta.db_returning_fields,
ignore_conflicts=ignore_conflicts,
)
if isinstance(inserted_columns, list):
inserted_rows.extend(inserted_columns)
else:
inserted_rows.append(inserted_columns)
else:
self._insert(item, fields=fields, using=self.db, ignore_conflicts=ignore_conflicts)
return inserted_rows
def _chain(self, **kwargs):
"""
Return a copy of the current QuerySet that's ready for another
operation.
"""
obj = self._clone()
if obj._sticky_filter:
obj.query.filter_is_sticky = True
obj._sticky_filter = False
obj.__dict__.update(kwargs)
return obj
def _clone(self):
"""
Return a copy of the current QuerySet. A lightweight alternative
to deepcopy().
"""
c = self.__class__(model=self.model, query=self.query.chain(), using=self._db, hints=self._hints)
c._sticky_filter = self._sticky_filter
c._for_write = self._for_write
c._prefetch_related_lookups = self._prefetch_related_lookups[:]
c._known_related_objects = self._known_related_objects
c._iterable_class = self._iterable_class
c._fields = self._fields
return c
def _fetch_all(self):
if self._result_cache is None:
self._result_cache = list(self._iterable_class(self))
if self._prefetch_related_lookups and not self._prefetch_done:
self._prefetch_related_objects()
def _next_is_sticky(self):
"""
Indicate that the next filter call and the one following that should
be treated as a single filter. This is only important when it comes to
determining when to reuse tables for many-to-many filters. Required so
that we can filter naturally on the results of related managers.
This doesn't return a clone of the current QuerySet (it returns
"self"). The method is only used internally and should be immediately
followed by a filter() that does create a clone.
"""
self._sticky_filter = True
return self
def _merge_sanity_check(self, other):
"""Check that two QuerySet classes may be merged."""
if self._fields is not None and (
set(self.query.values_select) != set(other.query.values_select) or
set(self.query.extra_select) != set(other.query.extra_select) or
set(self.query.annotation_select) != set(other.query.annotation_select)):
raise TypeError(
"Merging '%s' classes must involve the same values in each case."
% self.__class__.__name__
)
def _merge_known_related_objects(self, other):
"""
Keep track of all known related objects from either QuerySet instance.
"""
for field, objects in other._known_related_objects.items():
self._known_related_objects.setdefault(field, {}).update(objects)
def resolve_expression(self, *args, **kwargs):
if self._fields and len(self._fields) > 1:
# values() queryset can only be used as nested queries
# if they are set up to select only a single field.
raise TypeError('Cannot use multi-field values as a filter value.')
query = self.query.resolve_expression(*args, **kwargs)
query._db = self._db
return query
resolve_expression.queryset_only = True
def _add_hints(self, **hints):
"""
Update hinting information for use by routers. Add new key/values or
overwrite existing key/values.
"""
self._hints.update(hints)
def _has_filters(self):
"""
Check if this QuerySet has any filtering going on. This isn't
equivalent with checking if all objects are present in results, for
example, qs[1:]._has_filters() -> False.
"""
return self.query.has_filters()
@staticmethod
def _validate_values_are_expressions(values, method_name):
invalid_args = sorted(str(arg) for arg in values if not hasattr(arg, 'resolve_expression'))
if invalid_args:
raise TypeError(
'QuerySet.%s() received non-expression(s): %s.' % (
method_name,
', '.join(invalid_args),
)
)
def _not_support_combined_queries(self, operation_name):
if self.query.combinator:
raise NotSupportedError(
'Calling QuerySet.%s() after %s() is not supported.'
% (operation_name, self.query.combinator)
)
class InstanceCheckMeta(type):
def __instancecheck__(self, instance):
return isinstance(instance, QuerySet) and instance.query.is_empty()
class EmptyQuerySet(metaclass=InstanceCheckMeta):
"""
    Marker class to check if a queryset is empty by .none():
isinstance(qs.none(), EmptyQuerySet) -> True
"""
def __init__(self, *args, **kwargs):
raise TypeError("EmptyQuerySet can't be instantiated")
class RawQuerySet:
"""
Provide an iterator which converts the results of raw SQL queries into
annotated model instances.
"""
def __init__(self, raw_query, model=None, query=None, params=None,
translations=None, using=None, hints=None):
self.raw_query = raw_query
self.model = model
self._db = using
self._hints = hints or {}
self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params)
self.params = params or ()
self.translations = translations or {}
self._result_cache = None
self._prefetch_related_lookups = ()
self._prefetch_done = False
def resolve_model_init_order(self):
"""Resolve the init field names and value positions."""
converter = connections[self.db].introspection.identifier_converter
model_init_fields = [f for f in self.model._meta.fields if converter(f.column) in self.columns]
annotation_fields = [(column, pos) for pos, column in enumerate(self.columns)
if column not in self.model_fields]
model_init_order = [self.columns.index(converter(f.column)) for f in model_init_fields]
model_init_names = [f.attname for f in model_init_fields]
return model_init_names, model_init_order, annotation_fields
def prefetch_related(self, *lookups):
"""Same as QuerySet.prefetch_related()"""
clone = self._clone()
if lookups == (None,):
clone._prefetch_related_lookups = ()
else:
clone._prefetch_related_lookups = clone._prefetch_related_lookups + lookups
return clone
def _prefetch_related_objects(self):
prefetch_related_objects(self._result_cache, *self._prefetch_related_lookups)
self._prefetch_done = True
def _clone(self):
"""Same as QuerySet._clone()"""
c = self.__class__(
self.raw_query, model=self.model, query=self.query, params=self.params,
translations=self.translations, using=self._db, hints=self._hints
)
c._prefetch_related_lookups = self._prefetch_related_lookups[:]
return c
def _fetch_all(self):
if self._result_cache is None:
self._result_cache = list(self.iterator())
if self._prefetch_related_lookups and not self._prefetch_done:
self._prefetch_related_objects()
def __len__(self):
self._fetch_all()
return len(self._result_cache)
def __bool__(self):
self._fetch_all()
return bool(self._result_cache)
def __iter__(self):
self._fetch_all()
return iter(self._result_cache)
def iterator(self):
# Cache some things for performance reasons outside the loop.
db = self.db
compiler = connections[db].ops.compiler('SQLCompiler')(
self.query, connections[db], db
)
query = iter(self.query)
try:
model_init_names, model_init_pos, annotation_fields = self.resolve_model_init_order()
if self.model._meta.pk.attname not in model_init_names:
raise InvalidQuery('Raw query must include the primary key')
model_cls = self.model
fields = [self.model_fields.get(c) for c in self.columns]
converters = compiler.get_converters([
f.get_col(f.model._meta.db_table) if f else None for f in fields
])
if converters:
query = compiler.apply_converters(query, converters)
for values in query:
# Associate fields to values
model_init_values = [values[pos] for pos in model_init_pos]
instance = model_cls.from_db(db, model_init_names, model_init_values)
if annotation_fields:
for column, pos in annotation_fields:
setattr(instance, column, values[pos])
yield instance
finally:
# Done iterating the Query. If it has its own cursor, close it.
if hasattr(self.query, 'cursor') and self.query.cursor:
self.query.cursor.close()
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.query)
def __getitem__(self, k):
return list(self)[k]
@property
def db(self):
"""Return the database used if this query is executed now."""
return self._db or router.db_for_read(self.model, **self._hints)
def using(self, alias):
"""Select the database this RawQuerySet should execute against."""
return RawQuerySet(
self.raw_query, model=self.model,
query=self.query.chain(using=alias),
params=self.params, translations=self.translations,
using=alias,
)
@cached_property
def columns(self):
"""
A list of model field names in the order they'll appear in the
query results.
"""
columns = self.query.get_columns()
# Adjust any column names which don't match field names
for (query_name, model_name) in self.translations.items():
# Ignore translations for nonexistent column names
try:
index = columns.index(query_name)
except ValueError:
pass
else:
columns[index] = model_name
return columns
@cached_property
def model_fields(self):
"""A dict mapping column names to model field names."""
converter = connections[self.db].introspection.identifier_converter
model_fields = {}
for field in self.model._meta.fields:
name, column = field.get_attname_column()
model_fields[converter(column)] = field
return model_fields
class Prefetch:
def __init__(self, lookup, queryset=None, to_attr=None):
# `prefetch_through` is the path we traverse to perform the prefetch.
self.prefetch_through = lookup
# `prefetch_to` is the path to the attribute that stores the result.
self.prefetch_to = lookup
if queryset is not None and not issubclass(queryset._iterable_class, ModelIterable):
raise ValueError('Prefetch querysets cannot use values().')
if to_attr:
self.prefetch_to = LOOKUP_SEP.join(lookup.split(LOOKUP_SEP)[:-1] + [to_attr])
self.queryset = queryset
self.to_attr = to_attr
def __getstate__(self):
obj_dict = self.__dict__.copy()
if self.queryset is not None:
# Prevent the QuerySet from being evaluated
obj_dict['queryset'] = self.queryset._chain(
_result_cache=[],
_prefetch_done=True,
)
return obj_dict
def add_prefix(self, prefix):
self.prefetch_through = prefix + LOOKUP_SEP + self.prefetch_through
self.prefetch_to = prefix + LOOKUP_SEP + self.prefetch_to
def get_current_prefetch_to(self, level):
return LOOKUP_SEP.join(self.prefetch_to.split(LOOKUP_SEP)[:level + 1])
def get_current_to_attr(self, level):
parts = self.prefetch_to.split(LOOKUP_SEP)
to_attr = parts[level]
as_attr = self.to_attr and level == len(parts) - 1
return to_attr, as_attr
def get_current_queryset(self, level):
if self.get_current_prefetch_to(level) == self.prefetch_to:
return self.queryset
return None
def __eq__(self, other):
if not isinstance(other, Prefetch):
return NotImplemented
return self.prefetch_to == other.prefetch_to
def __hash__(self):
return hash((self.__class__, self.prefetch_to))
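# Illustrative usage sketch (not part of Django itself): how Prefetch objects are
# typically passed to prefetch_related(). `Author` and `Book` are hypothetical
# models with a reverse `book_set` relation and a `published` boolean field.
def _prefetch_usage_sketch():
    from myapp.models import Author, Book  # hypothetical models
    authors = Author.objects.prefetch_related(
        Prefetch(
            'book_set',
            queryset=Book.objects.filter(published=True),
            to_attr='published_books',
        )
    )
    for author in authors:
        # Because to_attr was given, the result is stored as a plain list.
        print(author.name, len(author.published_books))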
def normalize_prefetch_lookups(lookups, prefix=None):
"""Normalize lookups into Prefetch objects."""
ret = []
for lookup in lookups:
if not isinstance(lookup, Prefetch):
lookup = Prefetch(lookup)
if prefix:
lookup.add_prefix(prefix)
ret.append(lookup)
return ret
def prefetch_related_objects(model_instances, *related_lookups):
"""
Populate prefetched object caches for a list of model instances based on
the lookups/Prefetch instances given.
"""
if not model_instances:
return # nothing to do
# We need to be able to dynamically add to the list of prefetch_related
    # lookups that we look up (see below). So we need some bookkeeping to
# ensure we don't do duplicate work.
done_queries = {} # dictionary of things like 'foo__bar': [results]
auto_lookups = set() # we add to this as we go through.
followed_descriptors = set() # recursion protection
all_lookups = normalize_prefetch_lookups(reversed(related_lookups))
while all_lookups:
lookup = all_lookups.pop()
if lookup.prefetch_to in done_queries:
if lookup.queryset is not None:
raise ValueError("'%s' lookup was already seen with a different queryset. "
"You may need to adjust the ordering of your lookups." % lookup.prefetch_to)
continue
# Top level, the list of objects to decorate is the result cache
# from the primary QuerySet. It won't be for deeper levels.
obj_list = model_instances
through_attrs = lookup.prefetch_through.split(LOOKUP_SEP)
for level, through_attr in enumerate(through_attrs):
# Prepare main instances
if not obj_list:
break
prefetch_to = lookup.get_current_prefetch_to(level)
if prefetch_to in done_queries:
# Skip any prefetching, and any object preparation
obj_list = done_queries[prefetch_to]
continue
# Prepare objects:
good_objects = True
for obj in obj_list:
# Since prefetching can re-use instances, it is possible to have
# the same instance multiple times in obj_list, so obj might
# already be prepared.
if not hasattr(obj, '_prefetched_objects_cache'):
try:
obj._prefetched_objects_cache = {}
except (AttributeError, TypeError):
# Must be an immutable object from
# values_list(flat=True), for example (TypeError) or
# a QuerySet subclass that isn't returning Model
# instances (AttributeError), either in Django or a 3rd
# party. prefetch_related() doesn't make sense, so quit.
good_objects = False
break
if not good_objects:
break
# Descend down tree
# We assume that objects retrieved are homogeneous (which is the premise
# of prefetch_related), so what applies to first object applies to all.
first_obj = obj_list[0]
to_attr = lookup.get_current_to_attr(level)[0]
prefetcher, descriptor, attr_found, is_fetched = get_prefetcher(first_obj, through_attr, to_attr)
if not attr_found:
raise AttributeError("Cannot find '%s' on %s object, '%s' is an invalid "
"parameter to prefetch_related()" %
(through_attr, first_obj.__class__.__name__, lookup.prefetch_through))
if level == len(through_attrs) - 1 and prefetcher is None:
# Last one, this *must* resolve to something that supports
# prefetching, otherwise there is no point adding it and the
# developer asking for it has made a mistake.
raise ValueError("'%s' does not resolve to an item that supports "
"prefetching - this is an invalid parameter to "
"prefetch_related()." % lookup.prefetch_through)
if prefetcher is not None and not is_fetched:
obj_list, additional_lookups = prefetch_one_level(obj_list, prefetcher, lookup, level)
# We need to ensure we don't keep adding lookups from the
# same relationships to stop infinite recursion. So, if we
# are already on an automatically added lookup, don't add
# the new lookups from relationships we've seen already.
if not (prefetch_to in done_queries and lookup in auto_lookups and descriptor in followed_descriptors):
done_queries[prefetch_to] = obj_list
new_lookups = normalize_prefetch_lookups(reversed(additional_lookups), prefetch_to)
auto_lookups.update(new_lookups)
all_lookups.extend(new_lookups)
followed_descriptors.add(descriptor)
else:
# Either a singly related object that has already been fetched
# (e.g. via select_related), or hopefully some other property
# that doesn't support prefetching but needs to be traversed.
# We replace the current list of parent objects with the list
# of related objects, filtering out empty or missing values so
# that we can continue with nullable or reverse relations.
new_obj_list = []
for obj in obj_list:
if through_attr in getattr(obj, '_prefetched_objects_cache', ()):
# If related objects have been prefetched, use the
# cache rather than the object's through_attr.
new_obj = list(obj._prefetched_objects_cache.get(through_attr))
else:
try:
new_obj = getattr(obj, through_attr)
except exceptions.ObjectDoesNotExist:
continue
if new_obj is None:
continue
# We special-case `list` rather than something more generic
# like `Iterable` because we don't want to accidentally match
# user models that define __iter__.
if isinstance(new_obj, list):
new_obj_list.extend(new_obj)
else:
new_obj_list.append(new_obj)
obj_list = new_obj_list
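# Illustrative usage sketch (not part of Django itself): prefetch_related_objects()
# is useful when instances were obtained without prefetching (for example from a
# cache) and their prefetch caches need to be filled afterwards. `Author` is a
# hypothetical model with a reverse `book_set` relation.
def _prefetch_related_objects_usage_sketch():
    from myapp.models import Author  # hypothetical model
    authors = list(Author.objects.all()[:10])
    prefetch_related_objects(authors, 'book_set')
    for author in authors:
        # book_set.all() now serves results from the prefetch cache, not the database.
        print(author.name, len(author.book_set.all()))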
def get_prefetcher(instance, through_attr, to_attr):
"""
For the attribute 'through_attr' on the given instance, find
an object that has a get_prefetch_queryset().
Return a 4 tuple containing:
(the object with get_prefetch_queryset (or None),
the descriptor object representing this relationship (or None),
a boolean that is False if the attribute was not found at all,
a boolean that is True if the attribute has already been fetched)
"""
prefetcher = None
is_fetched = False
# For singly related objects, we have to avoid getting the attribute
# from the object, as this will trigger the query. So we first try
# on the class, in order to get the descriptor object.
rel_obj_descriptor = getattr(instance.__class__, through_attr, None)
if rel_obj_descriptor is None:
attr_found = hasattr(instance, through_attr)
else:
attr_found = True
if rel_obj_descriptor:
# singly related object, descriptor object has the
# get_prefetch_queryset() method.
if hasattr(rel_obj_descriptor, 'get_prefetch_queryset'):
prefetcher = rel_obj_descriptor
if rel_obj_descriptor.is_cached(instance):
is_fetched = True
else:
# descriptor doesn't support prefetching, so we go ahead and get
# the attribute on the instance rather than the class to
# support many related managers
rel_obj = getattr(instance, through_attr)
if hasattr(rel_obj, 'get_prefetch_queryset'):
prefetcher = rel_obj
if through_attr != to_attr:
# Special case cached_property instances because hasattr
# triggers attribute computation and assignment.
if isinstance(getattr(instance.__class__, to_attr, None), cached_property):
is_fetched = to_attr in instance.__dict__
else:
is_fetched = hasattr(instance, to_attr)
else:
is_fetched = through_attr in instance._prefetched_objects_cache
return prefetcher, rel_obj_descriptor, attr_found, is_fetched
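# Illustrative sketch (not part of Django itself): get_prefetcher() is internal,
# but its return value is easy to inspect. `Author` is a hypothetical model with
# a reverse `book_set` relation; no custom to_attr is used here, so both the
# through attribute and the to_attr are 'book_set'.
def _get_prefetcher_inspection_sketch():
    from myapp.models import Author  # hypothetical model
    author = Author.objects.first()
    prefetcher, descriptor, attr_found, is_fetched = get_prefetcher(author, 'book_set', 'book_set')
    # attr_found: the attribute exists on the instance or its class.
    # prefetcher: the object exposing get_prefetch_queryset() (here, a related manager).
    # is_fetched: True once prefetch_related() has already populated the cache.
    return prefetcher, descriptor, attr_found, is_fetched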
def prefetch_one_level(instances, prefetcher, lookup, level):
"""
Helper function for prefetch_related_objects().
Run prefetches on all instances using the prefetcher object,
assigning results to relevant caches in instance.
Return the prefetched objects along with any additional prefetches that
must be done due to prefetch_related lookups found from default managers.
"""
# prefetcher must have a method get_prefetch_queryset() which takes a list
# of instances, and returns a tuple:
# (queryset of instances of self.model that are related to passed in instances,
# callable that gets value to be matched for returned instances,
# callable that gets value to be matched for passed in instances,
# boolean that is True for singly related objects,
# cache or field name to assign to,
# boolean that is True when the previous argument is a cache name vs a field name).
# The 'values to be matched' must be hashable as they will be used
# in a dictionary.
rel_qs, rel_obj_attr, instance_attr, single, cache_name, is_descriptor = (
prefetcher.get_prefetch_queryset(instances, lookup.get_current_queryset(level)))
# We have to handle the possibility that the QuerySet we just got back
# contains some prefetch_related lookups. We don't want to trigger the
# prefetch_related functionality by evaluating the query. Rather, we need
# to merge in the prefetch_related lookups.
# Copy the lookups in case it is a Prefetch object which could be reused
# later (happens in nested prefetch_related).
additional_lookups = [
copy.copy(additional_lookup) for additional_lookup
in getattr(rel_qs, '_prefetch_related_lookups', ())
]
if additional_lookups:
# Don't need to clone because the manager should have given us a fresh
# instance, so we access an internal instead of using public interface
# for performance reasons.
rel_qs._prefetch_related_lookups = ()
all_related_objects = list(rel_qs)
rel_obj_cache = {}
for rel_obj in all_related_objects:
rel_attr_val = rel_obj_attr(rel_obj)
rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj)
to_attr, as_attr = lookup.get_current_to_attr(level)
# Make sure `to_attr` does not conflict with a field.
if as_attr and instances:
# We assume that objects retrieved are homogeneous (which is the premise
# of prefetch_related), so what applies to first object applies to all.
model = instances[0].__class__
try:
model._meta.get_field(to_attr)
except exceptions.FieldDoesNotExist:
pass
else:
msg = 'to_attr={} conflicts with a field on the {} model.'
raise ValueError(msg.format(to_attr, model.__name__))
# Whether or not we're prefetching the last part of the lookup.
leaf = len(lookup.prefetch_through.split(LOOKUP_SEP)) - 1 == level
for obj in instances:
instance_attr_val = instance_attr(obj)
vals = rel_obj_cache.get(instance_attr_val, [])
if single:
val = vals[0] if vals else None
if as_attr:
# A to_attr has been given for the prefetch.
setattr(obj, to_attr, val)
elif is_descriptor:
# cache_name points to a field name in obj.
# This field is a descriptor for a related object.
setattr(obj, cache_name, val)
else:
# No to_attr has been given for this prefetch operation and the
# cache_name does not point to a descriptor. Store the value of
# the field in the object's field cache.
obj._state.fields_cache[cache_name] = val
else:
if as_attr:
setattr(obj, to_attr, vals)
else:
manager = getattr(obj, to_attr)
if leaf and lookup.queryset is not None:
qs = manager._apply_rel_filters(lookup.queryset)
else:
qs = manager.get_queryset()
qs._result_cache = vals
# We don't want the individual qs doing prefetch_related now,
# since we have merged this into the current work.
qs._prefetch_done = True
obj._prefetched_objects_cache[cache_name] = qs
return all_related_objects, additional_lookups
class RelatedPopulator:
"""
RelatedPopulator is used for select_related() object instantiation.
The idea is that each select_related() model will be populated by a
different RelatedPopulator instance. The RelatedPopulator instances get
klass_info and select (computed in SQLCompiler) plus the used db as
input for initialization. That data is used to compute which columns
to use, how to instantiate the model, and how to populate the links
between the objects.
    The actual creation of the objects is done in the populate() method. This
method gets row and from_obj as input and populates the select_related()
model instance.
"""
def __init__(self, klass_info, select, db):
self.db = db
# Pre-compute needed attributes. The attributes are:
# - model_cls: the possibly deferred model class to instantiate
# - either:
# - cols_start, cols_end: usually the columns in the row are
# in the same order model_cls.__init__ expects them, so we
# can instantiate by model_cls(*row[cols_start:cols_end])
# - reorder_for_init: When select_related descends to a child
# class, then we want to reuse the already selected parent
# data. However, in this case the parent data isn't necessarily
# in the same order that Model.__init__ expects it to be, so
# we have to reorder the parent data. The reorder_for_init
# attribute contains a function used to reorder the field data
# in the order __init__ expects it.
# - pk_idx: the index of the primary key field in the reordered
# model data. Used to check if a related object exists at all.
# - init_list: the field attnames fetched from the database. For
# deferred models this isn't the same as all attnames of the
# model's fields.
# - related_populators: a list of RelatedPopulator instances if
# select_related() descends to related models from this model.
# - local_setter, remote_setter: Methods to set cached values on
# the object being populated and on the remote object. Usually
# these are Field.set_cached_value() methods.
select_fields = klass_info['select_fields']
from_parent = klass_info['from_parent']
if not from_parent:
self.cols_start = select_fields[0]
self.cols_end = select_fields[-1] + 1
self.init_list = [
f[0].target.attname for f in select[self.cols_start:self.cols_end]
]
self.reorder_for_init = None
else:
attname_indexes = {select[idx][0].target.attname: idx for idx in select_fields}
model_init_attnames = (f.attname for f in klass_info['model']._meta.concrete_fields)
self.init_list = [attname for attname in model_init_attnames if attname in attname_indexes]
self.reorder_for_init = operator.itemgetter(*[attname_indexes[attname] for attname in self.init_list])
self.model_cls = klass_info['model']
self.pk_idx = self.init_list.index(self.model_cls._meta.pk.attname)
self.related_populators = get_related_populators(klass_info, select, self.db)
self.local_setter = klass_info['local_setter']
self.remote_setter = klass_info['remote_setter']
def populate(self, row, from_obj):
if self.reorder_for_init:
obj_data = self.reorder_for_init(row)
else:
obj_data = row[self.cols_start:self.cols_end]
if obj_data[self.pk_idx] is None:
obj = None
else:
obj = self.model_cls.from_db(self.db, self.init_list, obj_data)
for rel_iter in self.related_populators:
rel_iter.populate(row, obj)
self.local_setter(from_obj, obj)
if obj is not None:
self.remote_setter(obj, from_obj)
def get_related_populators(klass_info, select, db):
iterators = []
related_klass_infos = klass_info.get('related_klass_infos', [])
for rel_klass_info in related_klass_infos:
rel_cls = RelatedPopulator(rel_klass_info, select, db)
iterators.append(rel_cls)
return iterators
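# Illustrative usage sketch (not part of Django itself): RelatedPopulator instances
# are created by the SQL compiler when select_related() is used; at the ORM surface
# it looks like this. `Book` is a hypothetical model with a ForeignKey to `Author`.
def _select_related_usage_sketch():
    from myapp.models import Book  # hypothetical model
    for book in Book.objects.select_related('author'):
        # No extra query here: the author columns came back in the same SELECT
        # and the instance was built and attached by a RelatedPopulator.
        print(book.title, book.author.name)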
avg_line_length: 42.182007 | max_line_length: 119 | alphanum_fraction: 0.616044
hexsha: 794e0fe4ec5e7f4a6bedb1c91c1b82416ea0e764 | size: 7,556 | ext: py | lang: Python
max_stars_repo_path: sdk/python/pulumi_azure_nextgen/relay/latest/hybrid_connection.py | max_stars_repo_name: pulumi/pulumi-azure-nextgen | max_stars_repo_head_hexsha: 452736b0a1cf584c2d4c04666e017af6e9b2c15c
max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 31 | max_stars_repo_stars_event_min_datetime: 2020-09-21T09:41:01.000Z | max_stars_repo_stars_event_max_datetime: 2021-02-26T13:21:59.000Z
max_issues_repo_path: sdk/python/pulumi_azure_nextgen/relay/latest/hybrid_connection.py | max_issues_repo_name: pulumi/pulumi-azure-nextgen | max_issues_repo_head_hexsha: 452736b0a1cf584c2d4c04666e017af6e9b2c15c
max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: 231 | max_issues_repo_issues_event_min_datetime: 2020-09-21T09:38:45.000Z | max_issues_repo_issues_event_max_datetime: 2021-03-01T11:16:03.000Z
max_forks_repo_path: sdk/python/pulumi_azure_nextgen/relay/latest/hybrid_connection.py | max_forks_repo_name: pulumi/pulumi-azure-nextgen | max_forks_repo_head_hexsha: 452736b0a1cf584c2d4c04666e017af6e9b2c15c
max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: 4 | max_forks_repo_forks_event_min_datetime: 2020-09-29T14:14:59.000Z | max_forks_repo_forks_event_max_datetime: 2021-02-10T20:38:16.000Z
content:
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = ['HybridConnection']
warnings.warn("""The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-nextgen:relay:HybridConnection'.""", DeprecationWarning)
class HybridConnection(pulumi.CustomResource):
warnings.warn("""The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-nextgen:relay:HybridConnection'.""", DeprecationWarning)
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
hybrid_connection_name: Optional[pulumi.Input[str]] = None,
namespace_name: Optional[pulumi.Input[str]] = None,
requires_client_authorization: Optional[pulumi.Input[bool]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
user_metadata: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Description of hybrid connection resource.
Latest API Version: 2017-04-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] hybrid_connection_name: The hybrid connection name.
:param pulumi.Input[str] namespace_name: The namespace name
:param pulumi.Input[bool] requires_client_authorization: Returns true if client authorization is needed for this hybrid connection; otherwise, false.
:param pulumi.Input[str] resource_group_name: Name of the Resource group within the Azure subscription.
:param pulumi.Input[str] user_metadata: The usermetadata is a placeholder to store user-defined string data for the hybrid connection endpoint. For example, it can be used to store descriptive data, such as a list of teams and their contact information. Also, user-defined configuration settings can be stored.
"""
pulumi.log.warn("HybridConnection is deprecated: The 'latest' version is deprecated. Please migrate to the resource in the top-level module: 'azure-nextgen:relay:HybridConnection'.")
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['hybrid_connection_name'] = hybrid_connection_name
if namespace_name is None and not opts.urn:
raise TypeError("Missing required property 'namespace_name'")
__props__['namespace_name'] = namespace_name
__props__['requires_client_authorization'] = requires_client_authorization
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['user_metadata'] = user_metadata
__props__['created_at'] = None
__props__['listener_count'] = None
__props__['name'] = None
__props__['type'] = None
__props__['updated_at'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:relay:HybridConnection"), pulumi.Alias(type_="azure-nextgen:relay/v20160701:HybridConnection"), pulumi.Alias(type_="azure-nextgen:relay/v20170401:HybridConnection")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(HybridConnection, __self__).__init__(
'azure-nextgen:relay/latest:HybridConnection',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'HybridConnection':
"""
Get an existing HybridConnection resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return HybridConnection(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> pulumi.Output[str]:
"""
The time the hybrid connection was created.
"""
return pulumi.get(self, "created_at")
@property
@pulumi.getter(name="listenerCount")
def listener_count(self) -> pulumi.Output[int]:
"""
        The number of listeners for this hybrid connection. Note that min: 1 and max: 25 are supported.
"""
return pulumi.get(self, "listener_count")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="requiresClientAuthorization")
def requires_client_authorization(self) -> pulumi.Output[Optional[bool]]:
"""
Returns true if client authorization is needed for this hybrid connection; otherwise, false.
"""
return pulumi.get(self, "requires_client_authorization")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="updatedAt")
def updated_at(self) -> pulumi.Output[str]:
"""
The time the namespace was updated.
"""
return pulumi.get(self, "updated_at")
@property
@pulumi.getter(name="userMetadata")
def user_metadata(self) -> pulumi.Output[Optional[str]]:
"""
The usermetadata is a placeholder to store user-defined string data for the hybrid connection endpoint. For example, it can be used to store descriptive data, such as a list of teams and their contact information. Also, user-defined configuration settings can be stored.
"""
return pulumi.get(self, "user_metadata")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
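# Illustrative usage sketch (not part of the generated SDK): declaring this resource
# inside a Pulumi program, typically at module scope of __main__.py. The namespace
# and resource group names below are placeholders for resources assumed to exist.
def _example_pulumi_program():
    example_connection = HybridConnection(
        'example-hybrid-connection',
        hybrid_connection_name='example-hc',
        namespace_name='example-relay-namespace',      # assumed existing Relay namespace
        resource_group_name='example-resource-group',  # assumed existing resource group
        requires_client_authorization=True,
        user_metadata='{"owner": "platform-team"}',
    )
    pulumi.export('hybridConnectionName', example_connection.name)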
avg_line_length: 46.641975 | max_line_length: 318 | alphanum_fraction: 0.669534
hexsha: 794e1012d346d21f58a1eb5c10a60d2220a5451e | size: 1,990 | ext: py | lang: Python
max_stars_repo_path: grow/performance/profile.py | max_stars_repo_name: akashkalal/grow | max_stars_repo_head_hexsha: e4813efecb270e00c52c4bb1cb317766a8c92e29
max_stars_repo_licenses: ["MIT"] | max_stars_count: 335 | max_stars_repo_stars_event_min_datetime: 2016-04-02T20:12:21.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-28T18:55:26.000Z
max_issues_repo_path: grow/performance/profile.py | max_issues_repo_name: kmcnellis/grow | max_issues_repo_head_hexsha: 4787f5a01681ef0800e9b4388a56cdbc48209368
max_issues_repo_licenses: ["MIT"] | max_issues_count: 784 | max_issues_repo_issues_event_min_datetime: 2016-04-01T16:56:41.000Z | max_issues_repo_issues_event_max_datetime: 2022-03-05T01:25:34.000Z
max_forks_repo_path: grow/performance/profile.py | max_forks_repo_name: kmcnellis/grow | max_forks_repo_head_hexsha: 4787f5a01681ef0800e9b4388a56cdbc48209368
max_forks_repo_licenses: ["MIT"] | max_forks_count: 54 | max_forks_repo_forks_event_min_datetime: 2016-05-03T13:06:15.000Z | max_forks_repo_forks_event_max_datetime: 2021-09-24T04:46:23.000Z
content:
"""Code timing for profiling."""
import time
class Timer(object):
"""Times code to see how long it takes using a context manager."""
def __init__(self, key, label=None, meta=None):
self._time = time
self.key = key
self.label = label or key
self.meta = meta
self.start = None
self.end = None
def __enter__(self):
return self.start_timer()
def __exit__(self, *args):
self.stop_timer()
def __repr__(self):
if self.label != self.key:
return '<Timer:{} {} : {}>'.format(self.key, self.label, self.duration)
return '<Timer:{} : {}>'.format(self.key, self.duration)
@property
def duration(self):
"""Duration of timer."""
return self.end - self.start
def export(self):
"""Export the timer data."""
return {
'key': self.key,
'label': self.label,
'meta': self.meta,
'start': self.start,
'end': self.end,
}
def start_timer(self):
"""Starts the timer."""
self.start = self._time.time()
return self
def stop_timer(self):
"""Stops the timer."""
self.end = self._time.time()
return self
class Profile(object):
"""Keeps track of all of the timer usage."""
def __init__(self):
self.timers = []
def __iter__(self):
for timer in self.timers:
yield timer
def __len__(self):
return len(self.timers)
def add_timer(self, timer):
"""Adds a new timer."""
if timer is None:
return
self.timers.append(timer)
return timer
def timer(self, *args, **kwargs):
"""Create a new timer."""
timer = Timer(*args, **kwargs)
self.timers.append(timer)
return timer
def export(self):
"""Export the timer data for each timer created."""
return [t.export() for t in self.timers]
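# Illustrative usage sketch (not part of the original module): the Profile object
# hands out Timer context managers and collects their results for export. The
# 0.01 second sleep is an arbitrary stand-in for real work.
if __name__ == '__main__':
    profile = Profile()
    with profile.timer('sleep', label='Example sleep', meta={'repeat': 1}):
        time.sleep(0.01)
    for timer in profile:
        print('{} took {:.4f}s'.format(timer.label, timer.duration))
    print(profile.export())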
avg_line_length: 23.975904 | max_line_length: 83 | alphanum_fraction: 0.541709
hexsha: 794e106bbee42cb5c23641ec7958bcfcb073fbec | size: 6,808 | ext: py | lang: Python
max_stars_repo_path: kolter_wong/models.py | max_stars_repo_name: anonymous2398384/provable_robustness_max_linear_regions | max_stars_repo_head_hexsha: 529165d9047261813bc068997415f668c9675119
max_stars_repo_licenses: ["BSD-3-Clause"] | max_stars_count: 34 | max_stars_repo_stars_event_min_datetime: 2019-03-10T22:16:24.000Z | max_stars_repo_stars_event_max_datetime: 2021-09-23T22:22:27.000Z
max_issues_repo_path: kolter_wong/models.py | max_issues_repo_name: anonymous2398384/provable_robustness_max_linear_regions | max_issues_repo_head_hexsha: 529165d9047261813bc068997415f668c9675119
max_issues_repo_licenses: ["BSD-3-Clause"] | max_issues_count: 2 | max_issues_repo_issues_event_min_datetime: 2019-09-24T16:18:55.000Z | max_issues_repo_issues_event_max_datetime: 2021-03-06T20:57:33.000Z
max_forks_repo_path: kolter_wong/models.py | max_forks_repo_name: anonymous2398384/provable_robustness_max_linear_regions | max_forks_repo_head_hexsha: 529165d9047261813bc068997415f668c9675119
max_forks_repo_licenses: ["BSD-3-Clause"] | max_forks_count: 9 | max_forks_repo_forks_event_min_datetime: 2019-03-13T17:35:36.000Z | max_forks_repo_forks_event_max_datetime: 2021-01-15T02:37:23.000Z
content:
import numpy as np
import scipy.io
import torch
import torch.nn as nn
import math
import data
from kolter_wong.convex_adversarial import Dense, DenseSequential
from kolter_wong.custom_layers import Conv2dUntiedBias
def select_model(model_type, n_in, n_out):
h_in, w_in, c_in = (28, 28, 1) if n_in == 28*28*1 else (32, 32, 3)
if 'fc' in model_type:
n_h_layers = int(model_type.split('fc')[-1])
if model_type == 'fc10': # manual hack to have the same model as we reported
n_hs = [124, 104, 104, 104, 104, 104, 104, 104, 86, 86]
else:
n_hs = n_h_layers * [1024]
n_hs = [n_in] + n_hs + [n_out]
model = fc(n_hs)
elif model_type == 'cnn_lenet_avgpool':
model = lenet_avgpool(h_in, w_in, c_in, n_out)
elif model_type == 'cnn_lenet_small':
model = lenet_small(h_in, w_in, c_in, n_out)
elif model_type == 'cnn_lenet_large':
model = lenet_large(h_in, w_in, c_in, n_out)
else:
raise ValueError('wrong model_type')
return model
def fc(n_hs):
layers = [Flatten()]
for i in range(len(n_hs) - 2):
layers.append(nn.Linear(n_hs[i], n_hs[i + 1]))
layers.append(nn.ReLU())
layers.append(nn.Linear(n_hs[-2], n_hs[-1]))
model = nn.Sequential(*layers)
for m in model.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
m.bias.data.zero_()
return model
def lenet_avgpool(h_in, w_in, c_in, n_out):
model = nn.Sequential(
Conv2dUntiedBias(24, 24, c_in, 16, 5, stride=1, padding=0),
# nn.Conv2d(1, 16, 5, stride=1, padding=0),
nn.ReLU(),
nn.Conv2d(16, 16, 2, stride=2, padding=0, bias=None), # aka nn.AvgPool2d(2, stride=2),
Conv2dUntiedBias(8, 8, 16, 32, 5, stride=1, padding=0),
# nn.Conv2d(16, 32, 5, stride=1, padding=0),
nn.ReLU(),
nn.Conv2d(32, 32, 2, stride=2, padding=0, bias=None), # aka nn.AvgPool2d(2, stride=2),
Flatten(),
nn.Linear(4 * 4 * 32, n_out)
)
# Proper default init (not needed if we just evaluate with KW code)
for m in model.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
return model
def lenet_small(h_in, w_in, c_in, n_out):
model = nn.Sequential(
nn.Conv2d(c_in, 16, 4, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(16, 32, 4, stride=2, padding=1),
nn.ReLU(),
Flatten(),
nn.Linear(32 * h_in//4 * w_in//4, 100),
nn.ReLU(),
nn.Linear(100, n_out)
)
for m in model.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
m.bias.data.zero_()
return model
def lenet_large(h_in, w_in, c_in, n_out):
model = nn.Sequential(
nn.Conv2d(c_in, 32, 4, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(32, 32, 3, stride=1, padding=1),
nn.ReLU(),
nn.Conv2d(32, 64, 4, stride=2, padding=1),
nn.ReLU(),
nn.Conv2d(64, 64, 3, stride=1, padding=1),
nn.ReLU(),
Flatten(),
nn.Linear(64 * h_in//4 * w_in//4, 512),
nn.ReLU(),
nn.Linear(512, 512),
nn.ReLU(),
nn.Linear(512, n_out)
)
for m in model.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
m.bias.data.zero_()
return model
def resnet(N=5, factor=10):
"""
Original CIFAR-10 ResNet proposed in He et al.
:param N:
:param factor:
:return:
"""
def block(in_filters, out_filters, k, downsample):
if not downsample:
k_first = 3
skip_stride = 1
k_skip = 1
else:
k_first = 4
skip_stride = 2
k_skip = 2
return [
Dense(nn.Conv2d(in_filters, out_filters, k_first, stride=skip_stride, padding=1)),
nn.ReLU(),
Dense(nn.Conv2d(in_filters, out_filters, k_skip, stride=skip_stride, padding=0),
None,
nn.Conv2d(out_filters, out_filters, k, stride=1, padding=1)),
nn.ReLU()
]
conv1 = [nn.Conv2d(3, 16, 3, stride=1, padding=1), nn.ReLU()]
conv2 = block(16, 16 * factor, 3, False)
for _ in range(N):
conv2.extend(block(16 * factor, 16 * factor, 3, False))
conv3 = block(16 * factor, 32 * factor, 3, True)
for _ in range(N - 1):
conv3.extend(block(32 * factor, 32 * factor, 3, False))
conv4 = block(32 * factor, 64 * factor, 3, True)
for _ in range(N - 1):
conv4.extend(block(64 * factor, 64 * factor, 3, False))
layers = (
conv1 +
conv2 +
conv3 +
conv4 +
[Flatten(),
nn.Linear(64 * factor * 8 * 8, 1000),
nn.ReLU(),
nn.Linear(1000, 10)]
)
model = DenseSequential(
*layers
)
for m in model.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
return model
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)
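# Illustrative sanity check (not part of the original module): a forward pass
# through one of the architectures above, using MNIST-shaped input (28x28x1).
# Any other input size handled by select_model() follows the same pattern.
def _shape_sanity_check():
    model = lenet_small(28, 28, 1, 10)
    x = torch.randn(4, 1, 28, 28)  # (batch, channels, height, width)
    logits = model(x)
    assert logits.shape == (4, 10)  # one logit per class for each example
    return logits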
def restore_model(sess, model_pt, model_tf, nn_type, device):
vars_pt = list(model_pt.parameters())
Ws, bs = model_tf.W, model_tf.b
vars_tf = []
for W, b in zip(Ws, bs):
vars_tf.append(W)
vars_tf.append(b)
assert len(vars_pt) == len(vars_tf)
for var_pt, var_tf in zip(vars_pt, vars_tf):
var_np = sess.run(var_tf)
if 'weights_conv' in var_tf.name:
var_np = np.transpose(var_np, [3, 2, 0, 1])
elif 'weights_fc1' in var_tf.name:
n_in, n_out = var_np.shape
h = w = int(math.sqrt(var_np.shape[0] / model_tf.n_filters[-1]))
var_np = np.transpose(var_np)
var_np = var_np.reshape([n_out, h, w, model_tf.n_filters[-1]])
var_np = var_np.transpose([0, 3, 1, 2])
var_np = var_np.reshape([n_out, n_in])
elif 'weight' in var_tf.name:
var_np = np.transpose(var_np)
elif 'bias' in var_tf.name:
var_np = var_np.flatten() # needed only for FC
var_pt.data = torch.from_numpy(var_np).to(device)
avg_line_length: 32.113208 | max_line_length: 95 | alphanum_fraction: 0.555964
hexsha: 794e11a8f09ccd2a2bf95ae23a2c5231bb528c72 | size: 4,391 | ext: py | lang: Python
max_stars_repo_path: src/modules/Permissions/Permissions.py | max_stars_repo_name: dfirestorm/Discord-Robot | max_stars_repo_head_hexsha: 5ad6bdf7a9704d73794b688127172a7ed69919f5
max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: src/modules/Permissions/Permissions.py | max_issues_repo_name: dfirestorm/Discord-Robot | max_issues_repo_head_hexsha: 5ad6bdf7a9704d73794b688127172a7ed69919f5
max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: src/modules/Permissions/Permissions.py | max_forks_repo_name: dfirestorm/Discord-Robot | max_forks_repo_head_hexsha: 5ad6bdf7a9704d73794b688127172a7ed69919f5
max_forks_repo_licenses: ["MIT"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2020-09-27T00:24:32.000Z | max_forks_repo_forks_event_max_datetime: 2020-09-27T00:24:32.000Z
content:
import sqlite3
import discord
from discord.ext import commands
from discord.ext.commands.errors import *
DB = 'permissions.db'
class Permissions(commands.Cog):
def __init__(self, client):
client.permission_authority = self
self.client = client
self.cwd = client.config['Bot']['modules_dir'] + 'Permissions/'
conn = sqlite3.connect(self.cwd + DB)
c = conn.cursor()
c.execute('CREATE TABLE IF NOT EXISTS permissions(user int, permission text)')
conn.commit()
conn.close()
@commands.command(help='Clears the permissions of a user')
async def clearperms(self, context, member: discord.Member):
conn = sqlite3.connect(self.cwd + DB)
c = conn.cursor()
c.execute('DELETE FROM permissions WHERE user=?', (member.id,))
conn.commit()
conn.close()
await context.send(f'Permissions cleared for {member.name}.')
@commands.command(help='List your current permissions')
async def perms(self, context):
conn = sqlite3.connect(self.cwd + DB)
c = conn.cursor()
results = c.execute('SELECT permission FROM permissions WHERE user=?', (context.message.author.id,)).fetchall()
perms = [''.join(row) for row in results]
perm_string = '\n'.join(perms)
await context.send(f'Here are your current permissions, {context.message.author.mention}\n```\n{perm_string}\n```')
conn.close()
@commands.command(help='Gives permission to a user')
async def giveperm(self, context, permission, member: discord.Member):
mod_role = self.client.config['Bot']['mod_role']
if mod_role in [role.name.lower() for role in context.message.author.roles]:
conn = sqlite3.connect(self.cwd + DB)
c = conn.cursor()
c.execute('INSERT INTO permissions(user, permission) VALUES (?, ?)', (member.id, permission))
conn.commit()
conn.close()
await context.send(f'Gave permission \'{permission}\' to {member.name}.')
else:
await context.send(f'You do not have permission to do that. Ask for the role {mod_role}.')
@commands.command(help='Revokes permission from a user')
async def takeperm(self, context, permission, member: discord.Member):
mod_role = self.client.config['Bot']['mod_role']
if mod_role in [role.name.lower() for role in context.message.author.roles]:
conn = sqlite3.connect(self.cwd + DB)
c = conn.cursor()
c.execute('DELETE FROM permissions WHERE user=? AND permission=?', (member.id, permission))
conn.commit()
conn.close()
await context.send(f'Revoked permission \'{permission}\' from {member.name}.')
else:
await context.send(f'You do not have permission to do that. Ask for the role {mod_role}.')
@commands.command(help='Gives the appropriate RoBot mod role to a user')
async def mod(self, context, member: discord.Member):
mod_role = self.client.config['Bot']['mod_role']
if mod_role in [role.name.lower() for role in context.message.author.roles]:
await member.add_roles(discord.utils.get((member.guild.roles), name=mod_role))
await context.send(f'{member.name} is now a mod.')
else:
await context.send(f'You do not have permission to do that. Ask for the role {mod_role}.')
@commands.command(help='Removes the appropriate RoBot mod role from a user')
async def unmod(self, context, member: discord.Member):
mod_role = self.client.config['Bot']['mod_role']
if mod_role in [role.name.lower() for role in context.message.author.roles]:
await member.remove_roles(discord.utils.get((member.guild.roles), name=mod_role))
await context.send(f'{member.name} is no longer a mod.')
else:
await context.send(f'You do not have permission to do that. Ask for the role {mod_role}.')
def hasperm(self, permission, member: discord.Member):
conn = sqlite3.connect(self.cwd + DB)
c = conn.cursor()
hasperm = len(c.execute('SELECT * FROM permissions WHERE user=? AND permission=?', (member.id, permission)).fetchall()) == 1
conn.commit()
conn.close()
return hasperm
def setup(client):
client.add_cog(Permissions(client))
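# Illustrative sketch (not part of the original module): another cog can consult
# this one through the `client.permission_authority` handle set in __init__.
# The 'music' permission string is an arbitrary example.
class ExampleConsumerCog(commands.Cog):
    def __init__(self, client):
        self.client = client
    @commands.command(help='Example command gated by a custom permission')
    async def play(self, context):
        if self.client.permission_authority.hasperm('music', context.message.author):
            await context.send('Playing your request!')
        else:
            await context.send("You need the 'music' permission to do that.")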
avg_line_length: 47.215054 | max_line_length: 132 | alphanum_fraction: 0.64154
hexsha: 794e11d52cfee297752c20bb5d126d4c954e18e5 | size: 2,446 | ext: py | lang: Python
max_stars_repo_path: api/views/explanation_views.py | max_stars_repo_name: tiveritz/how-tos-api | max_stars_repo_head_hexsha: 5dd73fd72ea1f07123ce8d15d2935d9d9e473c8e
max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: api/views/explanation_views.py | max_issues_repo_name: tiveritz/how-tos-api | max_issues_repo_head_hexsha: 5dd73fd72ea1f07123ce8d15d2935d9d9e473c8e
max_issues_repo_licenses: ["MIT"] | max_issues_count: 3 | max_issues_repo_issues_event_min_datetime: 2021-05-23T07:57:15.000Z | max_issues_repo_issues_event_max_datetime: 2021-05-28T05:38:17.000Z
max_forks_repo_path: api/views/explanation_views.py | max_forks_repo_name: tiveritz/how-tos-api | max_forks_repo_head_hexsha: 5dd73fd72ea1f07123ce8d15d2935d9d9e473c8e
max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from ..models import Explanation
from ..serializers.explanation_serializers import (ExplanationSerializer,
ExplanationDetailSerializer)
class ExplanationView(APIView):
"""
    List existing Explanations and create new ones
"""
def get(self, request):
explanations = Explanation.objects.all().order_by('-updated')
serializer = ExplanationSerializer(explanations,
many=True,
context={'request': request})
return Response(serializer.data)
def post(self, request, format=None):
serializer = ExplanationSerializer(data=request.data,
context={'request': request})
if serializer.is_valid():
serializer.save()
return Response(serializer.data,
status=status.HTTP_201_CREATED)
return Response(serializer.errors,
status=status.HTTP_400_BAD_REQUEST)
class ExplanationDetailView(APIView):
"""
View to Explanation Detail
"""
def get(self, request, uri_id):
try:
explanation = Explanation.objects.get(uri_id=uri_id)
except Explanation.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
serializer = ExplanationDetailSerializer(explanation,
context={'request': request})
return Response(serializer.data)
def patch(self, request, uri_id):
explanation = Explanation.objects.get(uri_id=uri_id)
serializer = ExplanationDetailSerializer(explanation,
data=request.data,
partial=True,
context={'request': request})
if serializer.is_valid():
serializer.save()
return Response(serializer.data,
status=status.HTTP_200_OK)
return Response(serializer.errors,
status=status.HTTP_400_BAD_REQUEST)
def delete(self, request, uri_id):
Explanation.objects.get(uri_id=uri_id).delete()
return Response(status=status.HTTP_204_NO_CONTENT)
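# Illustrative sketch (not part of the original module): how these views are
# typically wired up in a urls.py. The route names and URL layout are assumptions.
def _example_urlpatterns():
    from django.urls import path
    return [
        path('explanations/', ExplanationView.as_view(), name='explanation-list'),
        path('explanations/<uri_id>/', ExplanationDetailView.as_view(), name='explanation-detail'),
    ]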
avg_line_length: 40.766667 | max_line_length: 79 | alphanum_fraction: 0.575634
hexsha: 794e1273e3b7622199661f7a5ca2ead5c125bbe5 | size: 1,168 | ext: py | lang: Python
max_stars_repo_path: lib/galaxy/model/migrate/versions/0113_update_migrate_tools_table.py | max_stars_repo_name: innovate-invent/galaxy | max_stars_repo_head_hexsha: 10aa953a40e171246bdd1804c74e8019da8e8200
max_stars_repo_licenses: ["CC-BY-3.0"] | max_stars_count: 4 | max_stars_repo_stars_event_min_datetime: 2018-10-29T18:34:38.000Z | max_stars_repo_stars_event_max_datetime: 2021-09-29T23:30:42.000Z
max_issues_repo_path: lib/galaxy/model/migrate/versions/0113_update_migrate_tools_table.py | max_issues_repo_name: innovate-invent/galaxy | max_issues_repo_head_hexsha: 10aa953a40e171246bdd1804c74e8019da8e8200
max_issues_repo_licenses: ["CC-BY-3.0"] | max_issues_count: 30 | max_issues_repo_issues_event_min_datetime: 2016-10-20T15:35:12.000Z | max_issues_repo_issues_event_max_datetime: 2018-10-02T15:59:54.000Z
max_forks_repo_path: lib/galaxy/model/migrate/versions/0113_update_migrate_tools_table.py | max_forks_repo_name: innovate-invent/galaxy | max_forks_repo_head_hexsha: 10aa953a40e171246bdd1804c74e8019da8e8200
max_forks_repo_licenses: ["CC-BY-3.0"] | max_forks_count: 7 | max_forks_repo_forks_event_min_datetime: 2016-11-03T19:11:01.000Z | max_forks_repo_forks_event_max_datetime: 2020-05-11T14:23:52.000Z
content:
"""
Migration script to update the migrate_tools.repository_path column to point to the new location lib/tool_shed/galaxy_install/migrate.
"""
from __future__ import print_function
import logging
import sys
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
format = "%(name)s %(levelname)s %(asctime)s %(message)s"
formatter = logging.Formatter(format)
handler.setFormatter(formatter)
log.addHandler(handler)
def upgrade(migrate_engine):
print(__doc__)
    # Update the repository path.
try:
cmd = "UPDATE migrate_tools set repository_path='lib/galaxy/tool_shed/migrate';"
migrate_engine.execute(cmd)
except Exception:
log.exception("Updating migrate_tools.repository_path column to point to the new location lib/tool_shed/galaxy_install/migrate failed.")
def downgrade(migrate_engine):
try:
cmd = "UPDATE migrate_tools set repository_path='lib/galaxy/tool_shed/migrate';"
migrate_engine.execute(cmd)
except Exception:
log.exception("Updating migrate_tools.repository_path column to point to the old location lib/galaxy/tool_shed/migrate failed.")
avg_line_length: 34.352941 | max_line_length: 144 | alphanum_fraction: 0.760274