Dataset columns:

| column | dtype | values |
|---|---|---|
| hexsha | string | lengths 40 to 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3 to 248 |
| max_stars_repo_name | string | lengths 5 to 125 |
| max_stars_repo_head_hexsha | string | lengths 40 to 78 |
| max_stars_repo_licenses | list | lengths 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable (⌀) |
| max_stars_repo_stars_event_min_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_stars_repo_stars_event_max_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_issues_repo_path | string | lengths 3 to 248 |
| max_issues_repo_name | string | lengths 5 to 125 |
| max_issues_repo_head_hexsha | string | lengths 40 to 78 |
| max_issues_repo_licenses | list | lengths 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable (⌀) |
| max_issues_repo_issues_event_min_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_issues_repo_issues_event_max_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_forks_repo_path | string | lengths 3 to 248 |
| max_forks_repo_name | string | lengths 5 to 125 |
| max_forks_repo_head_hexsha | string | lengths 40 to 78 |
| max_forks_repo_licenses | list | lengths 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable (⌀) |
| max_forks_repo_forks_event_min_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_forks_repo_forks_event_max_datetime | string | lengths 24 to 24, nullable (⌀) |
| content | string | lengths 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |
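The schema above describes one Python file per row: repository metadata for the most-starred, most-issued, and most-forked copies of the file, the raw file content, and derived count/score columns. As a hedged sketch of how a split with this schema could be inspected (the dataset path `your-org/python-code-scored` is a placeholder, not something stated on this page), the Hugging Face `datasets` library can stream it and filter on the score columns:

```python
# Hypothetical sketch: the dataset path below is a placeholder for a dataset
# that uses the schema listed above.
from datasets import load_dataset

ds = load_dataset("your-org/python-code-scored", split="train", streaming=True)

# Keep files that define at least one class and carry some documentation.
keep = ds.filter(lambda r: r["count_classes"] > 0 and r["score_documentation"] >= 0.2)

for row in keep.take(3):
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
```

Sample rows: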
hexsha 6c36540f75ff0aa4e3d1fa481631b799e5a9132c | size 1,041 | ext py | lang Python
repo duynb92/portfolio_site | path portfolio_pj/portfolio_app/views.py | head f6898e8d1c3a67aa8dc6eafc7e4804e81dc46063 | licenses ["MIT"]
max_stars_count null | max_issues_count null | max_forks_count null
content:
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from models import *
# Create your views here.
def index(req):
context = HomeContext("Home", Facade.getSkills(), Facade.getHobbies())
return render(req, 'index.html', context=vars(context))
def profile(req):
profile_context = ProfileContext("Profile", Facade.getProfiles())
return render(req, 'profile.html', context=vars(profile_context))
def portfolio(req):
portfolio_context = PortfolioContext("Portfolio", Facade.getProjects())
return render(req, 'portfolio-gird-3.html', context=vars(portfolio_context))
def service(req):
service_context = ServiceContext("Services", Facade.getServices())
return render(req, 'services.html', context=vars(service_context))
def contact(req):
context = BaseContext("Contact")
return render(req, 'contact-3.html', context=vars(context))
def blog(req):
blog_context = BlogContext("Blog", [])
return render(req, 'blog.html', context=vars(blog_context))
avg_line_length 33.580645 | max_line_length 80 | alphanum_fraction 0.727185 | classes 0 (score 0) | generators 0 (score 0) | decorators 0 (score 0) | async_functions 0 (score 0) | documentation 190 (score 0.182517)

hexsha 6c3666e9b94187f8c2b912f96ab0492447c6ab94 | size 16,981 | ext py | lang Python
repo tianyu-su/torchfurnace | path torchfurnace/engine.py | head 2f4a9a0655a8d3c3e231c86611085f834e03c2f8 | licenses ["MIT"]
max_stars_count 8 (2020-03-20T13:49:30.000Z to 2021-12-04T07:41:27.000Z) | max_issues_count null | max_forks_count 1 (2020-04-01T11:01:09.000Z to 2020-04-01T11:01:09.000Z)
content:
# -*- coding: utf-8 -*-
# Date: 2020/3/17 12:16
"""
an engine for deep learning task
"""
__author__ = 'tianyu'
import abc
import random
import time
import warnings
import numpy as np
import torch.backends.cudnn
import torch.nn.functional as F
import torch.utils.data
from torch.optim.lr_scheduler import StepLR
from .options import Parser
from .tracer import Tracer
from .utils import tracer_component as tc
from .utils.function import *
class Engine(object, metaclass=abc.ABCMeta):
"""
Suggest Overriding Function:
_on_start_epoch: add some your meters for learning
_get_lr_scheduler: define your lr scheduler, default StepLR(step=30,gamma=0.1)
_on_start_batch: define how to read your dataset to return input,target as well as put on right device
_add_on_end_batch_log: add some your log information
_add_on_end_batch_tb: add some your visualization for tensorboard by add_xxx
_add_record: add some record information
_before_evaluate: define your operation before calling _validate evaluation mode
_after_evaluate: define your operation after calling _validate evaluation mode
"""
def __init__(self, parser: Parser, experiment_name='exp'):
self._parser = parser
self._switch_training = True
self._meters = self._status_meter()
self._state = {'best_acc1': -1, 'training_iterations': 0, 'iteration': 0}
self._experiment_name = experiment_name
self._init_learning()
def _status_meter(self):
outer = self
class StatusMeter(object):
def __init__(self):
self._training = Chain()
self._validation = Chain()
def __getattr__(self, item):
if outer._switch_training:
return getattr(self._training, item)
else:
return getattr(self._validation, item)
return StatusMeter()
def _close(self):
self._tracer.close()
def _do_args(self):
if self._args.deterministic:
seed = 1541233595
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
torch.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
torch.set_printoptions(precision=10)
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if self._args.debug:
self._args.workers = 0
self._args.batch_size = 2
if self._args.gpu is not None:
# torch.backends.cudnn.benchmark = True
import os
os.environ['CUDA_VISIBLE_DEVICES'] = str(self._args.gpu)
# assign 0 because if you code os.environ['CUDA_VISIBLE_DEVICES']=xx,
# all gpu device is 0 in pytorch context, otherwise you will get a
# RuntimeError: CUDA error: invalid device ordinal
self._args.gpu = 0
if self._args.evaluate:
self._args.p_bar = True
self._args.no_tb = False
if self._args.p_bar:
self._args.print_freq = 1
def _warp_loader(self, training, dataset):
return torch.utils.data.DataLoader(dataset, batch_size=self._args.batch_size, num_workers=self._args.workers,
pin_memory=True, shuffle=training)
def _init_learning(self):
self._args = self._parser.parse_args()
self._do_args()
self._tracer = \
Tracer(root_dir=Path(self._args.work_dir), work_name=self._parser.work_name, clean_up=self._args.clean_up) \
.tb_switch(self._args.no_tb) \
.debug_switch(self._args.debug or self._args.p_bar) \
.snap_git_switch(self._args.snapgit) \
.attach(experiment_name=self._experiment_name, override=self._args.nowtime_exp,
logger_name=self._args.logger_name)
if self._args.revert_snapgit:
self._tracer.revert(self._args.revert_snapgit)
@property
def tracer(self):
return self._tracer
def _resume(self, model, optimizer):
"""load more than one model and optimizer, for example GAN"""
for pth, m, optim in zip(self._args.resume, [model] if not isinstance(model, list) else model,
[optimizer] if not isinstance(optimizer, list) else optimizer):
ret = self._tracer.load(tc.Model(
pth, {
'model': m,
'optim': optim
}))
self._args.start_epoch = ret['start_epoch']
self._state['best_acc1'] = ret['best_acc1']
self._args.epochs += self._args.start_epoch
@staticmethod
def _get_lr_scheduler(optimizer: torch.optim.Optimizer) -> list:
return [StepLR(optim, 30, gamma=0.1) for optim in ([optimizer] if not isinstance(optimizer, list) else optimizer)]
@staticmethod
def _on_start_epoch():
"""
add your meters by get_meters function
for example : get_meters(['mine1', 'mine2'])
usage: self._meters[mode].{name}.update() detail in : from .meter import AverageMeter
"""
return get_meters([])
def _add_record(self, ret_forward, batch_size):
"""
self._meters.losses.update(ret['loss'], bs)
"""
pass
def _before_evaluate(self, model):
"""
load checkpoint
"""
for pth, m in zip(self._args.evaluate, [model] if not isinstance(model, list) else model):
if os.path.isfile(pth):
log("=> loading checkpoint '{}'".format(pth))
checkpoint = torch.load(pth, map_location='cpu')
m.load_state_dict(checkpoint['state_dict'])
log("=> loaded checkpoint '{}' (epoch {} Acc@1 {})"
.format(pth, checkpoint['epoch'], checkpoint['best_acc1']))
else:
assert False, "=> no checkpoint found at '{}'".format(pth)
def _after_evaluate(self):
"""
execute something after evaluation
"""
pass
def _on_end_epoch(self, model, optimizer, is_best):
"""save more than one model and optimizer, for example GAN"""
postfix = f'_{self._args.extension}'
if self._args.extension == '': postfix = ''
for m, optim in zip([model] if not isinstance(model, list) else model,
[optimizer] if not isinstance(optimizer, list) else optimizer):
self._tracer.store(tc.Model(
f"{model.__class__.__name__}{postfix}.pth.tar",
{
'epoch': self._state['epoch'] + 1,
'arch': str(m),
'state_dict': m.state_dict(),
'best_acc1': self._state['best_acc1'],
'optimizer': optim.state_dict(),
}, is_best))
def _on_start_batch(self, data):
"""override to adapt yourself dataset __getitem__"""
inp, target = data
if self._args.gpu is not None:
return inp.cuda(self._args.gpu), target.cuda(self._args.gpu), target.size(0)
else:
return inp, target, target.size(0)
def _add_on_end_batch_log(self, training):
""" user can add some log information with _on_start_epoch using all kinds of meters in _on_end_batch"""
if training:
pass
else:
pass
return ""
def _add_on_end_batch_tb(self, training):
""" user can add some tensorboard operations with _on_start_epoch using all kinds of meters"""
if training:
pass
else:
pass
def _on_end_batch(self, data_loader, optimizer=None):
""" print log and visualization"""
training_iterations = self._state['training_iterations']
if self._switch_training:
if self._state['iteration'] != 0 and self._state['iteration'] % self._args.print_freq == 0:
print_process_bar = {'p_bar': self._args.p_bar, 'current_batch': self._state['iteration'], 'total_batch': len(data_loader)}
if self._args.p_bar:
prefix_info = "Epoch:[{0}] "
else:
prefix_info = 'Epoch: [{0}][{1}/{2}]\t'
fix_log = prefix_info + 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' \
'Data {data_time.val:.3f} ({data_time.avg:.3f})\tLoss {loss.val:.4f} ({loss.avg:.4f})\t' \
'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t' \
'Acc@5 {top5.val:.3f} ({top5.avg:.3f})\t'
fix_log = fix_log.format(
self._state['epoch'], self._state['iteration'], len(data_loader), batch_time=self._meters.batch_time,
data_time=self._meters.data_time, loss=self._meters.losses,
top1=self._meters.top1, top5=self._meters.top5)
log(fix_log + self._add_on_end_batch_log(True), **print_process_bar)
if self._args.no_tb:
self._tracer.tb.add_scalars('data/loss', {
'training': self._meters.losses.avg,
}, training_iterations)
self._tracer.tb.add_scalar('data/epochs', self._state['epoch'], training_iterations)
for oi, optim in enumerate([optimizer] if not isinstance(optimizer, list) else optimizer):
self._tracer.tb.add_scalars(f'data/learning_rate', {f'lr_optim_{oi + 1}': optim.param_groups[-1]['lr']}, training_iterations)
self._tracer.tb.add_scalars('data/precision/top1', {
'training': self._meters.top1.avg,
}, training_iterations)
self._tracer.tb.add_scalars('data/precision/top5', {
'training': self._meters.top5.avg
}, training_iterations)
self._tracer.tb.add_scalars('data/runtime', {
'batch_time': self._meters.batch_time.avg,
'data_time': self._meters.data_time.avg
}, training_iterations)
self._add_on_end_batch_tb(True)
elif not self._args.evaluate:
fix_log = ('Testing: Epoch [{0}] Acc@1 {top1.avg:.3f}\tAcc@5 {top5.avg:.3f}\tLoss {loss.avg:.4f}\t[best:{best_acc}]\t'
.format(self._state['epoch'], top1=self._meters.top1, top5=self._meters.top5,
loss=self._meters.losses, best_acc=self._state['best_acc1']))
log(fix_log + self._add_on_end_batch_log(False), color="green")
if self._args.no_tb:
self._tracer.tb.add_scalars('data/loss', {
'validation': self._meters.losses.avg,
}, training_iterations)
self._tracer.tb.add_scalars('data/precision/top1', {
'validation': self._meters.top1.avg,
}, training_iterations)
self._tracer.tb.add_scalars('data/precision/top5', {
'validation': self._meters.top5.avg
}, training_iterations)
self._add_on_end_batch_tb(False)
@staticmethod
@abc.abstractmethod
def _on_forward(training, model, inp, target, optimizer=None) -> dict:
"""
implement training and validation code here
:param training: bool -> training validation
:param model: one or list
:param inp: batch data
:param target: batch target
:param optimizer: one or list
:return:
"""
""" for example """
# ret can expand but DONT Shrink
ret = {'loss': object, 'acc1': object, 'acc5': object}
# do something
output = model(inp)
loss = F.cross_entropy(output, target)
# compute acc1 acc5
acc1, acc5 = accuracy(output, target, topk=(1, 5))
if training:
optimizer.zero_grad()
loss.backward()
optimizer.step()
ret['loss'] = loss.item()
ret['acc1'] = acc1.item()
ret['acc5'] = acc5.item()
return ret
def _train(self, model, train_loader, optimizer, epoch):
self._switch_training = True
# setup model
[m.train() for m in (model if isinstance(model, list) else [model])]
self._meters.merge(get_meters(['batch_time', 'data_time', 'losses', 'top1', 'top5']))
self._meters.merge(self._on_start_epoch())
end = time.time()
for i, batch in enumerate(train_loader):
self._state['training_iterations'] += 1
self._state['iteration'] = i
self._state['epoch'] = epoch
# measure data loading time
self._meters.data_time.update(time.time() - end)
inp, target, bs = self._on_start_batch(batch)
# compute output
ret = self._on_forward(True, model, inp, target, optimizer)
# record indicators
self._meters.losses.update(ret['loss'], bs)
self._meters.top1.update(ret['acc1'], bs)
self._meters.top5.update(ret['acc5'], bs)
self._add_record(ret, bs)
# measure elapsed time
self._meters.batch_time.update(time.time() - end)
end = time.time()
self._on_end_batch(train_loader, optimizer)
def _validate(self, model, val_loader):
self._switch_training = False
# setup model
[m.eval() for m in (model if isinstance(model, list) else [model])]
self._meters.merge(get_meters(['batch_time', 'losses', 'top1', 'top5']))
self._meters.merge(self._on_start_epoch())
end = time.time()
with torch.no_grad():
for i, batch in enumerate(val_loader):
self._state['iteration'] = i
inp, target, bs = self._on_start_batch(batch)
# compute output
ret = self._on_forward(False, model, inp, target)
# record indicators
self._meters.losses.update(ret['loss'], bs)
self._meters.top1.update(ret['acc1'], bs)
self._meters.top5.update(ret['acc5'], bs)
self._add_record(ret, bs)
# measure elapsed time
self._meters.batch_time.update(time.time() - end)
end = time.time()
self._on_end_batch(val_loader)
return self._meters.top1.avg
def learning(self, model, optimizer, train_dataset, val_dataset):
"""
Core function of engine to organize training process
:param val_dataset: training dataset
:param train_dataset: validation dataset
:param model: one or list
:param optimizer: one or list
"""
# save config
cfg = {f"optimizer{i + 1}": optim for i, optim in enumerate([optimizer] if not isinstance(optimizer, list) else optimizer)}
self._tracer.store(tc.Config({**cfg, **vars(self._args)}))
train_loader = self._warp_loader(True, train_dataset)
val_loader = self._warp_loader(False, val_dataset)
log('==> Start ...', color="red")
if self._args.resume:
self._resume(model, optimizer)
# cuda setup
if self._args.gpu is not None:
[m.cuda(self._args.gpu) for m in (model if isinstance(model, list) else [model])]
if self._args.evaluate:
self._before_evaluate(model)
self._validate(model, val_loader)
self._after_evaluate()
else:
ajlr = None
if self._args.adjust_lr:
ajlr = self._get_lr_scheduler(optimizer)
for epoch in range(self._args.start_epoch, self._args.epochs):
# train for one epoch
self._train(model, train_loader, optimizer, epoch)
# evaluate on validation set
acc1 = self._validate(model, val_loader)
# remember best acc@1 and save checkpoint
is_best = acc1 > self._state['best_acc1']
self._state['best_acc1'] = max(acc1, self._state['best_acc1'])
self._on_end_epoch(model, optimizer, is_best)
if self._args.adjust_lr:
[lr.step() for lr in ajlr]
print(f"Best Acc1:{self._state['best_acc1']}")
self._close()
return self._state['best_acc1']
avg_line_length 39.955294 | max_line_length 149 | alphanum_fraction 0.572876 | classes 16,537 (score 0.973853) | generators 0 (score 0) | decorators 1,508 (score 0.088805) | async_functions 0 (score 0) | documentation 4,399 (score 0.259054)

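The Engine class in the row above documents its override hooks and marks only _on_forward as abstract; the abstract method's own body already sketches the intended implementation. Purely as an illustration (the subclass name and the import path of the accuracy helper are assumptions, not part of the row), a minimal concrete engine could look like:

```python
# Hypothetical subclass of the Engine shown above; it mirrors the example body
# of the abstract _on_forward hook.
import torch.nn.functional as F
from torchfurnace.engine import Engine
from torchfurnace.utils.function import accuracy  # assumed location of the accuracy helper

class ClassifierEngine(Engine):
    @staticmethod
    def _on_forward(training, model, inp, target, optimizer=None) -> dict:
        output = model(inp)
        loss = F.cross_entropy(output, target)
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        if training:
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        return {'loss': loss.item(), 'acc1': acc1.item(), 'acc5': acc5.item()}
```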
hexsha 6c36c7337778993804185f55e34f582ccb3e038c | size 3,736 | ext py | lang Python
repo knowsuchagency/ninjadog | path tests/test_ninjadog.py | head 54f0c98da1006d97b6e39d39d0e4e056288f52d0 | licenses ["MIT"]
max_stars_count 26 (2017-06-23T02:18:54.000Z to 2022-02-19T08:45:11.000Z) | max_issues_count 21 (2017-06-22T07:30:20.000Z to 2022-03-26T02:23:24.000Z) | max_forks_count 2 (2018-06-20T01:16:27.000Z to 2020-07-14T19:55:27.000Z)
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `ninjadog` package."""
# TODO: test raises ValueError when pug cli can't be found and not passed explicitly to renderer
def test_npm_installed():
from subprocess import Popen
assert Popen(('which', 'npm')).wait() == 0, 'npm must be installed'
def test_pug_cli_exists():
from pathlib import Path
from ninjadog.constants import PUG_CLI_PATH
assert Path(PUG_CLI_PATH).exists()
def test_hello_world():
from ninjadog import render
assert render('h1 hello world') == '<h1>hello world</h1>'
def test_pug_variable():
from ninjadog import render
assert render('h1= title', context={'title': 'hello world'}) == '<h1>hello world</h1>'
def test_jinja2_variable():
from ninjadog import render
assert render('h1 {{ title }}', context={'title': 'hello world'}, with_jinja=True) == '<h1>hello world</h1>'
def test_context():
import ninjadog
context = {'name': 'Derp'}
assert ninjadog.render('h1 hello #{ name }', context=context) == '<h1>hello Derp</h1>'
assert ninjadog.render("h1= name", context=context) == '<h1>Derp</h1>'
def test_conditional():
from textwrap import dedent
import ninjadog
string = dedent("""
if name == 'sam'
h1 hello #{ name }
""")
assert ninjadog.render(string, context={'name': 'sam'}) == '<h1>hello sam</h1>'
string = dedent("""
if person.name == 'sam'
h1 hello #{ person.name }
""")
assert ninjadog.render(string, context={'person': {'name': 'sam'}}) == '<h1>hello sam</h1>'
def test_render_no_string_argument():
from tempfile import NamedTemporaryFile
import ninjadog
string = 'h1 hello'
with NamedTemporaryFile('w+') as tempfile:
tempfile.write(string)
tempfile.seek(0)
assert ninjadog.render(file=tempfile.name) == ninjadog.render(string) == '<h1>hello</h1>'
def test_with_pug_with_jinja2():
from textwrap import dedent
from ninjadog import render
string = dedent("""
if person.name == "Bob"
h1 Hello Bob
else
h1 My name is #{ person.name }
p The persons's uppercase name is {{ person.get('name').upper() }}
p The person's name is #{ person.name }
if animal
h1 This should not output
else
p animal value is false
""").strip()
context = {'person': {'name': 'Bob'}, 'animal': None}
expected_output = dedent("""
<h1>Hello Bob</h1>
<p>The persons's uppercase name is BOB</p>
<p>The person's name is Bob</p>
<p>animal value is false</p>
""").strip()
actual_output = render(string, context=context, pretty=True, with_jinja=True).strip()
assert expected_output == actual_output
def test_cli_string():
from ninjadog.cli import main
from ninjadog.utils import jsonify
context = jsonify({'title': 'hello, world'})
assert main(('string', 'h1= title', '-c', context)) == '<h1>hello, world</h1>'
def test_extends():
from tempfile import gettempdir
from textwrap import dedent
from pathlib import Path
from ninjadog import render
parent_string = dedent("""
h1 Title
block content
""")
child_string = dedent("""
extends parent
block content
h2 Subtitle
""")
parent_path = Path(gettempdir(), 'parent.pug')
child_path = Path(gettempdir(), 'child.pug')
with parent_path.open('w+') as parent, child_path.open('w+') as child:
parent.write(parent_string)
parent.seek(0)
child.write(child_string)
child.seek(0)
assert render(file=child_path) == '<h1>Title</h1><h2>Subtitle</h2>'
assert render(file=str(child_path)) == '<h1>Title</h1><h2>Subtitle</h2>'
avg_line_length 27.470588 | max_line_length 112 | alphanum_fraction 0.635974 | classes 0 (score 0) | generators 0 (score 0) | decorators 0 (score 0) | async_functions 0 (score 0) | documentation 1,434 (score 0.383833)

hexsha 6c37074352737689850fbeed83a2fff6562b2609 | size 1,610 | ext py | lang Python
repo Hassan-gholipoor/Todo_App_API | path core/views.py | head 19f9c141868fa0b01a11ed2a20f665d97b877340 | licenses ["MIT"]
max_stars_count null | max_issues_count null | max_forks_count null
content:
from rest_framework import viewsets, permissions
from rest_framework_simplejwt.authentication import JWTAuthentication
from rest_framework.response import Response
from rest_framework import status
from core.serializers import TodoSerializer, TodoDetailSerializer
from core.models import Todo
class TodoApiViewSet(viewsets.ModelViewSet):
serializer_class = TodoSerializer
authentication_classes = [JWTAuthentication]
permission_classes = [permissions.IsAuthenticated]
queryset = Todo.objects.all()
def _params_to_str(self, qs):
return [str(title) for title in qs.split(',')]
def get_queryset(self):
titles = self.request.query_params.get('titles')
queryset = self.queryset
if titles:
title_to_str = self._params_to_str(titles)
queryset = queryset.filter(title__in=title_to_str).order_by('-title')
return queryset.filter(owner=self.request.user).order_by('-title')
def get_serializer_class(self, *args, **kwargs):
if self.action == 'retrieve':
return TodoDetailSerializer
return self.serializer_class
def create(self, request):
data = request.POST.copy()
data['owner'] = self.request.user.pk
serializer = TodoSerializer(data=data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def perform_create(self, serializer):
serializer.save(owner=self.request.user)
avg_line_length 36.590909 | max_line_length 81 | alphanum_fraction 0.709317 | classes 1,314 (score 0.816149) | generators 0 (score 0) | decorators 0 (score 0) | async_functions 0 (score 0) | documentation 44 (score 0.027329)

hexsha 6c3ba5d9b3babe444d2c4d3c2e6c46f0cd91ef11 | size 27 | ext py | lang Python
repo fsrlab/FSR_ROS_SIM | path ep_ws/devel/lib/python3/dist-packages/realsense2_camera/srv/__init__.py | head f22dfbd19ca1f2f1c7456fc51fb382509f9d7c62 | licenses ["MIT"]
max_stars_count null | max_issues_count null | max_forks_count null
content:
from ._DeviceInfo import *
avg_line_length 13.5 | max_line_length 26 | alphanum_fraction 0.777778 | classes 0 (score 0) | generators 0 (score 0) | decorators 0 (score 0) | async_functions 0 (score 0) | documentation 0 (score 0)

hexsha 6c3c2ae1bdf2d29f699c3d8948c8a02e1af7dcc8 | size 788 | ext py | lang Python
repo yeezy-na-izi/YlDjango | path users/forms.py | head 6fd0763183d76e4f7ca4a9686170d0665d7c04e9 | licenses ["MIT"]
max_stars_count 6 (2022-03-06T10:43:06.000Z to 2022-03-24T13:00:12.000Z) | max_issues_count 6 (2022-03-09T13:22:41.000Z to 2022-03-25T09:21:37.000Z) | max_forks_count null
content:
from django.contrib.auth.forms import UserCreationForm
from users.models import User, Profile
from django import forms
class RegistrationForm(UserCreationForm):
class Meta:
model = User
fields = ('first_name', 'last_name', 'email')
def clean_password2(self):
cd = self.cleaned_data
if cd['password1'] != cd['password2']:
            raise forms.ValidationError('Пароли не совпадают')  # "Passwords do not match"
return cd['password2']
class UserForm(forms.ModelForm):
class Meta:
model = User
fields = ('first_name', 'last_name', 'email')
class ProfileForm(forms.ModelForm):
class Meta:
model = Profile
fields = ('birthday',)
widgets = {
'birthday': forms.DateInput(attrs={'type': 'date'})
}
avg_line_length 24.625 | max_line_length 63 | alphanum_fraction 0.623096 | classes 676 (score 0.839752) | generators 0 (score 0) | decorators 0 (score 0) | async_functions 0 (score 0) | documentation 163 (score 0.202484)

hexsha 6c3c5ab25d2cf06474ae606ac7def120213405ed | size 2,513 | ext py | lang Python
repo terrifyzhao/neo4j_graph | path kbqa/create_question_data.py | head 71f8ad1530805d0cca7ae2131f81a96a6b519d02 | licenses ["Apache-2.0"]
max_stars_count 3 (2020-06-01T01:45:44.000Z to 2021-05-10T06:05:18.000Z) | max_issues_count null | max_forks_count 2 (2021-04-05T03:09:09.000Z to 2021-09-19T11:29:38.000Z)
content:
from py2neo import Graph
import numpy as np
import pandas as pd
graph = Graph("http://192.168.50.179:7474", auth=("neo4j", "qwer"))
# Question templates in this file are Chinese: 收益/收入 = earnings/income; 年龄/多大/几岁 = age/how old;
# 大于/等于/小于/是/有 = greater than/equal to/less than/is/has; 董事/监事 = director/supervisor.
def create_attribute_question():
company = graph.run('MATCH (n:company) RETURN n.name as name').to_ndarray()
person = graph.run('MATCH (n:person) RETURN n.name as name').to_ndarray()
questions = []
for c in company:
c = c[0].strip()
question = f"{c}的收益"
questions.append(question)
question = f"{c}的收入"
questions.append(question)
for p in person:
p = p[0].strip()
question = f"{p}的年龄是几岁"
questions.append(question)
question = f"{p}多大"
questions.append(question)
question = f"{p}几岁"
questions.append(question)
return questions
def create_entity_question():
questions = []
for _ in range(250):
for op in ['大于', '等于', '小于', '是', '有']:
profit = np.random.randint(10000, 10000000, 1)[0]
question = f"收益{op}{profit}的公司有哪些"
questions.append(question)
profit = np.random.randint(10000, 10000000, 1)[0]
question = f"哪些公司收益{op}{profit}"
questions.append(question)
for _ in range(250):
for op in ['大于', '等于', '小于', '是', '有']:
profit = np.random.randint(20, 60, 1)[0]
question = f"年龄{op}{profit}的人有哪些"
questions.append(question)
profit = np.random.randint(20, 60, 1)[0]
question = f"哪些人年龄{op}{profit}"
questions.append(question)
return questions
def create_relation_question():
relation = graph.run('MATCH (n)-[r]->(m) RETURN n.name as name, type(r) as r').to_ndarray()
questions = []
for r in relation:
if str(r[1]) in ['董事', '监事']:
question = f"{r[0]}的{r[1]}是谁"
questions.append(question)
else:
question = f"{r[0]}的{r[1]}"
questions.append(question)
question = f"{r[0]}的{r[1]}是啥"
questions.append(question)
question = f"{r[0]}的{r[1]}什么"
questions.append(question)
return questions
q1 = create_entity_question()
q2 = create_attribute_question()
q3 = create_relation_question()
df = pd.DataFrame()
df['question'] = q1 + q2 + q3
df['label'] = [0] * len(q1) + [1] * len(q2) + [2] * len(q3)
df.to_csv('question_classification.csv', encoding='utf_8_sig', index=False)
avg_line_length 29.22093 | max_line_length 96 | alphanum_fraction 0.54994 | classes 0 (score 0) | generators 0 (score 0) | decorators 0 (score 0) | async_functions 0 (score 0) | documentation 625 (score 0.235228)

hexsha 6c3ca74700c452639c1abd59ef05386a970cf094 | size 1,095 | ext py | lang Python
path src/detect_utils.py | head da2c40be8017c43a7b7b6c029e2df30cf7d54932 | licenses ["Apache-2.0"] | repo iglaweb/HippoYD (max_stars), filipul1s/HippoYD (max_issues, max_forks)
max_stars_count 7 (2021-07-02T03:57:20.000Z to 2022-03-20T13:23:32.000Z) | max_issues_count null | max_forks_count 3 (2021-07-02T16:07:28.000Z to 2022-03-20T13:23:33.000Z)
content:
import cv2
from scipy.spatial import distance as dist
def mouth_aspect_ratio(mouth) -> float:
# compute the euclidean distances between the two sets of
# vertical mouth landmarks (x, y)-coordinates
A = dist.euclidean(mouth[2], mouth[10]) # 51, 59
B = dist.euclidean(mouth[4], mouth[8]) # 53, 57
# compute the euclidean distance between the horizontal
# mouth landmark (x, y)-coordinates
C = dist.euclidean(mouth[0], mouth[6]) # 49, 55
# compute the mouth aspect ratio
mar = (A + B) / (2.0 * C)
return mar
def resize_img(frame_crop, max_width, max_height):
height, width = frame_crop.shape[:2]
# only shrink if img is bigger than required
if max_height < height or max_width < width:
# get scaling factor
scaling_factor = max_height / float(height)
if max_width / float(width) < scaling_factor:
scaling_factor = max_width / float(width)
# resize image
frame_crop = cv2.resize(frame_crop, None, fx=scaling_factor, fy=scaling_factor, interpolation=cv2.INTER_AREA)
return frame_crop
avg_line_length 36.5 | max_line_length 117 | alphanum_fraction 0.675799 | classes 0 (score 0) | generators 0 (score 0) | decorators 0 (score 0) | async_functions 0 (score 0) | documentation 326 (score 0.297717)

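The comments in mouth_aspect_ratio above spell out which landmark pairs feed the ratio (vertical pairs 51/59 and 53/57 against the horizontal pair 49/55 of dlib's 68-point model). A small made-up landmark array, purely illustrative, shows the arithmetic:

```python
# Illustrative only: 20 fake (x, y) mouth points standing in for dlib points 49-68,
# fed to the mouth_aspect_ratio function defined in the row above.
import numpy as np

mouth = np.zeros((20, 2), dtype=float)
mouth[2], mouth[10] = [2.0, 0.0], [2.0, 4.0]   # vertical pair -> A = 4
mouth[4], mouth[8] = [4.0, 0.0], [4.0, 4.0]    # vertical pair -> B = 4
mouth[0], mouth[6] = [0.0, 0.0], [8.0, 0.0]    # horizontal pair -> C = 8
print(mouth_aspect_ratio(mouth))               # (4 + 4) / (2 * 8) = 0.5
```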
hexsha 6c3cce245cb8dd51640bae04fe6b64d1a7249903 | size 3,626 | ext py | lang Python
repo thedinak/Genetics-to-Therapuetics | path rna_format.py | head f38cc76ceb8b9217b3f4b19f985a255c1c1dd98d | licenses ["MIT"]
max_stars_count null | max_issues_count null | max_forks_count null
content:
import pandas as pd
import os
import tarfile
import glob
import json
def unzip_rna_seq_data(file_name, desired_folder_name):
''' Downloaded RNA files are tarfiles, this unzips them'''
if 'tar' in file_name:
open_tar = tarfile.open(file_name)
open_tar.extractall(f'{desired_folder_name}')
open_tar.close()
else:
print('Not a tarfile')
def unzip_individual_rna_seq_files(root_dir):
''' Tarfile unzip results in gz files, which need to be further unzipped'''
files_to_unpack = []
dfs = []
meta_data_file = ''.join(glob.glob('**/**metadata.cart**', recursive=True))
with open(meta_data_file, 'r') as f:
meta_data = json.load(f)
convert_filename_caseuuid = {meta_data[i]['file_id']:
meta_data[i]['associated_entities'][0]
['case_id'] for i in range(0, len(meta_data))}
# dictionary of file_id:case_id
for directory in os.listdir(root_dir):
try:
for filename in os.listdir(os.path.join(root_dir, directory)):
if ".gz" in filename:
files_to_unpack.append(os.path.join(root_dir,
directory, filename))
except NotADirectoryError:
continue
for file in files_to_unpack:
dfs.append(pd.read_csv
(file, compression='gzip', sep="\t", names=['gene',
convert_filename_caseuuid[os.path.split(os.path.dirname
(file))[1]]],
index_col='gene'))
# these dfs already have the correct case id name
return files_to_unpack, dfs, convert_filename_caseuuid
def concat_all_rna_seq(dfs):
''' Takes each individual rna seq file and concatenates them into one '''
rna_seq_data = pd.concat(dfs, join="outer", axis=1).T
if type(rna_seq_data.index[0]) == str:
rna_seq_data.reset_index(inplace=True)
return rna_seq_data
def convert_ensg_to_gene_name(dataframe_with_genes):
'''TCGA data is listed with ensemble names, this converts to gene
names for greater readability '''
change_name_file = 'mart_export.txt'
gene_names = {}
with open(change_name_file) as fh:
for line in fh:
ensg, gene_name = line.split(',', 1)
gene_names[gene_name.split('.')[0]] = ensg
dataframe = (dataframe_with_genes.rename
(columns=lambda x: x.split('.')[0]).rename(
columns=gene_names))
genes = dataframe.columns[1:-1].tolist()
return dataframe, genes, gene_names
def concat_rna_to_clinical_data(clinical_dataframe, rna_dataframe):
''' Combines clinical data and the rna seq data. Clinical dataframe should
have bcr_patient_uuid as the index. '''
full_data = pd.merge(rna_dataframe, clinical_dataframe,
how='right', left_on=['index'],
right_on=['bcr_patient_uuid'])
return full_data
def limit_full_data_for_pca(full_data, genes):
''' Removes rna seq files where there is no drug name available and limits
columns to rna seq data, drug name and vital status '''
limit_full_data = (full_data.loc[(full_data.standard_drugs != '')
& (full_data.standard_drugs != '[not available]')
& (full_data.standard_drugs != '[unknown]')].copy())
limit_full_data.dropna(subset=['index'], inplace=True)
columns_needed = genes+['standard_drugs', 'vital_status']
return limit_full_data.loc[:, columns_needed]
avg_line_length 39.413043 | max_line_length 79 | alphanum_fraction 0.619967 | classes 0 (score 0) | generators 0 (score 0) | decorators 0 (score 0) | async_functions 0 (score 0) | documentation 910 (score 0.250965)

hexsha 6c3cdcc2642ae1e7ae2f269889189d138f16d4af | size 7,268 | ext py | lang Python
repo evite/django-fasturls | path fasturl/fasturl.py | head 52e397c5f4b4b2b7d6c5cd2bf9cc8cac1b4efa9b | licenses ["MIT"]
max_stars_count null | max_issues_count null | max_forks_count null
content:
import re
from collections import OrderedDict
from django.conf.urls import url as django_url, include
from django.core.urlresolvers import RegexURLResolver, RegexURLPattern
from django.utils.encoding import force_text
import logging
# Using FastUrl has a couple of caveats:
# 1. FastUrl tries to keep the resolution order the same as declared, but we cannot guarantee that the order will
# be exactly the same which could cause the wrong view to be returned if you have urlpatterns that overlap.
# 2. Detection of regexes within urlpatterns is very ad-hock, it would be easy to deliberately cause it to fail, but
# in practice it should cover most cases. Any errors should occur during url building rather than at resolution time
# Usage:
# Build your urlpatterns using 'FastUrl' instead of 'url' and then rebuild your urlpatterns with
# urlpatterns = render_fast_urls(urlpatterns)
class StartsWithResolver(RegexURLResolver):
"""
Python regexs are pretty slow, so this class checks if the string looks like it matches before
passing it through to the regular resolver class
"""
def __init__(self, regex, view, kwargs=None):
urlconf_module, app_name, namespace = view
super(StartsWithResolver, self).__init__(regex, urlconf_module, kwargs, app_name=app_name, namespace=namespace)
self.pattern = regex
if self.pattern[0] == "^":
self.pattern = self.pattern[1:]
self.passthrough = False
for char in "$()[]<>*?\\":
if char in self.pattern:
self.passthrough = True
else:
self.passthrough = True
def resolve(self, path):
if not self.passthrough:
path = force_text(path) # path may be a reverse_lazy object
if not path.startswith(self.pattern):
return False
return super(StartsWithResolver, self).resolve(path)
class FastUrl(object):
def __init__(self, *args, **kwargs):
self._args = args
self._kwargs = kwargs
def add_to_tree(self, tree):
# This does some super ad-hock detection of regex patterns and tries to re-join any regexes that
# were split in the middle
words = re.split('/', self._args[0])
for i in range(len(words) - 2, 0, -1):
if words[i] and words[i + 1] and (words[i][-1] == "^" or words[i + 1][0] == "?"):
words = words[:i] + [words[i] + "/" + words[i + 1]] + words[i + 2:]
new_words = []
parens_index = -1
parens = 0
for i, word in enumerate(words):
if "(" in words[i]:
if parens == 0:
parens_index = i
parens += word.count('(')
if "[" in words[i]:
if parens == 0:
parens_index = i
parens += word.count('[')
if ")" in words[i]:
parens -= word.count(')')
if "]" in words[i]:
parens -= word.count(']')
if parens_index < 0:
new_words.append(word)
elif parens == 0:
new_words.append('/'.join(words[parens_index:i+1]))
parens_index = -1
if parens_index != -1:
raise RuntimeError("Mismatched parentheses in urlpattern {}".format(self._args[0]))
words = new_words
if words[-1] in ("?", "?$", "$"):
words = words[:-2] + [words[-2] + "/" + words[-1]]
entry = tree
for word in words[:-1]:
if not entry.get(word):
entry[word] = OrderedDict()
entry = entry[word]
processed_include = False
# For include(...) processing. we add the urls to the tree instead of instantiating a RegexURLResolver
if isinstance(self._args[1], (list, tuple)):
urlconf_module, app_name, namespace = self._args[1]
if not app_name and not namespace:
processed_include = True
word = words[-1]
if not entry.get(word):
entry[word] = OrderedDict()
for url in urlconf_module.urlpatterns:
_add_url_to_tree(entry, url)
if not processed_include:
if words[-1] in entry:
logging.error("Duplicate entry for urlpattern {}".format(self._args[0]))
entry[words[-1]] = (self._args, self._kwargs)
def _is_django_regex(ob):
if isinstance(ob, RegexURLPattern) or isinstance(ob, RegexURLResolver):
return True
return False
def _add_url_to_tree(tree, url):
if isinstance(url, FastUrl):
url.add_to_tree(tree)
if _is_django_regex(url):
tree[('djangourl', _add_url_to_tree.django_urls)] = url
_add_url_to_tree.django_urls += 1
_add_url_to_tree.django_urls = 0 # counter for django only urls
merged_count = 0
def _merge_single_children(tree):
if not isinstance(tree, dict):
return tree
new_tree = OrderedDict()
for path, param in tree.items():
if isinstance(param, dict):
child = _merge_single_children(param)
if isinstance(child, dict) and len(child) == 1:
new_tree[path + '/' + child.keys()[0]] = child.values()[0]
_merge_single_children.count += 1
else:
new_tree[path] = _merge_single_children(param)
else:
new_tree[path] = param
return new_tree
_merge_single_children.count = 0
def render_fast_urls(urls, debug=False):
url_tree = OrderedDict()
# Expand the url list into the tree structure
for url in urls:
_add_url_to_tree(url_tree, url)
# Merge any entries with only a single child
url_tree = _merge_single_children(url_tree)
# Render the tree back into a list
def render_tree(tree):
new_urls = []
for path, param in tree.items():
if _is_django_regex(param):
new_urls.append(param)
else:
if path and path[0] is not "^":
path = "^" + path
if not path:
path = "^$"
if isinstance(param, dict):
new_urls.append(StartsWithResolver(path + "/", include(render_tree(param))))
else:
p = (path,) + param[0][1:]
new_urls.append(django_url(*p, **param[1]))
return new_urls
urlpatterns = render_tree(url_tree)
if debug:
_print_tree(url_tree, 0)
print ("FastUrl generated {} top level url patterns from {} total urls".format(len(urlpatterns), _count_tree(url_tree)))
print ("There were {} normal django urls.".format(_add_url_to_tree.django_urls))
print ("{} branches were merged".format(_merge_single_children.count))
return urlpatterns
def _print_tree(tree, indent = 0):
if not isinstance(tree, dict):
return
for key in tree.keys():
print (" " * indent + str(key))
_print_tree(tree[key], indent +2)
def _count_tree(tree):
if not isinstance(tree, dict):
return 1
total = 0
for key in tree.keys():
total += _count_tree(tree[key])
return total
avg_line_length 35.627451 | max_line_length 128 | alphanum_fraction 0.589158 | classes 3,556 (score 0.489268) | generators 0 (score 0) | decorators 0 (score 0) | async_functions 0 (score 0) | documentation 1,525 (score 0.209824)

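The comment block at the top of fasturl.py states the intended usage: declare patterns with FastUrl instead of url, then rebuild the list with render_fast_urls. A hypothetical urls.py along those lines (the view callables and their import module are placeholders, and the import path of FastUrl is inferred from the file path) might read:

```python
# Hypothetical urls.py following the usage described in fasturl's module comments;
# article_list and article_detail are placeholder view functions.
from fasturl.fasturl import FastUrl, render_fast_urls
from myapp.views import article_list, article_detail  # placeholder module

urlpatterns = [
    FastUrl(r'^articles/$', article_list, name='article-list'),
    FastUrl(r'^articles/(?P<slug>[\w-]+)/$', article_detail, name='article-detail'),
]

# Group the flat list into prefix-based resolvers before Django uses it.
urlpatterns = render_fast_urls(urlpatterns)
```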
hexsha 6c3d59a46c15d1afca1d52fd4d95d34b6fd700b1 | size 6,679 | ext py | lang Python
repo helenacuesta/multif0-estimation-polyvocals | path experiments/2_training.py | head 4960f5415f8a170f2ff8d5b776bfd4cb5576d3ba | licenses ["MIT"]
max_stars_count 36 (2020-09-13T12:30:41.000Z to 2022-02-15T08:52:58.000Z) | max_issues_count 6 (2020-09-04T11:14:14.000Z to 2022-02-09T23:49:59.000Z) | max_forks_count null
content:
import os
import json
import keras
import numpy as np
import csv
from experiments import config
import utils
import utils_train
import models
import argparse
class Data(object):
"""Class that deals with all the data mess
"""
def __init__(self, data_splits_path, data_path, input_patch_size, batch_size,
active_str, muxrate):
self.data_splits_path = data_splits_path
self.input_patch_size = input_patch_size
self.data_path = data_path
(self.train_set,
self.validation_set,
self.test_set) = self.load_data_splits()
self.train_files = utils_train.get_file_paths(self.train_set, self.data_path)
self.validation_files = utils_train.get_file_paths(
self.validation_set, self.data_path
)
self.test_files = utils_train.get_file_paths(self.test_set, self.data_path)
self.batch_size = batch_size
self.active_str = active_str
self.muxrate = muxrate
def load_data_splits(self):
with open(self.data_splits_path, 'r') as fhandle:
data_splits = json.load(fhandle)
return data_splits['train'], data_splits['validate'], data_splits['test']
def get_train_generator(self):
"""return a training data generator
"""
return utils_train.keras_generator(
self.train_files, self.input_patch_size,
self.batch_size, self.active_str, self.muxrate
)
def get_validation_generator(self):
"""return a validation data generator
"""
return utils_train.keras_generator(
self.validation_files, self.input_patch_size,
self.batch_size, self.active_str, self.muxrate
)
def get_test_generator(self):
"""return a test data generator
"""
return utils_train.keras_generator(
self.test_files, self.input_patch_size,
self.batch_size, self.active_str, self.muxrate
)
def load_data(load_path):
with open(load_path, 'r') as fp:
data = json.load(fp)
return data
def create_data_splits(path_to_metadata_file, exper_dir):
metadata = load_data(path_to_metadata_file)
utils.create_data_split(metadata,
os.path.join(exper_dir, 'data_splits.json'))
def train(model, model_save_path, data_splits_file, batch_size, active_str, muxrate):
#data_path = utils.data_path_multif0()
data_path = config.data_save_folder
input_patch_size = (360, 50)
data_splits_path = os.path.join(config.data_save_folder, data_splits_file)
## DATA MESS SETUP
dat = Data(
data_splits_path, data_path, input_patch_size,
batch_size, active_str, muxrate
)
# instantiate train and validation generators
train_generator = dat.get_train_generator()
validation_generator = dat.get_validation_generator()
model.compile(
loss=utils_train.bkld,
metrics=['mse', utils_train.soft_binary_accuracy],
optimizer='adam'
)
print(model.summary(line_length=80))
# hopefully fit model
history = model.fit_generator(
train_generator, config.SAMPLES_PER_EPOCH, epochs=config.NB_EPOCHS, verbose=1,
validation_data=validation_generator, validation_steps=config.NB_VAL_SAMPLES,
callbacks=[
keras.callbacks.ModelCheckpoint(
model_save_path, save_best_only=True, verbose=1),
keras.callbacks.ReduceLROnPlateau(patience=5, verbose=1),
keras.callbacks.EarlyStopping(patience=25, verbose=1)
]
)
model.load_weights(model_save_path)
return model, history, dat
def run_evaluation(exper_dir, save_key, history, dat, model):
(save_path, _, plot_save_path,
model_scores_path, _, _
) = utils_train.get_paths(exper_dir, save_key)
## Results plots
print("plotting results...")
utils_train.plot_metrics_epochs(history, plot_save_path)
## Evaluate
print("getting model metrics...")
utils_train.get_model_metrics(dat, model, model_scores_path)
print("getting best threshold...")
thresh = utils_train.get_best_thresh(dat, model)
print("scoring multif0 metrics on test sets...")
utils_train.score_on_test_set(model, save_path, dat, thresh)
def experiment(save_key, model, data_splits_file, batch_size, active_str, muxrate):
"""
This should be common code for all experiments
"""
exper_dir = config.exper_output
(save_path, _, plot_save_path,
model_scores_path, _, _
) = utils_train.get_paths(exper_dir, save_key)
model_save_path = '/scratch/hc2945/data/models/'
if not os.path.exists(model_save_path):
os.mkdir(model_save_path)
model_save_path = os.path.join(model_save_path, "{}.pkl".format(save_key))
'''
# create data splits file if it doesnt exist
if not os.path.exists(
os.path.join(exper_dir, 'data_splits.json')):
create_data_splits(path_to_metadata_file='./mtracks_info.json', exper_dir=exper_dir)
'''
model, history, dat = train(model, model_save_path, data_splits_file,
batch_size, active_str, muxrate)
run_evaluation(exper_dir, save_key, history, dat, model)
print("Done! Results saved to {}".format(save_path))
def main(args):
batch_size = 32
active_str = 100
muxrate = 32
save_key = args.save_key
data_splits_file = args.data_splits_file
if args.model_name == 'model1':
model = models.build_model1()
elif args.model_name == 'model2':
model = models.build_model2()
elif args.model_name == 'model3':
model = models.build_model3()
else:
print("Specified model does not exist. Please choose an valid model: model1, model2 or model3.")
return
experiment(save_key, model, data_splits_file, batch_size, active_str, muxrate)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Train specified model with training set.")
parser.add_argument("--model",
dest='model_name',
type=str,
help="Name of the model you want to train.")
parser.add_argument("--save_key",
dest='save_key',
type=str,
help="String to save model-related data.")
parser.add_argument("--data_splits_file",
dest='data_splits_file',
type=str,
help="Filename of the data splits file to use in the experiment.")
main(parser.parse_args())
avg_line_length 28.421277 | max_line_length 104 | alphanum_fraction 0.658332 | classes 1,833 (score 0.274442) | generators 0 (score 0) | decorators 0 (score 0) | async_functions 0 (score 0) | documentation 1,252 (score 0.187453)

hexsha 6c3eef3ce318f9f2ea78b8b3df0a26bfa302ee81 | size 106 | ext py | lang Python
repo honzatomek/pythonFEA | path src/pythonFEA/defaults.py | head c851c20800a06cc2084ef53dfd2ab67e7dfbc3b7 | licenses ["MIT"]
max_stars_count null | max_issues_count null | max_forks_count null
content:
# DEFUALT SETUP FOR NUMBERS
DEFAULT_FLOAT = float
# DEFAULT SETUP FOR STRINGS
DEFAULT_LABEL_LENGTH = 120
avg_line_length 17.666667 | max_line_length 27 | alphanum_fraction 0.801887 | classes 0 (score 0) | generators 0 (score 0) | decorators 0 (score 0) | async_functions 0 (score 0) | documentation 54 (score 0.509434)

hexsha 6c3f1a1b4560f11557e8a7fa31b050b56c6becc0 | size 6,666 | ext py | lang Python
repo Cryptorubic/rubic-validator | path backend/validators/models.py | head 88fd90d15da1fad538667c375189e2625d045ab0 | licenses ["MIT"]
max_stars_count null | max_issues_count null | max_forks_count null
content:
from logging import exception, info
from requests import post as request_post
from requests.exceptions import RequestException
from typing import Union
from uuid import UUID
from django.conf import settings
from django.db.models import (
CASCADE,
CharField,
ForeignKey,
OneToOneField,
)
from web3.types import HexBytes
from base.models import AbstractBaseModel
from base.support_functions.base import bytes_to_base58
from contracts.models import Contract
from backend.consts import DEFAULT_CRYPTO_ADDRESS, NETWORK_NAMES
from networks.models import (
Transaction,
CustomRpcProvider,
NearRpcProvider,
)
from networks.types import HASH_LIKE
class ValidatorSwap(AbstractBaseModel):
"""
ValidatorSwap model which used for creating and
sending signatures to relayer.
- contract - Contract instance on which transaction was found
- transaction - Transaction instance of found transaction while scanning
- signature - hashed params signed by Validator private key
- status - current status of swap
"""
STATUS_CREATED = 'created'
STATUS_WAITING_FOR_DATA = 'waiting for data'
STATUS_SIGNATURE_CREATED = 'signature created'
STATUS_SIGNATURE_SEND = 'signature send'
STATUS_SUCCESS = 'success'
_STATUSES = (
(STATUS_CREATED, STATUS_CREATED.upper()),
(STATUS_WAITING_FOR_DATA, STATUS_WAITING_FOR_DATA.upper()),
(STATUS_SIGNATURE_CREATED, STATUS_SIGNATURE_CREATED.upper()),
(STATUS_SIGNATURE_SEND, STATUS_SIGNATURE_SEND.upper()),
(STATUS_SUCCESS, STATUS_SUCCESS.upper()),
)
contract = ForeignKey(
to=Contract,
on_delete=CASCADE,
related_name='contract_validator_swaps',
verbose_name='Contract',
)
transaction = OneToOneField(
to=Transaction,
on_delete=CASCADE,
related_name='validator_swap_transaction',
verbose_name='Transaction',
)
signature = CharField(
max_length=255,
blank=True,
default='',
verbose_name='Signature',
)
status = CharField(
max_length=255,
choices=_STATUSES,
default=STATUS_CREATED,
verbose_name='Status',
)
class Meta:
db_table = 'validator_swaps'
ordering = '-_created_at',
def __str__(self) -> str:
return (
f'Validator swap with transaction hash \"{self.transaction.hash}\"'
)
def send_signature_to_relayer(self):
"""
Sends created by Validator signature.
"""
params = {
'password': settings.PRIVATE_PASSWORD_FOR_SIGNATURE_API,
}
payload = {
'validatorName': settings.VALIDATOR_NAME,
'signature': self.signature,
'fromContractNum': self.contract.blockchain_id,
'fromTxHash': self.transaction.hash,
'eventName': self.transaction.event_data.get('event', ''),
}
try:
response = request_post(
url=f"{settings.RELAYER_URL}/api/trades/signatures/",
params=params,
json=payload,
)
if response.status_code != 200:
exception("Could not send signature to relayer")
return
self.status = self.STATUS_SIGNATURE_SEND
self.save()
message = (
f'Signature \"{self.signature}\" of validator '
f'\"{settings.VALIDATOR_NAME}\" send to '
f'{settings.RELAYER_URL}'
)
info(message)
except RequestException as exception_error:
exception(exception_error)
pass
@classmethod
def get_swap_by_transaction_id(cls, transaction_id: UUID):
return cls.objects.filter(transaction__id=transaction_id).first()
@classmethod
def create_swap(
cls,
rpc_provider: Union[CustomRpcProvider, NearRpcProvider],
contract: Contract,
txn_hash: HASH_LIKE,
event: dict,
):
"""
Save ValidatorSwap instance in DataBase
:param rpc_provider: custom rpc provider of source network
:param contract: Contract object of source network
:param txn_hash: hash of the found transaction
:param event: event data of transaction
"""
if isinstance(txn_hash, HexBytes):
txn_hash = txn_hash.hex()
source_transaction = Transaction.get_transaction(
network_id=contract.network.id,
txn_hash=txn_hash,
)
info(source_transaction)
to_contract = Contract.get_contract_by_blockchain_id(
blockchain_id=source_transaction.data.get('params')[0],
)
if contract.network.title != NETWORK_NAMES['near']:
event_data = contract.get_event(event)
source_transaction.event_data = event_data
if to_contract.network.title in (
NETWORK_NAMES['solana'],
):
transaction_params = list(source_transaction.data['params'])
transaction_params[6] = bytes_to_base58(
string=transaction_params[6]
)
second_path = list(transaction_params[3])
for i in range(len(second_path)):
second_path[i] = bytes_to_base58(
string=second_path[i],
)
transaction_params[3] = second_path
source_transaction.data['params'] = transaction_params
elif to_contract.network.title in (
NETWORK_NAMES['near'],
):
transaction_params = list(source_transaction.data['params'])
transaction_params[6] = DEFAULT_CRYPTO_ADDRESS
# second_path = list(transaction_params[3])
#
# for i in range(len(second_path)):
# second_path[i] = bytes_to_base58(
# string=second_path[i],
# )
#
# transaction_params[3] = second_path
#
# source_transaction.data['params'] = transaction_params
source_transaction.save(
update_fields=(
'event_data',
'data',
'_created_at',
)
)
validator_swap = ValidatorSwap.get_swap_by_transaction_id(
transaction_id=source_transaction.id
)
if not validator_swap:
validator_swap = ValidatorSwap.objects.create(
contract=contract,
transaction=source_transaction,
)
return validator_swap
avg_line_length 29.495575 | max_line_length 79 | alphanum_fraction 0.611461 | classes 5,995 (score 0.89934) | generators 0 (score 0) | decorators 2,953 (score 0.442994) | async_functions 0 (score 0) | documentation 1,583 (score 0.237474)

hexsha 6c40a91da29b8a959cf350b71661cacacc596d6d | size 494 | ext py | lang Python
repo mengyangbai/leetcode | path practise/remove_zero.py | head e7a6906ecc5bce665dec5d0f057b302a64d50f40 | licenses ["MIT"]
max_stars_count null | max_issues_count null | max_forks_count null
content:
class Solution(object):
def moveZeroes(self, nums):
"""
:type nums: List[int]
:rtype: void Do not return anything, modify nums in-place instead.
"""
n = 0
k = len(nums)
for i in range(k-n):
if nums[i]==0:
while k-1-n >= i or nums[k-1-n]==0:
n+=1
nums[i],nums[k-1-n]=nums[k-1-n],nums[i]
if __name__ == "__main__":
a = Solution()
nums = [0]
a.moveZeroes(nums)
avg_line_length 27.444444 | max_line_length 74 | alphanum_fraction 0.465587 | classes 409 (score 0.827935) | generators 0 (score 0) | decorators 0 (score 0) | async_functions 0 (score 0) | documentation 130 (score 0.263158)

hexsha 6c42601ba0916dd0c025e30a21fda4322eb4b154 | size 2,838 | ext py | lang Python
repo weepingwillowben/reward-surfaces | path scripts/train_agent.py | head f27211faf3784df3305972b7cad65002fd57d7bf | licenses ["MIT"]
max_stars_count null | max_issues_count null | max_forks_count 2 (2021-10-03T14:51:38.000Z to 2021-11-10T02:54:26.000Z)
content:
import argparse
from reward_surfaces.agents.make_agent import make_agent
import torch
import json
import os
from glob import glob
def main():
parser = argparse.ArgumentParser(description='Train an agent and keep track of important information.')
parser.add_argument('save_dir', type=str, help="Directory where checkpoints will be saved")
parser.add_argument('agent_name', type=str, help="One of 'rainbow', 'SB3_OFF', 'SB3_ON', or 'SB3_HER'")
parser.add_argument('env', type=str, help="Environment name")
parser.add_argument('device', type=str, help="Device used for training ('cpu' or 'cuda')")
parser.add_argument('hyperparameters', type=str, help="Dictionary of hyperparameters for training. Should include the intended training algorithm (E.g. {'ALGO': 'PPO'})")
parser.add_argument('--save_freq', type=int, default=10000, help="Training steps between each saved checkpoint.")
parser.add_argument('--resume', action='store_true', help="Continue training from last checkpoint")
args = parser.parse_args()
assert args.agent_name in ['rainbow', 'SB3_OFF', 'SB3_ON', 'SB3_HER'], "Name must be one of 'rainbow', 'SB3_OFF', 'SB3_ON', or 'SB3_HER'"
torch.set_num_threads(1)
zip_path = ""
timesteps = 0
pretraining = None
if args.resume:
subdirs = glob(args.save_dir+"/*/")
for i, subdir in enumerate(subdirs):
parts = subdir.split("/")
subdirs[i] = ""
for part in parts:
if part.isdigit():
subdirs[i] = int(part)
subdirs = sorted(list(filter(lambda a: a != "", subdirs)))
latest_checkpoint = subdirs.pop()
timesteps = int(latest_checkpoint)
zip_path = args.save_dir + "/" + latest_checkpoint + "/checkpoint.zip"
best_path = args.save_dir + "/best/checkpoint.zip"
pretraining = {
"latest": zip_path,
"best": best_path,
"trained_steps": timesteps,
}
print(zip_path)
# trainer = SB3HerPolicyTrainer(robo_env_fn,HER("MlpPolicy",robo_env_fn(),model_class=TD3,device="cpu",max_episode_length=100))
print(args.resume)
agent, steps = make_agent(args.agent_name, args.env, args.save_dir, json.loads(args.hyperparameters),
pretraining=pretraining, device=args.device)
os.makedirs(args.save_dir, exist_ok=True)
hyperparams = json.loads(args.hyperparameters)
run_info = {
"agent_name": args.agent_name,
"env": args.env,
"hyperparameters": hyperparams,
}
run_info_fname = os.path.join(args.save_dir, "info.json")
with open(run_info_fname, 'w') as file:
file.write(json.dumps(run_info, indent=4))
agent.train(steps, args.save_dir, save_freq=args.save_freq)
if __name__ == "__main__":
main()
avg_line_length 41.130435 | max_line_length 174 | alphanum_fraction 0.65821 | classes 0 (score 0) | generators 0 (score 0) | decorators 0 (score 0) | async_functions 0 (score 0) | documentation 875 (score 0.308316)

hexsha 6c43b369587320014577c2dea259fb1b216358eb | size 103 | ext py | lang Python
repo devonwa/ladder2x | path tests/test_ladder.py | head a8604fb61eaa193d9a6e0239474a6c0af1bc2b49 | licenses ["Unlicense"]
max_stars_count null | max_issues_count null | max_forks_count null
content:
"""Tests on the base ladder structure."""
import pytest
if __name__ == "__main__":
pytest.main()
avg_line_length 14.714286 | max_line_length 41 | alphanum_fraction 0.669903 | classes 0 (score 0) | generators 0 (score 0) | decorators 0 (score 0) | async_functions 0 (score 0) | documentation 51 (score 0.495146)

hexsha 6c441485e7e7ad06c0126fe73345924ccb66fe07 | size 390 | ext py | lang Python
repo office-for-students/wagtail-CMS | path courses/urls.py | head 98789c279edf48f2bbedb5415437da3317f0e12b | licenses ["MIT"]
max_stars_count 4 (2019-06-04T07:18:44.000Z to 2020-06-15T22:27:36.000Z) | max_issues_count 38 (2019-05-09T13:14:56.000Z to 2022-03-12T00:54:57.000Z) | max_forks_count 3 (2019-09-26T14:32:36.000Z to 2021-05-06T15:48:01.000Z)
content:
from django.conf.urls import url
from django.urls import path
from courses.views import courses_detail
from courses.views.translate import get_translations
urlpatterns = [
url(r'(?P<institution_id>[\w\-]+?)/(?P<course_id>[\w\-\~\$()]+?)/(?P<kis_mode>[\w\-]+?)/', courses_detail,
name='courses_detail'),
path('translations/', get_translations, name='course_translation')
]
avg_line_length 32.5 | max_line_length 110 | alphanum_fraction 0.697436 | classes 0 (score 0) | generators 0 (score 0) | decorators 0 (score 0) | async_functions 0 (score 0) | documentation 136 (score 0.348718)

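The courses_detail pattern above captures three named groups from the request path. As a quick, hypothetical check (the institution, course, and mode values below are made up), the same regex can be exercised directly with Python's re module:

```python
# Made-up values; demonstrates what the named groups in the courses_detail
# pattern above would capture.
import re

pattern = r'(?P<institution_id>[\w\-]+?)/(?P<course_id>[\w\-\~\$()]+?)/(?P<kis_mode>[\w\-]+?)/'
match = re.match(pattern, '10007789/AB37~XYZ/FullTime/')
print(match.groupdict())
# {'institution_id': '10007789', 'course_id': 'AB37~XYZ', 'kis_mode': 'FullTime'}
```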
hexsha 6c44a6f087fd346f5832a3d385363862360f4ae8 | size 447 | ext py | lang Python
repo globality-corp/opencypher | path opencypher/tests/ast/test_ordering.py | head b60bf526fb6d5ea6c731aab867f714f3e10f629b | licenses ["Apache-2.0"]
max_stars_count 6 (2019-01-31T18:55:46.000Z to 2020-12-02T14:53:45.000Z) | max_issues_count 1 (2020-12-04T00:18:20.000Z to 2020-12-04T00:18:20.000Z) | max_forks_count 1 (2019-03-17T03:46:26.000Z to 2019-03-17T03:46:26.000Z)
content:
from hamcrest import assert_that, equal_to, is_
from opencypher.ast import Expression, NonEmptySequence, Order, SortItem, SortOrder
def test_order():
ast = Order(
items=NonEmptySequence[SortItem](
SortItem(
expression=Expression("foo"),
order=SortOrder.DESCENDING,
),
),
)
assert_that(
str(ast),
is_(equal_to("ORDER BY foo DESCENDING")),
)
| 23.526316
| 83
| 0.590604
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 30
| 0.067114
|
6c46983292689e2b0a8072d0c4aba99c6bfefd5f
| 444
|
py
|
Python
|
TD3/test.py
|
chenoly/DRL-MindSpore
|
7e3434f2ca326a76d150903fd2ed8e8a32de5cea
|
[
"MIT"
] | null | null | null |
TD3/test.py
|
chenoly/DRL-MindSpore
|
7e3434f2ca326a76d150903fd2ed8e8a32de5cea
|
[
"MIT"
] | null | null | null |
TD3/test.py
|
chenoly/DRL-MindSpore
|
7e3434f2ca326a76d150903fd2ed8e8a32de5cea
|
[
"MIT"
] | null | null | null |
from Model import Critic
from mindspore import Tensor
from mindspore import load_param_into_net
import copy
C1 = Critic(state_dim=2, action_dim=1)
C2 = Critic(state_dim=2, action_dim=1)
# C1.load_parameter_slice(C2.parameters_dict())
# load_param_into_net(C1, C2.parameters_dict())
c1_ = C1.parameters_dict()
c2_ = C2.parameters_dict()
for p, p1 in zip(c1_, c2_):
print(Tensor(c1_[p]))
print(Tensor(c2_[p1]))
print(c2_[p1].clone())
| 29.6
| 47
| 0.747748
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 94
| 0.211712
|
6c46b6f196085ed15758fd855c1d14b7c05e52f5
| 351
|
py
|
Python
|
reflectivipy/wrappers/expr_flatwrapper.py
|
StevenCostiou/reflectivipy
|
750ed93cfb463304958e590d895c76169caa4b98
|
[
"MIT"
] | 10
|
2019-01-18T17:45:18.000Z
|
2019-10-05T08:58:17.000Z
|
reflectivipy/wrappers/expr_flatwrapper.py
|
StevenCostiou/reflectivipy
|
750ed93cfb463304958e590d895c76169caa4b98
|
[
"MIT"
] | null | null | null |
reflectivipy/wrappers/expr_flatwrapper.py
|
StevenCostiou/reflectivipy
|
750ed93cfb463304958e590d895c76169caa4b98
|
[
"MIT"
] | null | null | null |
from .flatwrapper import FlatWrapper
class ExprFlatWrapper(FlatWrapper):
def flat_wrap(self):
self.reset_wrapping()
if self.should_wrap_children(self.original_node):
self.body.extend(self.original_node.value.wrapper.flat_wrap())
else:
self.body.append(self.original_node)
return self.body
| 29.25
| 74
| 0.68661
| 311
| 0.88604
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
6c489fd8b4623ac06e1c59f92467d3fce08e9f03
| 1,742
|
py
|
Python
|
cricdb_data.py
|
ravi2013167/coursera-site
|
e78f10c9fa941a834f83853479ea3ee67eeacc64
|
[
"MIT"
] | null | null | null |
cricdb_data.py
|
ravi2013167/coursera-site
|
e78f10c9fa941a834f83853479ea3ee67eeacc64
|
[
"MIT"
] | null | null | null |
cricdb_data.py
|
ravi2013167/coursera-site
|
e78f10c9fa941a834f83853479ea3ee67eeacc64
|
[
"MIT"
] | null | null | null |
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from cricdb_setup import Team, Player, Base, Batsman, Bowler, Fielder, PlayerStrength, PlayerWeakness, PlayerMoment, Video
engine = create_engine('sqlite:///cricdb.db')
# Bind the engine to the metadata of the Base class so that the
# declaratives can be accessed through a DBSession instance
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
# A DBSession() instance establishes all conversations with the database
# and represents a "staging zone" for all the objects loaded into the
# database session object. Any change made against the objects in the
# session won't be persisted into the database until you call
# session.commit(). If you're not happy about the changes, you can
# revert all of them back to the last commit by calling
# session.rollback()
session = DBSession()
Team1 = Team(id = 1, name='India')
session.add(Team1)
session.commit()
# Create dummy player
Player1 = Player(id = 1, team_id = 1, name="Virat Kohli", country="India", info='Born Nov 05, 1988 (28 years) Birth Place Delhi Nickname Kohli Height 5 ft 9 in (175 cm) Role Batsman Batting Style Right Handed Bat Bowling Style Right-arm medium', career='blank', batting_style='blank', bowling_style='blank',
picture='vk.jpg')
session.add(Player1)
session.commit()
# Create dummy batsman record
Batsman1 = Batsman(id=1, stance_type="front on", foot_position="front foot", shot="straight drive")
session.add(Batsman1)
session.commit()
Video1 = Video(id=1, video_type='batsman', video_name='front on front foot straight drive', video_url='google.com')
session.add(Video1)
session.commit()
print ("added menu items!")
| 40.511628
| 308
| 0.74225
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 943
| 0.541332
|
6c4921ee958b3c93f23ee76186c1ec8331428083
| 1,006
|
py
|
Python
|
catalog/bindings/gmd/dq_evaluation_method_type_code_property_type.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
catalog/bindings/gmd/dq_evaluation_method_type_code_property_type.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
catalog/bindings/gmd/dq_evaluation_method_type_code_property_type.py
|
NIVANorge/s-enda-playground
|
56ae0a8978f0ba8a5546330786c882c31e17757a
|
[
"Apache-2.0"
] | null | null | null |
from dataclasses import dataclass, field
from typing import Optional, Union
from bindings.gmd.dq_evaluation_method_type_code import DqEvaluationMethodTypeCode
from bindings.gmd.nil_reason_enumeration_value import NilReasonEnumerationValue
__NAMESPACE__ = "http://www.isotc211.org/2005/gmd"
@dataclass
class DqEvaluationMethodTypeCodePropertyType:
class Meta:
name = "DQ_EvaluationMethodTypeCode_PropertyType"
dq_evaluation_method_type_code: Optional[DqEvaluationMethodTypeCode] = field(
default=None,
metadata={
"name": "DQ_EvaluationMethodTypeCode",
"type": "Element",
"namespace": "http://www.isotc211.org/2005/gmd",
},
)
nil_reason: Optional[Union[str, NilReasonEnumerationValue]] = field(
default=None,
metadata={
"name": "nilReason",
"type": "Attribute",
"namespace": "http://www.isotc211.org/2005/gco",
"pattern": r"other:\w{2,}",
},
)
| 32.451613
| 82
| 0.667992
| 701
| 0.696819
| 0
| 0
| 712
| 0.707753
| 0
| 0
| 274
| 0.272366
|
6c4c00831838cc942a656d3b8ca70c1fdf886a13
| 3,964
|
py
|
Python
|
spark/ReqTwisted.py
|
wensheng/spark
|
ab47107d000f0670f4cfe131637f72471a04cfb2
|
[
"MIT"
] | null | null | null |
spark/ReqTwisted.py
|
wensheng/spark
|
ab47107d000f0670f4cfe131637f72471a04cfb2
|
[
"MIT"
] | null | null | null |
spark/ReqTwisted.py
|
wensheng/spark
|
ab47107d000f0670f4cfe131637f72471a04cfb2
|
[
"MIT"
] | null | null | null |
#import time
from spark.ReqBase import ReqBase
class ReqTwisted(ReqBase):
""" specialized on Twisted requests """
def __init__(self, req, reactor, properties={}):
self.twistedreq = req
self.http_accept_language = self.twistedreq.getHeader('Accept-Language')
#cookie give me major problem!
self.saved_cookies={}
cookietxt = self.twistedreq.getHeader("cookie")
if cookietxt:
for c in cookietxt.split(';'):
cook = c.lstrip()
eqs=cook.find('=')
k=cook[0:eqs]
v=cook[eqs+1:]
self.saved_cookies[k] = v
self.reactor = reactor
self._have_ct = 0
self._have_status = 0
self.server_protocol = self.twistedreq.clientproto
self.server_name = self.twistedreq.getRequestHostname().split(':')[0]
self.server_port = str(self.twistedreq.getHost()[2])
self.is_ssl = self.twistedreq.isSecure()
if self.server_port != ('80', '443')[self.is_ssl]:
self.http_host = self.server_name + ':' + self.server_port
else:
self.http_host = self.server_name
#self.script_name = [v for v in self.twistedreq.prepath[:-1] if v != '']
self.script_name = [v for v in self.twistedreq.prepath if v != '']
self.path_info = [v for v in self.twistedreq.postpath if v != '']
self.request_method = self.twistedreq.method
self.remote_host = self.twistedreq.getClient()
self.remote_addr = self.twistedreq.getClientIP()
self.http_user_agent = self.twistedreq.getHeader('User-Agent')
self.request_uri = self.twistedreq.uri
self.url = self.http_host + self.request_uri # was: self.server_name + self.request_uri
qindex = self.request_uri.find('?')
if qindex != -1:
            self.query_string = self.request_uri[qindex+1:]
else:
self.query_string = ''
ReqBase.__init__(self)
def run(self):
ReqBase.run(self)
def get_form(self):
args = {}
for key,values in self.twistedreq.args.items():
if isinstance(values, list) and len(values)==1:
values = values[0]
args[key] = values
return args
def get_vars(self):
pass
def read(self, n=None):
""" Read from input stream.
"""
self.twistedreq.content.seek(0, 0)
if n is None:
rd = self.twistedreq.content.read()
else:
rd = self.twistedreq.content.read(n)
#print "request.RequestTwisted.read: data=\n" + str(rd)
return rd
def write(self, data):
for piece in data:
self.twistedreq.write(piece)
#if self.header_type == 'html':
# self.twistedreq.write(str(time.time()-self.pagestart_time))
def finish(self):
self.twistedreq.finish()
# Headers ----------------------------------------------------------
def appendHttpHeader(self, header):
self.user_headers.append(header)
def __setHttpHeader(self, header):
#if type(header) is unicode:
# header = header.encode('ascii')
key, value = header.split(':',1)
value = value.lstrip()
self.twistedreq.setHeader(key, value)
def xml_headers(self, more_headers=[]):
if getattr(self, 'sent_headers', None):
return
self.sent_headers = 1
self.__setHttpHeader("Content-type: application/rss+xml;charset=utf-8")
def http_headers(self, more_headers=[]):
if getattr(self, 'sent_headers', None):
return
self.sent_headers = 1
have_ct = 0
# set http headers
for header in more_headers + getattr(self, 'user_headers', []):
if header.lower().startswith("content-type:"):
# don't send content-type multiple times!
if have_ct: continue
have_ct = 1
self.__setHttpHeader(header)
if not have_ct:
self.__setHttpHeader("Content-type: text/html;charset=utf-8")
def redirect(self, addr):
if isinstance(addr, unicode):
addr = addr.encode('ascii')
self.twistedreq.redirect(addr)
def setResponseCode(self, code, message=None):
self.twistedreq.setResponseCode(code, message)
def get_cookie(self, coname):
return self.saved_cookies.get(coname,'')
def set_cookie(self, coname, codata, expires=None):
if expires:
self.twistedreq.addCookie(coname, codata, expires)
else:
self.twistedreq.addCookie(coname, codata)
| 29.362963
| 89
| 0.688951
| 3,916
| 0.987891
| 0
| 0
| 0
| 0
| 0
| 0
| 787
| 0.198537
|
6c4cbca2cb07bcccddf7a558df7b93567d90c79c
| 11,093
|
py
|
Python
|
alpha_transform/AlphaTransformUtility.py
|
michaelriedl/alpha-transform
|
add5818b168551cb0c2138c65101c9cdac2bf3d9
|
[
"MIT"
] | 13
|
2016-12-21T03:25:57.000Z
|
2022-03-15T03:25:04.000Z
|
alpha_transform/AlphaTransformUtility.py
|
michaelriedl/alpha-transform
|
add5818b168551cb0c2138c65101c9cdac2bf3d9
|
[
"MIT"
] | 4
|
2020-07-11T09:49:51.000Z
|
2021-12-03T07:07:34.000Z
|
alpha_transform/AlphaTransformUtility.py
|
michaelriedl/alpha-transform
|
add5818b168551cb0c2138c65101c9cdac2bf3d9
|
[
"MIT"
] | 7
|
2018-09-23T10:58:24.000Z
|
2021-09-05T01:13:57.000Z
|
r"""
This module contains several utility functions which can be used e.g.
for thresholding the alpha-shearlet coefficients or for using the
alpha-shearlet transform for denoising.
Finally, it also contains the functions :func:`my_ravel` and :func:`my_unravel`
which can be used to convert the alpha-shearlet coefficients into a
1-dimensional vector and back. This is in particular convenient for the
subsampled transform, where this conversion is not entirely trivial, since the
different "coefficient images" have varying dimensions.
"""
import os.path
import math
import numpy as np
import numexpr as ne
import scipy.ndimage
def find_free_file(file_template):
r"""
This function finds the first nonexistent ("free") file obtained by
"counting upwards" using the passed template/pattern.
**Required Parameter**
:param string file_template:
This should be a string whose ``format()`` method can be called
using only an integer argument, e.g. ``'/home/test_{0:0>2d}.txt'``,
which would result in ``find_free_file`` consecutively checking
the following files for existence:
`/home/test_00.txt,`
`/home/test_01.txt, ...`
**Return value**
:return:
``file_template.format(i)`` for the first value of ``i`` for which
the corresponding file does not yet exist.
"""
i = 0
while os.path.isfile(file_template.format(i)):
i += 1
return file_template.format(i)
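# Minimal usage sketch (illustrative only): the directory and file names below
# are made up. The first call returns '.../denoised_00.npy'; once that file
# exists, the next call returns '.../denoised_01.npy', and so on.
def _find_free_file_example(result_dir='/tmp/alpha_results'):
    template = os.path.join(result_dir, 'denoised_{0:0>2d}.npy')
    return find_free_file(template)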
def threshold(coeffs, thresh_value, mode):
r"""
Given a set of coefficients, this function performs a thresholding
procedure, i.e., either soft or hard thresholding.
**Required parameters**
:param coeffs:
The coefficients to be thresholded.
Either a three-dimensional :class:`numpy.ndarray` or a generator
producing two dimensional :class:`numpy.ndarray` objects.
:param float thresh_value:
The thresholding cutoff :math:`c` for the coefficients, see also
``mode`` for more details.
:param string mode:
Either ``'hard'`` or ``'soft'``. This parameter determines whether
the hard thresholding operator
.. math::
\Lambda_cx
=\begin{cases}
x, & \text{if }|x|\geq c,\\
0, & \text{if }|x|<c,
\end{cases}
or the soft thresholding operator
.. math::
\Lambda_cx
=\begin{cases}
x\cdot \frac{|x|-c}{|x|}, & \text{if }|x|\geq c,\\
0, & \text{if }|x|<c
\end{cases}
is applied to each entry of the coefficients.
**Return value**
:return:
A generator producing the thresholded coefficients. Each
thresholded "coefficient image", i.e., each thresholded
2-dimensional array, is produced in turn.
"""
if mode == 'hard':
for coeff in coeffs:
ev_string = 'coeff * (real(abs(coeff)) >= thresh_value)'
yield ne.evaluate(ev_string)
# yield coeff * (np.abs(coeff) >= thresh_value)
elif mode == 'soft':
for coeff in coeffs:
ev_string = ('(real(abs(coeff)) - thresh_value) * '
'(real(abs(coeff)) >= thresh_value)')
large_values = ne.evaluate(ev_string)
# large_values = np.maximum(np.abs(coeff) - thresh_value, 0)
ev_str_2 = 'coeff * large_values / (large_values + thresh_value)'
yield ne.evaluate(ev_str_2)
# yield coeff * large_values / (large_values + thresh_value)
else:
raise ValueError("'mode' must be 'hard' or 'soft'")
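# Usage sketch (illustrative only): hard vs. soft thresholding of a tiny,
# made-up stack of complex "coefficient images" with an arbitrary cutoff of 1.0.
def _threshold_example():
    coeffs = np.array([[[0.5 + 0.0j, -2.0 + 0.0j],
                        [3.0 + 0.0j, 0.1 + 0.0j]]])
    hard = list(threshold(coeffs, thresh_value=1.0, mode='hard'))
    soft = list(threshold(coeffs, thresh_value=1.0, mode='soft'))
    return hard, soft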
def scale_gen(trafo):
r"""
**Required parameter**
:param trafo:
An object of class :class:`AlphaTransform.AlphaShearletTransform`.
**Return value**
:return:
A generator producing integers. The i-th produced integer
is the *scale* (starting from -1 for the low-pass part) of the i-th
alpha-shearlet associated to ``trafo``.
Hence, if ``coeff = trafo.transform(im)``, then the following iteration
produces the associated scale to each "coefficient image"::
for scale, c in zip(scale_gen(trafo), coeff):
...
"""
indices_gen = iter(trafo.indices)
next(indices_gen)
yield -1
for index in indices_gen:
yield index[0]
def denoise(img, trafo, noise_lvl, multipliers=None):
r"""
Given a noisy image :math:`\tilde f`, this function performs a denoising
procedure based on shearlet thresholding. More precisely:
#. A scale dependent threshold parameter :math:`c=(c_j)_j` is calculated
according to :math:`c_j=m_j\cdot \lambda / \sqrt{N_1\cdot N_2}`, where
:math:`m_j` is a multiplier for the jth scale, :math:`\lambda` is the
noise level present in the image :math:`\tilde f` and
:math:`N_1\times N_2` are its dimensions.
#. The alpha-shearlet transform of :math:`\tilde f` is calculated
using ``trafo``.
#. Hard thesholding with threshold parameter (cutoff) :math:`c` is
performed on alpha-shearlet coefficients, i.e., for each scale ``j``,
each of the coefficients belonging to the jth scale is set to zero if
its absolute value is smaller than :math:`c_j` and otherwise it is
left unchanged.
#. The (pseudo)-inverse of the alpha-shearlet transform is applied to the
thresholded coefficients and this reconstruction is the return value
of the function.
**Required parameters**
:param numpy.ndarray img:
The “image” (2 dimensional array) that should be denoised.
:param trafo:
An object of class :class:`AlphaTransform.AlphaShearletTransform`.
This object is used to calculate the (inverse) alpha-shearlet
transform during the denoising procedure.
The dimension of the transform and of ``img`` need to coincide.
:param float noise_lvl:
The (presumed) noise level present in ``img``.
If ``img = img_clean + noise``, then ``noise_lvl`` should be
approximately equal to the :math:`\ell^2` norm of ``noise``.
In particular, if ``im`` is obtained by adding Gaussian noise with
standard deviation :math:`\sigma` (in each entry) to a noise free
image :math:`f`, then the noise level :math:`\lambda` is given by
:math:`\lambda= \sigma\cdot \sqrt{N_1\cdot N_2}`; see also
:func:`AdaptiveAlpha.optimize_denoising`.
**Keyword parameter**
:param list multipliers:
A list of multipliers (floats) for each scale. ``multipliers[j]``
determines the value of :math:`m_j` and thus of the cutoff
:math:`c_j = m_j \cdot \lambda / \sqrt{N_1 \cdot N_2}` for scale ``j``.
In particular, ``len(multipliers)`` needs
to be equal to the number of the scales of ``trafo``.
**Return value**
:return:
The denoised image, i.e., the result of the denoising procedure
described above.
"""
coeff_gen = trafo.transform_generator(img, do_norm=True)
if multipliers is None:
# multipliers = [1] + ([2.5] * (trafo.num_scales - 1)) + [5]
multipliers = [3] * trafo.num_scales + [4]
width = trafo.width
height = trafo.height
thresh_lvls = [multi * noise_lvl / math.sqrt(width * height)
for multi in multipliers]
thresh_coeff = (coeff * (np.abs(coeff) >= thresh_lvls[scale + 1])
for (coeff, scale) in zip(coeff_gen, scale_gen(trafo)))
recon = trafo.inverse_transform(thresh_coeff, real=True, do_norm=True)
return recon
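# Minimal usage sketch: `trafo` is assumed to be an already constructed
# AlphaTransform.AlphaShearletTransform matching the image dimensions; only the
# noise-level formula documented above is used here.
def _denoise_example(noisy_img, trafo, sigma):
    # Gaussian noise with per-pixel std sigma corresponds to
    # noise level lambda = sigma * sqrt(N1 * N2).
    noise_lvl = sigma * math.sqrt(noisy_img.shape[0] * noisy_img.shape[1])
    return denoise(noisy_img, trafo, noise_lvl)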
def image_load(path):
r"""
Given a '.npy' or '.png' file, this function loads the file and returns
its content as a two-dimensional :class:`numpy.ndarray` of :class:`float`
values.
For '.png' images, the pixel values are normalized to be between 0 and 1
(instead of between 0 and 255) and color images are converted to
grey-scale.
**Required parameter**
:param string path:
Path to the image to be converted, either of a '.png' or '.npy' file.
**Return value**
:return:
The loaded image as a two-dimensional :class:`numpy.ndarray`.
"""
image_extension = path[path.rfind('.'):]
if image_extension == '.npy':
return np.array(np.load(path), dtype='float64')
elif image_extension == '.png':
return np.array(scipy.ndimage.imread(path, flatten=True) / 255.0,
dtype='float64')
else:
raise ValueError("This function can only load .png or .npy files.")
def _print_listlist(listlist):
for front, back, l in zip(['['] + ([' '] * (len(listlist) - 1)),
([''] * (len(listlist) - 1)) + [']'],
listlist):
print(front + str(l) + back)
def my_ravel(coeff):
r"""
The subsampled alpha-shearlet transform returns a list of differently
sized(!) two-dimensional arrays. Likewise, the fully sampled transform
yields a three dimensional numpy array containing the coefficients.
The present function can be used (in both cases) to convert this list into
a single *one-dimensional* numpy array.
.. note::
In order to invert this conversion to a one-dimensional array,
use the associated function :func:`my_unravel`. Precisely,
:func:`my_unravel` satisfies
``my_unravel(my_trafo, my_ravel(coeff)) == coeff``,
if coeff is obtained from calling ``my_trafo.transform(im)``
for some image ``im``.
The preceding equality holds at least up to (negligible)
differences (the left-hand side is a generator while the
right-hand side could also be a list).
**Required parameter**
:param list coeff:
A list (or a generator) containing/producing two-dimensional
numpy arrays.
**Return value**
:return:
A one-dimensional :class:`numpy.ndarray` from which **coeff** can
be reconstructed.
"""
return np.concatenate([c.ravel() for c in coeff])
def my_unravel(trafo, coeff):
r"""
This method is a companion method to :func:`my_ravel`.
See the documentation of that function for more details.
**Required parameters**
:param trafo:
An object of class :class:`AlphaTransform.AlphaShearletTransform`.
:param numpy.ndarray coeff:
A one-dimensional numpy array, obtained via
``my_ravel(coeff_unrav)``, where ``coeff_unrav`` is of the same
dimensions as the output of ``trafo.transform(im)``, where
``im`` is an image.
**Return value**
:return:
A generator producing the same values as ``coeff_unrav``, i.e.,
an "unravelled" version of ``coeff``.
"""
coeff_sizes = [spec.shape for spec in trafo.spectrograms]
split_points = np.cumsum([spec.size for spec in trafo.spectrograms])
return (c.reshape(size)
for size, c in zip(coeff_sizes, np.split(coeff, split_points)))
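# Usage sketch of the round-trip property documented above: for a transform
# object and image supplied by the caller, my_unravel(trafo, my_ravel(coeff))
# reproduces the original coefficients (up to list vs. generator).
def _ravel_roundtrip_example(trafo, image):
    coeff = trafo.transform(image)
    flat = my_ravel(coeff)                      # single 1-D array
    coeff_back = list(my_unravel(trafo, flat))  # 2-D "coefficient images" again
    return all(np.array_equal(c1, c2) for c1, c2 in zip(coeff, coeff_back))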
| 34.557632
| 79
| 0.63166
| 0
| 0
| 2,934
| 0.264396
| 0
| 0
| 0
| 0
| 8,719
| 0.785708
|
6c50ce676f3a6dc75c4d1900f6d996ce7fd69ed7
| 2,692
|
py
|
Python
|
tests/provider/dwd/radar/test_api_latest.py
|
waltherg/wetterdienst
|
3c5c63b5b8d3e19511ad789bb499bdaa9b1976d9
|
[
"MIT"
] | 1
|
2021-09-01T12:53:09.000Z
|
2021-09-01T12:53:09.000Z
|
tests/provider/dwd/radar/test_api_latest.py
|
waltherg/wetterdienst
|
3c5c63b5b8d3e19511ad789bb499bdaa9b1976d9
|
[
"MIT"
] | null | null | null |
tests/provider/dwd/radar/test_api_latest.py
|
waltherg/wetterdienst
|
3c5c63b5b8d3e19511ad789bb499bdaa9b1976d9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2021, earthobservations developers.
# Distributed under the MIT License. See LICENSE for more info.
import re
from datetime import datetime
import pytest
from tests.provider.dwd.radar import station_reference_pattern_unsorted
from wetterdienst.provider.dwd.radar import DwdRadarValues
from wetterdienst.provider.dwd.radar.metadata import DwdRadarDate, DwdRadarParameter
from wetterdienst.provider.dwd.radar.sites import DwdRadarSite
from wetterdienst.util.datetime import round_minutes
@pytest.mark.xfail(reason="Out of service", strict=True)
@pytest.mark.remote
def test_radar_request_composite_latest_rx_reflectivity():
"""
Example for testing radar COMPOSITES latest.
"""
request = DwdRadarValues(
parameter=DwdRadarParameter.RX_REFLECTIVITY,
start_date=DwdRadarDate.LATEST,
)
buffer = next(request.query())[1]
payload = buffer.getvalue()
month_year = datetime.utcnow().strftime("%m%y")
header = (
f"RX......10000{month_year}BY 8101..VS 3SW ......PR E\\+00INT 5GP 900x 900MS " # noqa:E501,B950
f"..<{station_reference_pattern_unsorted}>" # noqa:E501,B950
)
assert re.match(bytes(header, encoding="ascii"), payload[:160])
@pytest.mark.remote
def test_radar_request_composite_latest_rw_reflectivity():
"""
Example for testing radar COMPOSITES (RADOLAN) latest.
"""
request = DwdRadarValues(
parameter=DwdRadarParameter.RW_REFLECTIVITY,
start_date=DwdRadarDate.LATEST,
)
results = list(request.query())
if len(results) == 0:
raise pytest.skip("Data currently not available")
buffer = results[0][1]
payload = buffer.getvalue()
month_year = datetime.utcnow().strftime("%m%y")
header = (
f"RW......10000{month_year}"
f"BY16201..VS 3SW ......PR E-01INT 60GP 900x 900MF 00000001MS "
f"..<{station_reference_pattern_unsorted}>"
)
assert re.match(bytes(header, encoding="ascii"), payload[:160])
@pytest.mark.remote
def test_radar_request_site_latest_dx_reflectivity():
"""
Example for testing radar SITES latest.
"""
request = DwdRadarValues(
parameter=DwdRadarParameter.DX_REFLECTIVITY,
start_date=DwdRadarDate.LATEST,
site=DwdRadarSite.BOO,
)
buffer = next(request.query())[1]
payload = buffer.getvalue()
timestamp_aligned = round_minutes(datetime.utcnow(), 5)
month_year = timestamp_aligned.strftime("%m%y")
header = f"DX......10132{month_year}BY.....VS 2CO0CD4CS0EP0.80.80.80.80.80.80.80.8MS" # noqa:E501,B950
assert re.match(bytes(header, encoding="ascii"), payload[:160])
| 30.247191
| 108
| 0.69688
| 0
| 0
| 0
| 0
| 2,150
| 0.798663
| 0
| 0
| 798
| 0.296434
|
6c5274b4da8bf2db8410e4efcd81dcd874ad4000
| 710
|
py
|
Python
|
tests/conftest.py
|
transferwise/cloudflare-exporter
|
d5efd4e9068bf9896a16ec6913d3345e3754d7c8
|
[
"Apache-2.0"
] | 1
|
2021-08-06T15:09:26.000Z
|
2021-08-06T15:09:26.000Z
|
tests/conftest.py
|
transferwise/cloudflare-exporter
|
d5efd4e9068bf9896a16ec6913d3345e3754d7c8
|
[
"Apache-2.0"
] | 16
|
2021-09-20T04:10:29.000Z
|
2022-03-14T04:26:01.000Z
|
tests/conftest.py
|
transferwise/cloudflare-exporter
|
d5efd4e9068bf9896a16ec6913d3345e3754d7c8
|
[
"Apache-2.0"
] | 2
|
2021-08-21T18:48:15.000Z
|
2021-11-19T16:52:25.000Z
|
# -*- coding: utf-8 -*-
import pytest
import json
from pathlib import Path
@pytest.fixture(scope="session")
def accounts_httpRequests1hGroupsFixture():
with open("tests/data/accounts/httpRequests1hGroups.json") as data:
res = json.load(data)
return res
@pytest.fixture(scope="session")
def zones_httpRequests1hGroupsFixture():
with open("tests/data/zones/httpRequests1hGroups.json") as data:
res = json.load(data)
return res
@pytest.fixture(scope="session")
def test_fixture():
with open("cloudflare_exporter/gql/accounts.httpRequests1hGroups.graphql") as data:
# query = data.read()
query = "".join(line.rstrip().lstrip() for line in data)
return query
| 26.296296
| 87
| 0.712676
| 0
| 0
| 0
| 0
| 626
| 0.88169
| 0
| 0
| 227
| 0.319718
|
6c5353e05ae0337f97754129d22ee251e890227f
| 4,529
|
py
|
Python
|
scripts/delay_analysis.py
|
welvin21/pysimt
|
6250b33dc518b3195da4fc9cc8d32ba7ada958c0
|
[
"MIT"
] | 34
|
2020-09-21T10:49:57.000Z
|
2022-01-08T04:50:42.000Z
|
scripts/delay_analysis.py
|
welvin21/pysimt
|
6250b33dc518b3195da4fc9cc8d32ba7ada958c0
|
[
"MIT"
] | 2
|
2021-01-08T03:52:51.000Z
|
2021-09-10T07:45:05.000Z
|
scripts/delay_analysis.py
|
welvin21/pysimt
|
6250b33dc518b3195da4fc9cc8d32ba7ada958c0
|
[
"MIT"
] | 5
|
2021-04-23T09:30:51.000Z
|
2022-01-09T08:40:45.000Z
|
#!/usr/bin/env python
import os
import sys
import glob
import argparse
from pathlib import Path
from collections import defaultdict
from hashlib import sha1
import numpy as np
import sacrebleu
import tabulate
from pysimt.metrics.simnmt import AVPScorer, AVLScorer, CWMScorer, CWXScorer
"""This script should be run from within the parent folder where each pysimt
experiment resides."""
def read_lines_from_file(fname):
lines = []
with open(fname) as f:
for line in f:
lines.append(line.strip())
return lines
def compute_bleu(fname, refs):
hyps = open(fname).read()
hashsum = sha1(hyps.encode('utf-8')).hexdigest()
parent = fname.parent
cached_bleu = parent / f'.{fname.name}__{hashsum}'
if os.path.exists(cached_bleu):
return float(open(cached_bleu).read().strip().split()[2])
else:
bleu = sacrebleu.corpus_bleu(
hyps.strip().split('\n'), refs, tokenize='none')
with open(cached_bleu, 'w') as f:
f.write(bleu.format() + '\n')
return float(bleu.format().split()[2])
if __name__ == '__main__':
parser = argparse.ArgumentParser(
prog='delay-analysis',
formatter_class=argparse.RawDescriptionHelpFormatter,
description="Compute delay metrics for multiple runs",
argument_default=argparse.SUPPRESS)
parser.add_argument('-r', '--ref-file', required=True, type=str,
help='The reference file for BLEU evaluation.')
parser.add_argument('act_files', nargs='+',
help='List of action files')
args = parser.parse_args()
refs = [read_lines_from_file(args.ref_file)]
test_set = Path(args.ref_file).name.split('.')[0]
results = {}
# Automatically fetch .acts files
acts = [Path(p) for p in args.act_files]
# unique experiments i.e. nmt and mmt for example
exps = set([p.parent for p in acts])
scorers = [
AVPScorer(add_trg_eos=False),
AVLScorer(add_trg_eos=False),
#CWMScorer(add_trg_eos=False),
#CWXScorer(add_trg_eos=False),
]
for exp in exps:
# get actions for this experiment
exp_acts = [p for p in acts if p.parent == exp]
parts = [p.name.split('.') for p in exp_acts]
# different run prefixes
runs = list(set([p[0] for p in parts]))
# type of decodings i.e. wait if diff, waitk, etc.
types = list(set([p[2] for p in parts]))
# Evaluate baseline consecutive systems as well
baseline_bleus = []
for run in runs:
hyp_fname = f'{exp}/{run}.{test_set}.gs'
if os.path.exists(hyp_fname):
bleu = compute_bleu(Path(hyp_fname), refs)
baseline_bleus.append(bleu)
else:
baseline_bleus.append(-1)
results[exp.name] = {m.name: '0' for m in scorers}
results[exp.name]['Q2AVP'] = '0'
baseline_bleus = np.array(baseline_bleus)
results[exp.name]['BLEU'] = f'{baseline_bleus.mean():2.2f} ({baseline_bleus.std():.4f})'
# Evaluate each decoding type and keep multiple run scores
for typ in types:
scores = defaultdict(list)
for run in runs:
act_fname = f'{exp}/{run}.{test_set}.{typ}.acts'
hyp_fname = f'{exp}/{run}.{test_set}.{typ}.gs'
# Compute BLEU
bleu = compute_bleu(Path(hyp_fname), refs)
scores['BLEU'].append(bleu)
if os.path.exists(act_fname):
# Compute delay metrics
run_scores = [s.compute_from_file(act_fname) for s in scorers]
for sc in run_scores:
scores[sc.name].append(sc.score)
scores['Q2AVP'] = bleu / scores['AVP'][-1]
# aggregate
scores = {k: np.array(v) for k, v in scores.items()}
means = {k: v.mean() for k, v in scores.items()}
sdevs = {k: v.std() for k, v in scores.items()}
str_scores = {m: f'{means[m]:4.2f} ({sdevs[m]:.2f})' for m in scores.keys()}
results[f'{exp.name}_{typ}'] = str_scores
headers = ['Name'] + [sc.name for sc in scorers] + ['BLEU', 'Q2AVP']
results = [[name, *[scores[key] for key in headers[1:]]] for name, scores in results.items()]
# alphabetical sort
results = sorted(results, key=lambda x: x[0].rsplit('_', 1)[-1])
# print
print(tabulate.tabulate(results, headers=headers))
| 34.310606
| 97
| 0.59108
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,036
| 0.228748
|
6c56a8517956b8fdd74335b60fe24a921ed77b5c
| 3,713
|
py
|
Python
|
canvas_course_site_wizard/views.py
|
Harvard-University-iCommons/django-canvas-course-site-wizard
|
0210849e959407e5a850188f50756eb69b9a4dc2
|
[
"MIT"
] | null | null | null |
canvas_course_site_wizard/views.py
|
Harvard-University-iCommons/django-canvas-course-site-wizard
|
0210849e959407e5a850188f50756eb69b9a4dc2
|
[
"MIT"
] | 5
|
2018-05-10T19:49:43.000Z
|
2021-01-29T19:39:34.000Z
|
canvas_course_site_wizard/views.py
|
Harvard-University-iCommons/django-canvas-course-site-wizard
|
0210849e959407e5a850188f50756eb69b9a4dc2
|
[
"MIT"
] | null | null | null |
import logging
from django.views.generic.base import TemplateView
from django.views.generic.detail import DetailView
from django.shortcuts import redirect
from .controller import (
create_canvas_course,
start_course_template_copy,
finalize_new_canvas_course,
get_canvas_course_url
)
from .mixins import CourseSiteCreationAllowedMixin
from icommons_ui.mixins import CustomErrorPageMixin
from .exceptions import NoTemplateExistsForSchool
from .models import CanvasCourseGenerationJob
from braces.views import LoginRequiredMixin
logger = logging.getLogger(__name__)
class CanvasCourseSiteCreateView(LoginRequiredMixin, CourseSiteCreationAllowedMixin, CustomErrorPageMixin, TemplateView):
"""
Serves up the canvas course site creation wizard on GET and creates the
course site on POST.
"""
template_name = "canvas_course_site_wizard/canvas_wizard.html"
# This is currently the project-level 500 error page, which has RenderableException logic
custom_error_template_name = "500.html"
def post(self, request, *args, **kwargs):
sis_course_id = self.object.pk
sis_user_id = 'sis_user_id:%s' % request.user.username
# we modified create_canvas_course to return two params when it's called as part of
# the single course creation. This is so we can keep track of the job_id
# for the newly created job record. There's a probably a better way to handle this
# but for now, this works
course, course_job_id = create_canvas_course(sis_course_id, request.user.username)
try:
course_generation_job = start_course_template_copy(self.object, course['id'],
request.user.username, course_job_id=course_job_id)
return redirect('ccsw-status', course_generation_job.pk)
except NoTemplateExistsForSchool:
# If there is no template to copy, immediately finalize the new course
# (i.e. run through remaining post-async job steps)
course_url = finalize_new_canvas_course(course['id'], sis_course_id, sis_user_id)
job = CanvasCourseGenerationJob.objects.get(pk=course_job_id)
job.update_workflow_state(CanvasCourseGenerationJob.STATUS_FINALIZED)
return redirect(course_url)
class CanvasCourseSiteStatusView(LoginRequiredMixin, DetailView):
""" Displays status of course creation job, including progress and result of template copy and finalization """
template_name = "canvas_course_site_wizard/status.html"
model = CanvasCourseGenerationJob
context_object_name = 'content_migration_job'
def get_context_data(self, **kwargs):
"""
get_context_data allows us to pass additional values to the view. In this case we are passing in:
- the canvas course url for a successfully completed job (or None if it hasn't successfully completed)
- simplified job progress status indicators for the template to display success/failure messages
"""
context = super(CanvasCourseSiteStatusView, self).get_context_data(**kwargs)
logger.debug('Rendering status page for course generation job %s' % self.object)
context['canvas_course_url'] = get_canvas_course_url(canvas_course_id=self.object.canvas_course_id)
context['job_failed'] = self.object.workflow_state in [
CanvasCourseGenerationJob.STATUS_FAILED,
CanvasCourseGenerationJob.STATUS_SETUP_FAILED,
CanvasCourseGenerationJob.STATUS_FINALIZE_FAILED
]
context['job_succeeded'] = self.object.workflow_state in [CanvasCourseGenerationJob.STATUS_FINALIZED]
return context
| 51.569444
| 121
| 0.736062
| 3,128
| 0.842445
| 0
| 0
| 0
| 0
| 0
| 0
| 1,285
| 0.346081
|
6c58884fde7690dcd1123dcef567073872ba2ad9
| 7,389
|
py
|
Python
|
brie/utils/count.py
|
huangyh09/brie
|
59563baafcdb95d1d75a81203e5cc29983f66c2f
|
[
"Apache-2.0"
] | 38
|
2017-01-06T00:18:46.000Z
|
2022-01-25T19:44:10.000Z
|
brie/utils/count.py
|
huangyh09/brie
|
59563baafcdb95d1d75a81203e5cc29983f66c2f
|
[
"Apache-2.0"
] | 28
|
2017-01-11T09:12:57.000Z
|
2022-02-14T14:53:48.000Z
|
brie/utils/count.py
|
huangyh09/brie
|
59563baafcdb95d1d75a81203e5cc29983f66c2f
|
[
"Apache-2.0"
] | 12
|
2018-02-13T20:23:00.000Z
|
2022-01-05T18:39:19.000Z
|
import sys
import numpy as np
from .sam_utils import load_samfile, fetch_reads
def _check_SE_event(gene):
"""Check SE event"""
if (len(gene.trans) != 2 or
gene.trans[0].exons.shape[0] != 3 or
gene.trans[1].exons.shape[0] != 2 or
np.mean(gene.trans[0].exons[[0, 2], :] ==
gene.trans[1].exons) != 1):
return False
else:
return True
def _get_segment(exons, read):
"""Get the length of segments by devidinig a read into exons.
The segments include one for each exon and two edges.
"""
if read is None:
return None
_seglens = [0] * (exons.shape[0] + 2)
_seglens[0] = np.sum(read.positions < exons[0, 0])
_seglens[-1] = np.sum(read.positions > exons[-1, -1])
for i in range(exons.shape[0]):
_seglens[i + 1] = np.sum(
(read.positions >= exons[i, 0]) * (read.positions <= exons[i, 1]))
return _seglens
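# Worked example with made-up coordinates: `_FakeRead` stands in for a pysam
# aligned read, which only needs a `.positions` attribute here.
def _get_segment_example():
    from collections import namedtuple
    _FakeRead = namedtuple('_FakeRead', 'positions')
    exons = np.array([[100, 200], [300, 400]])
    read = _FakeRead(positions=np.arange(95, 105))  # 5 bases before exon 1, 5 inside it
    return _get_segment(exons, read)                # -> [5, 5, 0, 0]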
def check_reads_compatible(transcript, reads, edge_hang=10, junc_hang=2):
"""Check if reads are compatible with a transcript
"""
is_compatible = [True] * len(reads)
for i in range(len(reads)):
_segs = _get_segment(transcript.exons, reads[i])
# check mismatch to regions not in this transcript
if len(reads[i].positions) - sum(_segs) >= junc_hang:
is_compatible[i] = False
continue
# check if edge hang is too short
if (_segs[0] > 0 or _segs[-1] > 0) and sum(_segs[1:-1]) < edge_hang:
is_compatible[i] = False
continue
# check if exon has been skipped
if len(_segs) > 4:
for j in range(2, len(_segs) - 2):
if (_segs[j-1] >= junc_hang and _segs[j+1] >= junc_hang and
transcript.exons[j-1, 1] - transcript.exons[j-1, 0] -
_segs[j] >= junc_hang):
is_compatible[i] = False
break
return np.array(is_compatible)
def SE_reads_count(gene, samFile, edge_hang=10, junc_hang=2, **kwargs):
"""Count the categorical reads mapped to a splicing event
rm_duplicate=True, inner_only=True,
mapq_min=0, mismatch_max=5, rlen_min=1, is_mated=True
"""
# Check SE event
if _check_SE_event(gene) == False:
print("This is not exon-skipping event!")
exit()
# Fetch reads (TODO: customise fetch_reads function, e.g., FLAG)
reads = fetch_reads(samFile, gene.chrom, gene.start, gene.stop, **kwargs)
# Check reads compatible
is_isoform1 = check_reads_compatible(gene.trans[0], reads["reads1"])
is_isoform2 = check_reads_compatible(gene.trans[1], reads["reads1"])
if len(reads["reads2"]) > 0:
is_isoform1 *= check_reads_compatible(gene.trans[0], reads["reads2"])
is_isoform2 *= check_reads_compatible(gene.trans[1], reads["reads2"])
is_isoform1 = np.append(is_isoform1,
check_reads_compatible(gene.trans[0], reads["reads1u"]))
is_isoform2 = np.append(is_isoform2,
check_reads_compatible(gene.trans[1], reads["reads1u"]))
is_isoform1 = np.append(is_isoform1,
check_reads_compatible(gene.trans[0], reads["reads2u"]))
is_isoform2 = np.append(is_isoform2,
check_reads_compatible(gene.trans[1], reads["reads2u"]))
# return Reads matrix
Rmat = np.zeros((len(is_isoform1), 2), dtype=bool)
Rmat[:, 0] = is_isoform1
Rmat[:, 1] = is_isoform2
return Rmat
def get_count_matrix(genes, sam_file, sam_num, edge_hang=10, junc_hang=2):
samFile = load_samfile(sam_file)
RV = []
for g in range(len(genes)):
_Rmat = SE_reads_count(genes[g], samFile, edge_hang=10, junc_hang=2,
rm_duplicate=True, inner_only=False, mapq_min=0, mismatch_max=5,
rlen_min=1, is_mated=True)
if _Rmat.shape[0] == 0:
continue
K = 2**(np.arange(_Rmat.shape[1]))
code_id, code_cnt = np.unique(np.dot(_Rmat, K), return_counts=True)
count_dict = {}
for i in range(len(code_id)):
count_dict["%d" %(code_id[i])] = code_cnt[i]
RV.append("%d\t%d\t%s" %(sam_num + 1, g + 1, str(count_dict)))
RV_line = ""
if len(RV) > 0:
RV_line = "\n".join(RV) + "\n"
return RV_line
def SE_probability(gene, rlen=75, edge_hang=10, junc_hang=2):
"""Get read categorical probability of each isoform.
In exon-skipping (SE) event, there are two isoform:
isoform1 for exon inclusion and isoform2 for exon exclusion.
Here, we only treat single-end reads. For paired-end reads,
we treat it as the single-end by only using the most informative
mate, namely the mate mapped to least number of isoform(s).
isoform1: l1 + l2 + l3 + rlen - 2 * edge_hang
p1: l2 + rlen - 2 * junc_hang
p3: l1 + l3 - 2 * edge_hang + 2 * junc_hang
isoform2: l1 + l3 + rlen - 2 * edge_hang
p1: rlen - 2 * junc_hang
p3: l1 + l3 - 2 * edge_hang + 2 * junc_hang
"""
# check SE event
if _check_SE_event(gene) == False:
print("This is not exon-skipping event: %s! %(gene.geneID)")
exit()
l1, l2, l3 = gene.trans[0].exons[:, 1] - gene.trans[0].exons[:, 0]
prob_mat = np.zeros((2, 3))
# Isoform 1
len_isoform1 = l1 + l2 + l3 + rlen - 2 * edge_hang
prob_mat[0, 0] = (l2 + rlen - 2 * junc_hang) / len_isoform1
prob_mat[0, 2] = (l1 + l3 - 2 * edge_hang + 2 * junc_hang) / len_isoform1
# Isoform 2
len_isoform2 = l1 + l3 + rlen - 2 * edge_hang
prob_mat[1, 1] = (rlen - 2 * junc_hang) / len_isoform2
prob_mat[1, 2] = (l1 + l3 - 2 * edge_hang + 2 * junc_hang) / len_isoform2
return prob_mat
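# Worked example with illustrative numbers: l1=100, l2=50, l3=100, rlen=75,
# edge_hang=10, junc_hang=2 give isoform lengths 305 and 255, hence
#   prob_mat = [[121/305, 0, 184/305],
#               [0, 71/255, 184/255]]
# and each row sums to one, as expected for per-isoform read-category probabilities.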
def SE_effLen(gene, rlen=75, edge_hang=10, junc_hang=2):
"""Get effective length matrix for three read categories from two isoforms.
In exon-skipping (SE) event, there are two isoform:
isoform1 for exon inclusion and isoform2 for exon exclusion.
and three read groups:
group1: uniquely from isoform1
group2: uniquely from isoform2
group3: ambiguous identity
Here, we only treat single-end reads. For paired-end reads,
we treat it as the single-end by only using the most informative
mate, namely the mate mapped to least number of isoform(s).
isoform1: l1 + l2 + l3 + rlen - 2 * edge_hang
read group1: l2 + rlen - 2 * junc_hang
read group3: l1 + l3 - 2 * edge_hang + 2 * junc_hang
isoform2: l1 + l3 + rlen - 2 * edge_hang
read group2: rlen - 2 * junc_hang
read group3: l1 + l3 - 2 * edge_hang + 2 * junc_hang
"""
# check SE event
if _check_SE_event(gene) == False:
print("This is not exon-skipping event: %s! %(gene.geneID)")
exit()
l1, l2, l3 = gene.trans[0].exons[:, 1] - gene.trans[0].exons[:, 0]
isoLen_mat = np.zeros((2, 3))
# isoform length
len_isoform1 = l1 + l2 + l3 + rlen - 2 * edge_hang
len_isoform2 = l1 + l3 + rlen - 2 * edge_hang
# segments
isoLen_mat[0, 0] = l2 + rlen - 2 * junc_hang
isoLen_mat[1, 1] = rlen - 2 * junc_hang
isoLen_mat[0, 2] = l1 + l3 - 2 * edge_hang + 2 * junc_hang
isoLen_mat[1, 2] = l1 + l3 - 2 * edge_hang + 2 * junc_hang
# prob_mat = isoLen_mat / isoLen_mat.sum(1, keepdims=True)
return isoLen_mat
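# Note (sketch): as the commented-out line above suggests, row-normalising the
# effective-length matrix recovers the read-category probabilities, so the
# result below matches SE_probability(gene, rlen, edge_hang, junc_hang).
def _efflen_to_probability(gene, rlen=75, edge_hang=10, junc_hang=2):
    eff = SE_effLen(gene, rlen, edge_hang, junc_hang)
    return eff / eff.sum(1, keepdims=True)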
| 35.354067
| 79
| 0.603194
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,497
| 0.337935
|
6c5b5d2beb7892b3713dc1291924921532e74885
| 1,795
|
py
|
Python
|
encommon/tests/test_times.py
|
enasisnetwork/encommon-py
|
c2bb1412171c84fe2917a23b535a6db1b5f523c1
|
[
"MIT"
] | null | null | null |
encommon/tests/test_times.py
|
enasisnetwork/encommon-py
|
c2bb1412171c84fe2917a23b535a6db1b5f523c1
|
[
"MIT"
] | null | null | null |
encommon/tests/test_times.py
|
enasisnetwork/encommon-py
|
c2bb1412171c84fe2917a23b535a6db1b5f523c1
|
[
"MIT"
] | null | null | null |
#==============================================================================#
# Enasis Network Common Libraries #
# Python Functions Time Processing #
#==============================================================================#
# Primary Functions for Time Processing #
# : - - - - - - - - - - - - - - - - - - -- - - - - - - - - - - - - - - - - - - #
# : Standard Time Converting timeformat #
#==============================================================================#
#------------------------------------------------------------------------------#
# Primary Functions for Time Processing #
#------------------------------------------------------------------------------#
#
#~~ Standard Time Converting ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Conditionally perform the conversions to and from epoch and timestamp string
#-----------------------------------------------------------------------------
def test_timeformat():
#
# Import the module and functions relevant to this particular set of tests
from encommon.times import timeformat
#
# Initial section for instantizing variables expected by remaining routine
epoch = 1558763424
stamp = "2019-05-25T05:50:24"
#
# Assert the relevant conditions indicating either test success or failure
assert timeformat(epoch, "%Y-%m-%dT%H:%M:%S")[1] == stamp
assert timeformat(stamp, "%Y-%m-%dT%H:%M:%S")[0] == epoch
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
#------------------------------------------------------------------------------#
| 54.393939
| 80
| 0.325905
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,558
| 0.867967
|
6c5bb6b2d92f0865bef01adbf1214af8685dd82e
| 2,661
|
py
|
Python
|
source/dashboard.py
|
R0htg0r/Automatic-comments-for-Instagram-
|
0a4e02d45f02be1462fb44fc6ebf5c8eb11fbd04
|
[
"Apache-2.0"
] | 3
|
2021-04-03T19:39:03.000Z
|
2021-04-06T13:03:43.000Z
|
source/dashboard.py
|
R0htg0r/Automatic-comments-for-Instagram-
|
0a4e02d45f02be1462fb44fc6ebf5c8eb11fbd04
|
[
"Apache-2.0"
] | null | null | null |
source/dashboard.py
|
R0htg0r/Automatic-comments-for-Instagram-
|
0a4e02d45f02be1462fb44fc6ebf5c8eb11fbd04
|
[
"Apache-2.0"
] | null | null | null |
from colorama import Fore, Back, Style, init
import pyautogui
import time
import os
os.system("mode 120, 30")
class Poxtrop():
def __init__(cuspida):
cuspida.settings()
def interface(cuspida):
init()
print(Fore.YELLOW + """
######### ###### ## ## ########### ####### ###### #########
## ## ## ## ## ## ## ### ## ## ## ## ## ## ##
## ## ## ## ## ## ### ## ## ## ## ## ##
## ## ## ## ## ## ### ## ## ## ## ##
## ## ## ## ############ ### ## ## ## ## ##
######## ## ## ############ ### ## ## ## ########
## ## ## ## ## ## ### ## ####### ## ## ## ##
## ## ## ## ## ## ### ## ## ## ## ## ##
## ## ## ## ## ## ### ## ## ## ## ## ##
## ## ###### ## ## ##### ####### ###### ## ##""")
print(Fore.GREEN + """
                    Information:
                    1) Enter a simple, short comment.
                    2) Set the time in seconds; 10s is recommended.
                        Version: 1.0
""")
def settings(cuspida):
cuspida.interface()
try:
button_message = input(Fore.WHITE + " Comentário: ")
if button_message == "":
pass
button_tempo = int(input(Fore.WHITE + " Segundos: "))
button_clicked = 0
os.system("cls")
cuspida.interface()
print(Fore.YELLOW + " Sua caixa de transporte: ")
while True:
button_clicked += 1
time.sleep(button_tempo)
button_chatImg = pyautogui.locateOnScreen('./img/chat.png')
button_location = pyautogui.center(button_chatImg)
pyautogui.click(button_location)
pyautogui.typewrite(button_message)
pyautogui.press("enter")
print(Fore.GREEN + f" {button_clicked}ª mensagem enviada com sucesso. ")
except(ValueError):
exit()
except(TypeError):
exit()
except(KeyboardInterrupt):
exit()
Poxtrop()
| 42.238095
| 114
| 0.311161
| 2,536
| 0.950525
| 0
| 0
| 0
| 0
| 0
| 0
| 1,441
| 0.540105
|
6c5c369d85c41ace1c62ddc67471055b462a3df1
| 1,527
|
py
|
Python
|
ledshimdemo/display_options.py
|
RatJuggler/led-shim-effects
|
3c63f5f2ce3f35f52e784489deb9212757c18cd2
|
[
"MIT"
] | 1
|
2021-04-17T16:18:14.000Z
|
2021-04-17T16:18:14.000Z
|
ledshimdemo/display_options.py
|
RatJuggler/led-shim-effects
|
3c63f5f2ce3f35f52e784489deb9212757c18cd2
|
[
"MIT"
] | 12
|
2019-07-26T18:01:56.000Z
|
2019-08-31T15:35:17.000Z
|
ledshimdemo/display_options.py
|
RatJuggler/led-shim-demo
|
3c63f5f2ce3f35f52e784489deb9212757c18cd2
|
[
"MIT"
] | null | null | null |
import click
from typing import Callable, List
from .effect_parade import AbstractEffectParade
DISPLAY_OPTIONS = [
click.option('-p', '--parade', type=click.Choice(AbstractEffectParade.get_parade_options()),
help="How the effects are displayed.", default=AbstractEffectParade.get_default_option(),
show_default=True),
click.option('-d', '--duration', type=click.IntRange(1, 180),
help="How long to display each effect for, in seconds (1-180).", default=10, show_default=True),
click.option('-r', '--repeat', type=click.IntRange(1, 240),
help="How many times to run the effects before stopping (1-240).", default=1, show_default=True),
click.option('-b', '--brightness', type=click.IntRange(1, 10),
help="How bright the effects will be (1-10).", default=8, show_default=True),
click.option('-i', '--invert', is_flag=True, help="Change the display orientation.")
]
def add_options(options: List[click.option]) -> Callable:
"""
Create a decorator to apply Click options to a function.
:param options: Click options to be applied
:return: Decorator function
"""
def _add_options(func: Callable):
"""
Apply click options to the supplied function.
:param func: To add click options to.
:return: The function with the click options added.
"""
for option in reversed(options):
func = option(func)
return func
return _add_options
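# Usage sketch (illustrative only): how DISPLAY_OPTIONS would typically be
# attached to a Click command. The command name and body below are assumptions,
# not the project's real CLI entry point.
@click.command()
@add_options(DISPLAY_OPTIONS)
def _example_display(parade, duration, repeat, brightness, invert):
    """Echo the selected display options (illustration only)."""
    click.echo(f"{parade}: {duration}s x {repeat}, brightness {brightness}, invert={invert}")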
| 41.27027
| 114
| 0.64833
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 626
| 0.409954
|
6c5e382a6852be827146dfca1422cff18cd4ad2e
| 587
|
py
|
Python
|
download_data_folder.py
|
MelvinYin/Defined_Proteins
|
75da20be82a47d85d27176db29580ab87d52b670
|
[
"BSD-3-Clause"
] | 2
|
2021-01-05T02:55:57.000Z
|
2021-04-16T15:49:08.000Z
|
download_data_folder.py
|
MelvinYin/Defined_Proteins
|
75da20be82a47d85d27176db29580ab87d52b670
|
[
"BSD-3-Clause"
] | null | null | null |
download_data_folder.py
|
MelvinYin/Defined_Proteins
|
75da20be82a47d85d27176db29580ab87d52b670
|
[
"BSD-3-Clause"
] | 1
|
2021-01-05T08:12:38.000Z
|
2021-01-05T08:12:38.000Z
|
import boto3
import os
import tarfile
if __name__ == "__main__":
s3 = boto3.client('s3', aws_access_key_id="AKIAY6UR252SQUQ3OSWZ",
aws_secret_access_key="08LQj"
"+ryk9SMojG18vERXKKzhNSYk5pLhAjrIAVX")
output_path = "./data.tar.gz"
with open(output_path, 'wb') as f:
s3.download_fileobj('definedproteins', "data.tar.gz", f)
assert os.path.isfile(output_path)
print("Download succeeded")
tar = tarfile.open(output_path, "r:gz")
tar.extractall()
tar.close()
os.remove(output_path)
| 34.529412
| 82
| 0.626917
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 155
| 0.264055
|
6c600ba2b9e8dfbbc98654347a117e7d18a03ded
| 8,247
|
py
|
Python
|
splotch/utils_visium.py
|
adaly/cSplotch
|
c79a5cbd155f2cd5bcc1d8b04b1824923feb1442
|
[
"BSD-3-Clause"
] | 1
|
2021-12-20T16:13:16.000Z
|
2021-12-20T16:13:16.000Z
|
splotch/utils_visium.py
|
adaly/cSplotch
|
c79a5cbd155f2cd5bcc1d8b04b1824923feb1442
|
[
"BSD-3-Clause"
] | null | null | null |
splotch/utils_visium.py
|
adaly/cSplotch
|
c79a5cbd155f2cd5bcc1d8b04b1824923feb1442
|
[
"BSD-3-Clause"
] | null | null | null |
import os, sys
import logging
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from scipy.ndimage import label
from .utils import watershed_tissue_sections, get_spot_adjacency_matrix
# Read in a series of Loupe annotation files and return the set of all unique categories.
# NOTE: "Undefined"
def unique_annots_loupe(loupe_files):
all_annots = []
for fh in loupe_files:
df = pd.read_csv(fh, header=0, sep=",")
for a in df.iloc[:,1].values:
if isinstance(a,str) and len(a)>0 and a.lower() != "undefined":
all_annots.append(a)
return sorted(list(set(all_annots)))
# Annotataion matrix from Loupe annotation file
def read_annot_matrix_loupe(loupe_file, position_file, unique_annots):
annots = pd.read_csv(loupe_file, header=0, sep=",")
positions = pd.read_csv(position_file, index_col=0, header=None,
names=["in_tissue", "array_row", "array_col", "pixel_row", "pixel_col"])
annot_matrix = np.zeros((len(unique_annots), len(annots['Barcode'])), dtype=int)
positions_list = []
for i,b in enumerate(annots['Barcode']):
xcoor = positions.loc[b,'array_col']
ycoor = positions.loc[b,'array_row']
positions_list.append('%d_%d' % (xcoor, ycoor))
if annots.iloc[i,1] in unique_annots:
annot_matrix[unique_annots.index(annots.iloc[i,1]),i] = 1
annot_frame = pd.DataFrame(annot_matrix, index=unique_annots, columns=positions_list)
return annot_frame
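# Illustration (assumed format, not taken from real data): the Loupe export
# consumed above is expected to look roughly like
#     Barcode,AAR
#     ACGTACGTACGTACGT-1,Cortex
#     TTGCAAGTCCATGCAA-1,Undefined
# i.e. one barcode per row with the annotation in the second column; spots whose
# annotation is empty or "Undefined" end up as all-zero columns in annot_frame.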
# Converts from pseudo-hex indexing of Visium (in which xdim is doubled and odd-indexed)
# rows are offset by 1) to standard array indexing with odd rows implicitly shifted.
def pseudo_hex_to_oddr(c):
x,y = c
if int(np.rint(y)) % 2 == 1:
x -= 1
return [int(np.rint(x//2)),int(np.rint(y))]
# Converts from pseudo-hex indexing of Visium (in which xdim is doubled and odd-indexed)
# rows are offset by 1) to Cartesian coordinates where neighbors are separated by unit distance.
def pseudo_hex_to_true_hex(c):
x_arr, y_arr = pseudo_hex_to_oddr(c)
x = x_arr
y = y_arr * np.sqrt(3)/2
if y_arr % 2 == 1:
x += 0.5
return [x,y]
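# Worked example: one even-row and one odd-row spot converted from Visium
# pseudo-hex indexing, following the two functions above.
def _coordinate_conversion_example():
    assert pseudo_hex_to_oddr([4, 2]) == [2, 2]        # even row: x // 2
    assert pseudo_hex_to_oddr([5, 3]) == [2, 3]        # odd row: (x - 1) // 2
    x, y = pseudo_hex_to_true_hex([5, 3])
    assert (x, round(y, 3)) == (2.5, 2.598)            # odd rows shift right by 0.5
    return True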
''' Determines connected components by recursively checking neighbors in a hex grid.
bin_oddr_matrix - binary odd-right indexed matrix where 1 indicates annotated spot.
'''
def connected_components_hex(bin_oddr_matrix):
lmat = np.zeros_like(bin_oddr_matrix)
lmax = 0
# Returns immediate neighbors of a coordinate in an odd-right hex grid index.
def neighbors(cor):
N = []
# Spots on even-numbered rows have the following adjacency:
# [[1,1,0],[1,1,1],[1,1,0]]
if cor[1] % 2 == 0:
offsets = [[-1,-1],[0,-1],[-1,0],[1,0],[-1,1],[0,1]]
# Spots on odd-numbered rows have the following adjacency:
# [[0,1,1],[1,1,1],[0,1,1]]
else:
offsets = [[0,-1],[1,-1],[-1,0],[1,0],[0,1],[1,1]]
# Find all valid neighbors (within image bounds and present in binary array).
for o in offsets:
q = np.array(cor) + np.array(o)
if q[0]>=0 and q[1]>=0 and q[0]<bin_oddr_matrix.shape[1] and q[1]<bin_oddr_matrix.shape[0]:
if bin_oddr_matrix[q[1],q[0]] == 1:
N.append(q)
return N
# Find set of all spots connected to a given coordinate.
def neighborhood(cor, nmat):
nmat[cor[1],cor[0]] = True
N = neighbors(cor)
if len(N)==0:
return nmat
for q in N:
if not nmat[q[1],q[0]]:
neighborhood(q, nmat)
return nmat
    # The default recursion limit is 1000 -- if there are more than ~1k spots on the grid we
    # need to raise it so all of them can be traversed (never lower the current limit).
    sys.setrecursionlimit(max(sys.getrecursionlimit(), int(np.sum(bin_oddr_matrix)) + 10))
# Determine neighborhood of each unlabled spot, assign a label, and proceed.
for y in range(bin_oddr_matrix.shape[0]):
for x in range(bin_oddr_matrix.shape[1]):
if bin_oddr_matrix[y,x]==1 and lmat[y,x]==0:
nmat = neighborhood([x,y], np.zeros_like(bin_oddr_matrix, dtype=bool))
lmax += 1
lmat[nmat] = lmax
return lmat, lmax
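# Usage sketch (illustrative only): two adjacent annotated spots plus one
# isolated spot on a tiny odd-right grid yield two connected components under
# the adjacency rules above.
def _connected_components_example():
    grid = np.zeros((4, 4))
    grid[1, 1] = 1
    grid[2, 1] = 1   # odd-row neighbour (offset [0, 1]) of the spot at x=1, y=1
    grid[0, 3] = 1   # isolated spot
    labels, n_labels = connected_components_hex(grid)
    return labels, n_labels   # n_labels == 2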
''' Analog of detect_tissue_sections for hexagonally packed ST grids (Visium)
'''
def detect_tissue_sections_hex(coordinates, check_overlap=False, threshold=120):
# Convert from spatial hexagonal coordinates to odd-right indexing:
oddr_indices = np.array(list(map(pseudo_hex_to_oddr, coordinates)))
xdim, ydim = 64, 78 # Visium arrays have 78 rows of 64 spots each
bin_oddr_matrix = np.zeros((ydim,xdim))
for ind in oddr_indices:
bin_oddr_matrix[ind[1],ind[0]]=1
labels, n_labels = connected_components_hex(bin_oddr_matrix)
''' From here on, copy-pasta from utils.detect_tissue_section for removing small components
and detecting overlap.
'''
# get the labels of original spots (before dilation)
unique_labels,unique_labels_counts = np.unique(labels*bin_oddr_matrix,return_counts=True)
logging.info('Found %d candidate tissue sections'%(unique_labels.max()+1))
# this is used to label new tissue sections obtained by watershedding
max_label = unique_labels.max()+1
# let us see if there are any tissue sections with unexpected many spots
if check_overlap:
for unique_label,unique_label_counts in zip(unique_labels,unique_labels_counts):
# skip background
if unique_label == 0:
continue
# most likely two tissue sections are slightly overlapping
elif unique_label_counts >= threshold:
logging.warning('Tissue section has %d spots. Let us try to break the tissue section into two.'%(unique_label_counts))
labels = watershed_tissue_sections(unique_label,labels,max_label)
max_label = max_label + 1
unique_labels,unique_labels_counts = np.unique(labels*bin_oddr_matrix,return_counts=True)
# discard tissue sections with less than 10 spots
for idx in range(0,len(unique_labels_counts)):
if unique_labels_counts[idx] < 10:
labels[labels == unique_labels[idx]] = 0
spots_labeled = labels*bin_oddr_matrix
# get labels of detected tissue sections
# and discard skip the background class
unique_labels = np.unique(spots_labeled)
unique_labels = unique_labels[unique_labels > 0]
logging.info('Keeping %d tissue sections'%(len(unique_labels)))
return unique_labels, spots_labeled
''' Create a boolean vector indicating which spots from the coordinate list belong to the
tissue section being considered (tissue_idx, spots_tissue_section_labeled obtained by
connected component analysis in detect_tissue_sections_hex).
'''
def get_tissue_section_spots_hex(tissue_idx, array_coordinates_float, spots_tissue_section_labeled):
tissue_section_spots = np.zeros(array_coordinates_float.shape[0],dtype=bool)
for n, chex in enumerate(array_coordinates_float):
cor = pseudo_hex_to_oddr(chex)
if spots_tissue_section_labeled[cor[1],cor[0]] == tissue_idx:
tissue_section_spots[n] = True
return tissue_section_spots
''' Return spot adjacency matrix given a list of coordinates in pseudo-hex:
'''
def get_spot_adjacency_matrix_hex(coordinates):
cartesian_coords = np.array(list(map(pseudo_hex_to_true_hex, coordinates)))
return get_spot_adjacency_matrix(cartesian_coords)
from scipy.ndimage.measurements import label
from splotch.utils import read_array, filter_arrays, detect_tissue_sections, get_tissue_section_spots
import glob
if __name__ == "__main__":
annot_files = glob.glob('../data/Visium_test/*.csv')
aars = unique_annots_loupe(annot_files)
loupe_file = '../data/Visium_test/V014-CGND-MA-00765-A_loupe_AARs.csv'
position_file = '../data/Visium_test/V014-CGND-MA-00765-A/outs/spatial/tissue_positions_list.csv'
annot_frame = read_annot_matrix_loupe(loupe_file, position_file, aars)
array_coordinates_float = np.array([list(map(float, c.split("_"))) for c in annot_frame.columns.values])
unique_labels, spots_labeled = detect_tissue_sections_hex(array_coordinates_float, True, 600)
plt.figure()
plt.imshow(spots_labeled)
plt.show()
for tissue_idx in unique_labels:
tissue_section_spots = get_tissue_section_spots_hex(tissue_idx,array_coordinates_float,
spots_labeled)
tissue_section_coordinates_float = array_coordinates_float[tissue_section_spots]
tissue_section_coordinates_string = ["%.2f_%.2f" % (c[0],c[1]) for c in tissue_section_coordinates_float]
tissue_section_W = get_spot_adjacency_matrix_hex(tissue_section_coordinates_float)
print(np.sum(tissue_section_W))
df = pd.DataFrame(tissue_section_W, index=tissue_section_coordinates_string,
columns=tissue_section_coordinates_string)
| 36.0131
| 122
| 0.751789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,754
| 0.33394
|
6c609ad8257f94c3be0be69725b48962c792c7f1
| 1,729
|
py
|
Python
|
floa/routes.py
|
rsutton/loa
|
31ca8cc3f7be011b21f22ed2ce509d135a4b866b
|
[
"MIT"
] | null | null | null |
floa/routes.py
|
rsutton/loa
|
31ca8cc3f7be011b21f22ed2ce509d135a4b866b
|
[
"MIT"
] | null | null | null |
floa/routes.py
|
rsutton/loa
|
31ca8cc3f7be011b21f22ed2ce509d135a4b866b
|
[
"MIT"
] | null | null | null |
from flask import (
Blueprint,
render_template,
request,
session,
current_app as app
)
from flask_login import current_user
from floa.extensions import loa
from floa.models.library import Library
bp = Blueprint(
name='home',
import_name=__name__,
url_prefix="/"
)
@app.errorhandler(404)
def handle_404(err):
return render_template('404.html'), 404
@app.errorhandler(500)
def handle_500(err):
return render_template('500.html'), 500
@app.context_processor
def context_process():
last_update = loa.last_update
catalog_count = len(loa.catalog)
return dict(
last_update=last_update,
catalog_count=catalog_count,
session_library=session['LIBRARY'])
@bp.route("/")
def home():
if current_user.is_authenticated:
if loa.time_for_update():
session['LIBRARY'] = Library(library=current_user.library.library)\
.update(loa.catalog).library
else:
session['LIBRARY'] = current_user.library.library
else:
if 'LIBRARY' not in session:
session['LIBRARY'] = Library().update(loa.catalog).library
return render_template(
'home.html',
data=dict(catalog=loa.catalog)
)
@bp.route("/_update/item", methods=["POST"])
def update_book_status():
# create library list from the session object
library = Library(library=session['LIBRARY'])
library.set_status(
id=request.json['id'],
status=request.json['status']
)
# save updated library to session
session['LIBRARY'] = library.library
if current_user.is_authenticated:
current_user.library = library
current_user.save()
return "OK"
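# Hedged usage sketch (not part of the original file): the route above reads
# request.json['id'] and request.json['status'], so a client posts a JSON body
# such as {"id": 42, "status": 1} (illustrative values), e.g. with Flask's test client:
#   client = app.test_client()
#   client.post('/_update/item', json={'id': 42, 'status': 1})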
| 24.7
| 79
| 0.657606
| 0
| 0
| 0
| 0
| 1,414
| 0.817814
| 0
| 0
| 221
| 0.12782
|
6c619fe8bbdf105e5a1586be4e70bb3d3697916a
| 3,496
|
py
|
Python
|
api/async/__init__.py
|
lampwins/orangengine-ui
|
8c864cd297176aa0ff9ead9682f2085f9fd3f1c0
|
[
"MIT"
] | 1
|
2017-10-28T00:21:43.000Z
|
2017-10-28T00:21:43.000Z
|
api/async/__init__.py
|
lampwins/orangengine-ui
|
8c864cd297176aa0ff9ead9682f2085f9fd3f1c0
|
[
"MIT"
] | null | null | null |
api/async/__init__.py
|
lampwins/orangengine-ui
|
8c864cd297176aa0ff9ead9682f2085f9fd3f1c0
|
[
"MIT"
] | 4
|
2017-01-26T23:31:32.000Z
|
2019-04-17T14:02:00.000Z
|
import logging
import orangengine
from api.models import Device as DeviceModel
from celery.utils.log import get_task_logger
from api import debug
celery_logger = get_task_logger(__name__)
if debug:
celery_logger.setLevel(logging.DEBUG)
celery_logger.debug('Enabled Debug mode')
class OEDeviceFactory(object):
"""Device Factory for the orangengine device instances
    The factory is responsible for maintaining singleton instances for each device.
It contains public methods for updating (refreshing) the api device models from
the database, and the respective orangengine device instances.
"""
def __init__(self):
self._devices = {}
self._device_models = {}
self._refresh_all_device_models()
celery_logger.debug("*****************************: %s", self)
@staticmethod
    def _dispatch_device(device_model):
        """use the device model to dispatch and return an orangengine device"""
if device_model:
conn_params = {
'host': device_model.hostname,
'username': device_model.username,
'password': device_model.password,
'device_type': device_model.driver,
'apikey': device_model.apikey,
}
celery_logger.info("Dispatching device: %s", device_model.hostname)
return orangengine.dispatch(**conn_params)
def _refresh_device_model(self, hostname):
"""load and override the device model from the database for the given hostname"""
device_model = DeviceModel.query.filter_by(deleted=False, hostname=hostname).first()
if device_model:
self._device_models[hostname] = device_model
return device_model
    def _refresh_all_device_models(self):
        """replace all device models and refresh them"""
self._device_models = {}
device_models = DeviceModel.query.filter_by(deleted=False).all()
if device_models:
for device_model in device_models:
self._device_models[device_model.hostname] = device_model
def _init_device(self, hostname):
celery_logger.debug("init %s", hostname)
device_model = self._device_models.get(hostname)
if device_model is None:
device_model = self._refresh_device_model(hostname)
device = self._dispatch_device(device_model)
self._devices[hostname] = device
return device
    def get_device(self, hostname, refresh_none=True):
        """Return the orangengine device singleton instance for the given hostname.
Optionally (by default) refresh the device (and model) if it is not found
"""
celery_logger.debug("getting device %s", hostname)
device = self._devices.get(hostname)
if not device and refresh_none:
device = self._init_device(hostname)
return device
def get_all_device_models(self):
"""Return a list of all device models currently stored
"""
return self._device_models.values()
def get_device_model(self, hostname):
"""Return the device model for a given hostname
"""
return self._device_models.get(hostname)
def delete_device(self, hostname, include_model=True):
"""Delete the orangengine device instance and optionally the model
"""
self._devices.pop(hostname)
if include_model:
self._device_models.pop(hostname)
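# Hedged usage sketch (not part of the original module). It assumes an application
# context with a populated Device table; 'fw1.example.com' is a hypothetical hostname.
#   factory = OEDeviceFactory()
#   device = factory.get_device('fw1.example.com')          # dispatched lazily, then cached
#   assert device is factory.get_device('fw1.example.com')  # same singleton instance returned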
| 33.615385
| 92
| 0.663043
| 3,203
| 0.91619
| 0
| 0
| 589
| 0.168478
| 0
| 0
| 1,024
| 0.292906
|
6c62a1650704041514fc09b42720dad2d27e5799
| 753
|
py
|
Python
|
app1/migrations/0060_auto_20201222_2131.py
|
vashuteotia123/zbcvit
|
da29b3281ccc87481a264b63c5b6c3a549945f33
|
[
"MIT"
] | 6
|
2021-09-16T16:46:56.000Z
|
2022-02-06T13:00:08.000Z
|
app1/migrations/0060_auto_20201222_2131.py
|
vashuteotia123/zbcvit
|
da29b3281ccc87481a264b63c5b6c3a549945f33
|
[
"MIT"
] | null | null | null |
app1/migrations/0060_auto_20201222_2131.py
|
vashuteotia123/zbcvit
|
da29b3281ccc87481a264b63c5b6c3a549945f33
|
[
"MIT"
] | 1
|
2021-09-14T09:26:58.000Z
|
2021-09-14T09:26:58.000Z
|
# Generated by Django 2.2.7 on 2020-12-22 16:01
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app1', '0059_auto_20201111_2321'),
]
operations = [
migrations.RemoveField(
model_name='event',
name='id',
),
migrations.AlterField(
model_name='event',
name='event_name',
field=models.CharField(max_length=50, primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='resources',
name='resource_date_time',
field=models.DateTimeField(default=datetime.datetime(2020, 12, 22, 21, 31, 39, 678304)),
),
]
| 25.965517
| 100
| 0.589641
| 644
| 0.855246
| 0
| 0
| 0
| 0
| 0
| 0
| 139
| 0.184595
|
6c63b62274efc319d7d5ff5ab63d36ad70596229
| 240
|
py
|
Python
|
stacks/tests/test_decode_string.py
|
ahcode0919/python-ds-algorithms
|
0d617b78c50b6c18da40d9fa101438749bfc82e1
|
[
"MIT"
] | null | null | null |
stacks/tests/test_decode_string.py
|
ahcode0919/python-ds-algorithms
|
0d617b78c50b6c18da40d9fa101438749bfc82e1
|
[
"MIT"
] | null | null | null |
stacks/tests/test_decode_string.py
|
ahcode0919/python-ds-algorithms
|
0d617b78c50b6c18da40d9fa101438749bfc82e1
|
[
"MIT"
] | 3
|
2020-10-07T20:24:45.000Z
|
2020-12-16T04:53:19.000Z
|
from stacks.decode_string import decode_string
def test_decode_string():
assert decode_string("3[a]2[bc]") == "aaabcbc"
assert decode_string("3[a2[c]]") == "accaccacc"
assert decode_string("2[abc]3[cd]ef") == "abcabccdcdcdef"
| 30
| 61
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 72
| 0.3
|
6c65225c18ab757299cb8993ab36ee8beae952c4
| 239
|
py
|
Python
|
receiver_udp.py
|
pabitra0177/ITR-internship
|
3d1909b9e4a1b980ad4f6cb4b8c1fb17811c2d75
|
[
"MIT"
] | null | null | null |
receiver_udp.py
|
pabitra0177/ITR-internship
|
3d1909b9e4a1b980ad4f6cb4b8c1fb17811c2d75
|
[
"MIT"
] | null | null | null |
receiver_udp.py
|
pabitra0177/ITR-internship
|
3d1909b9e4a1b980ad4f6cb4b8c1fb17811c2d75
|
[
"MIT"
] | null | null | null |
#
import socket
ip = "127.0.0.1"
port = 5001
s = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
s.bind((ip,port))
i=0
while True:
data, addr = s.recvfrom(1024)
    print("Received from ", addr)
    print("Received ", data)
s.close()
| 14.058824
| 51
| 0.656904
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 40
| 0.167364
|
6c6938ad771712cddf43056b1ad20a6d5a62ca66
| 4,240
|
py
|
Python
|
yolov3/utils/checkpoint.py
|
hysts/pytorch_yolov3
|
6d4c7a1e42d366894effac8ca52f7116f891b5ab
|
[
"MIT"
] | 13
|
2019-03-22T15:22:22.000Z
|
2021-09-30T21:15:37.000Z
|
yolov3/utils/checkpoint.py
|
hysts/pytorch_yolov3
|
6d4c7a1e42d366894effac8ca52f7116f891b5ab
|
[
"MIT"
] | null | null | null |
yolov3/utils/checkpoint.py
|
hysts/pytorch_yolov3
|
6d4c7a1e42d366894effac8ca52f7116f891b5ab
|
[
"MIT"
] | null | null | null |
import copy
import logging
import pathlib
import torch
import torch.nn as nn
from yolov3.config import get_default_config
from yolov3.utils.config_node import ConfigNode
class CheckPointer:
def __init__(
self,
model,
optimizer=None,
scheduler=None,
checkpoint_dir=None,
logger=None,
distributed_rank=0,
):
self.model = model
self.optimizer = optimizer
self.scheduler = scheduler
self.checkpoint_dir = pathlib.Path(
checkpoint_dir) if checkpoint_dir is not None else None
if logger is None:
logger = logging.getLogger(__name__)
self.logger = logger
self.distributed_rank = distributed_rank
def save(self, name, **kwargs):
if self.checkpoint_dir is None or self.distributed_rank != 0:
return
checkpoint = copy.deepcopy(kwargs)
if isinstance(self.model,
(nn.DataParallel, nn.parallel.DistributedDataParallel)):
checkpoint['model'] = self.model.module.state_dict()
else:
checkpoint['model'] = self.model.state_dict()
if self.optimizer is not None:
checkpoint['optimizer'] = self.optimizer.state_dict()
if self.scheduler is not None:
checkpoint['scheduler'] = self.scheduler.state_dict()
outpath = self.checkpoint_dir / f'{name}.pth'
self.logger.info(f'Saving checkpoint to {outpath.as_posix()}')
torch.save(checkpoint, outpath)
self.tag_last_checkpoint(outpath)
def load(self, path=None, backbone=False):
if path is None and self.has_checkpoint():
path = self.get_checkpoint_filepath()
if isinstance(path, str):
path = pathlib.Path(path)
if path is None or not path.exists():
raise RuntimeError('Checkpoint not found.')
self.logger.info(f'Loading checkpoint from {path.as_posix()}')
checkpoint = self._load_checkpoint(path)
self.load_checkpoint(checkpoint, backbone)
if 'optimizer' in checkpoint.keys() and self.optimizer is not None:
self.logger.info(f'Loading optimizer from {path.as_posix()}')
self.optimizer.load_state_dict(checkpoint['optimizer'])
if 'scheduler' in checkpoint.keys() and self.scheduler is not None:
self.logger.info(f'Loading scheduler from {path.as_posix()}')
self.scheduler.load_state_dict(checkpoint['scheduler'])
default_config = get_default_config()
if 'config' in checkpoint.keys():
config = ConfigNode(checkpoint['config'])
else:
config = default_config
return config, checkpoint.get('iteration', 0)
def has_checkpoint(self):
if self.checkpoint_dir is None:
return False
checkpoint_file = self.checkpoint_dir / 'last_checkpoint'
return checkpoint_file.exists()
def get_checkpoint_filepath(self):
checkpoint_file = self.checkpoint_dir / 'last_checkpoint'
try:
with open(checkpoint_file, 'r') as fin:
last_saved = fin.read()
last_saved = last_saved.strip()
last_saved = self.checkpoint_dir / last_saved
except IOError:
last_saved = None
return last_saved
def tag_last_checkpoint(self, last_filepath):
outfile = self.checkpoint_dir / 'last_checkpoint'
with open(outfile, 'w') as fout:
fout.write(last_filepath.name)
@staticmethod
def _load_checkpoint(path):
return torch.load(path, map_location='cpu')
def load_checkpoint(self, checkpoint, backbone):
if isinstance(self.model,
(nn.DataParallel, nn.parallel.DistributedDataParallel)):
if not backbone:
self.model.module.load_state_dict(checkpoint['model'])
else:
self.model.module.backbone.load_state_dict(checkpoint['model'])
else:
if not backbone:
self.model.load_state_dict(checkpoint['model'])
else:
self.model.backbone.load_state_dict(checkpoint['model'])
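if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): save one checkpoint for a
    # toy model into a temporary directory and load it back. The model, optimizer and
    # directory below are illustrative assumptions, not values used by the project.
    import tempfile
    toy_model = nn.Linear(4, 2)
    toy_optimizer = torch.optim.SGD(toy_model.parameters(), lr=0.1)
    with tempfile.TemporaryDirectory() as tmp_dir:
        checkpointer = CheckPointer(toy_model, optimizer=toy_optimizer, checkpoint_dir=tmp_dir)
        checkpointer.save('checkpoint_0000001', iteration=1)
        # load() resolves tmp_dir/last_checkpoint, restores the weights and optimizer state,
        # and returns the stored config (here the default one) plus the iteration counter.
        config, iteration = checkpointer.load()
        print(iteration)  # -> 1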
| 36.551724
| 79
| 0.624764
| 4,065
| 0.958726
| 0
| 0
| 97
| 0.022877
| 0
| 0
| 407
| 0.095991
|
6c6a82e95bf8ebf0eb518403b616adac59f096b0
| 505
|
py
|
Python
|
autograd/tests/test_z_playground.py
|
pmaederyork/Dragrongrad
|
32794d561f8d0273592ed55d315013eab2c24b8b
|
[
"MIT"
] | 3
|
2018-12-17T16:24:11.000Z
|
2020-06-03T22:40:50.000Z
|
autograd/tests/test_z_playground.py
|
cs207-project-group4/project-repo
|
d5ee88d2a7d16477d816d830ba90d241a05e3b48
|
[
"MIT"
] | 2
|
2018-10-18T17:59:26.000Z
|
2018-12-08T16:06:34.000Z
|
autograd/tests/test_z_playground.py
|
cs207-project-group4/project-repo
|
d5ee88d2a7d16477d816d830ba90d241a05e3b48
|
[
"MIT"
] | 1
|
2019-08-19T06:06:13.000Z
|
2019-08-19T06:06:13.000Z
|
# -*- coding: utf-8 -*-
from autograd.blocks.trigo import sin, cos
from autograd.variable import Variable
import numpy as np
import autograd as ad
from autograd import config
class test():
def __init__(self, x):
self.x=x
def parent(self):
print('parent')
class sub(test):
def __init__(self, x):
super().__init__(x)
def parent(self):
print('child')
super().parent()
t=sub(2)
t.parent()
| 15.78125
| 42
| 0.552475
| 258
| 0.510891
| 0
| 0
| 0
| 0
| 0
| 0
| 38
| 0.075248
|
6c6a90b147afe488a76460582fd0b95042612fc0
| 135
|
py
|
Python
|
PySpace/using_sys.py
|
dralee/LearningRepository
|
4324d3c5ac1a12dde17ae70c1eb7f3d36a047ba4
|
[
"Apache-2.0"
] | null | null | null |
PySpace/using_sys.py
|
dralee/LearningRepository
|
4324d3c5ac1a12dde17ae70c1eb7f3d36a047ba4
|
[
"Apache-2.0"
] | null | null | null |
PySpace/using_sys.py
|
dralee/LearningRepository
|
4324d3c5ac1a12dde17ae70c1eb7f3d36a047ba4
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
# Filename: using_sys.py
import sys
print('The command line arguments are:')
for i in sys.argv:
print(i)
print('\n\nThe Python path is:', sys.path, '\n')
| 15
| 37
| 0.674074
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 98
| 0.586826
|
6c6f498aea5f5f14a181bf4e682dea6414249ebe
| 1,749
|
py
|
Python
|
gaussian_filter.py
|
baiching/Paper-Implementations
|
56136a88a64885270adbefd6999815a1ad6f56a2
|
[
"MIT"
] | null | null | null |
gaussian_filter.py
|
baiching/Paper-Implementations
|
56136a88a64885270adbefd6999815a1ad6f56a2
|
[
"MIT"
] | null | null | null |
gaussian_filter.py
|
baiching/Paper-Implementations
|
56136a88a64885270adbefd6999815a1ad6f56a2
|
[
"MIT"
] | null | null | null |
import math
import numbers
import torch
from torch import nn
from torch.nn import functional as F
def gaussian_filter(in_channel, out_channel, kernel_size=15, sigma=3):
"""
    This method returns a 2D Gaussian filter implemented as a fixed depthwise convolution.
    input :
        in_channel : Number of input channels
        out_channel : Expected number of output channels
        kernel_size : size of the filter (H x H)
        sigma : standard deviation of the Gaussian
    output:
        returns : gaussian_filter, an nn.Conv2d whose weights are frozen (requires_grad=False)
"""
# Create a x, y coordinate grid of shape (kernel_size, kernel_size, 2)
x_cord = torch.arange(kernel_size)
x_grid = x_cord.repeat(kernel_size).view(kernel_size, kernel_size)
y_grid = x_grid.t()
xy_grid = torch.stack([x_grid, y_grid], dim=-1)
mean = (kernel_size - 1)/2.
variance = sigma**2.
# Calculate the 2-dimensional gaussian kernel which is
# the product of two gaussian distributions for two different
# variables (in this case called x and y)
gaussian_kernel = (1./(2.*math.pi*variance)) *\
torch.exp(
-torch.sum((xy_grid - mean)**2., dim=-1) /\
(2*variance)
)
# Make sure sum of values in gaussian kernel equals 1.
gaussian_kernel = gaussian_kernel / torch.sum(gaussian_kernel)
# Reshape to 2d depthwise convolutional weight
gaussian_kernel = gaussian_kernel.view(1, 1, kernel_size, kernel_size)
gaussian_kernel = gaussian_kernel.repeat(in_channel, 1, 1, 1)
gaussian_filter = nn.Conv2d(in_channels=in_channel, out_channels=out_channel,
kernel_size=kernel_size, groups=in_channel, bias=False)
gaussian_filter.weight.data = gaussian_kernel
gaussian_filter.weight.requires_grad = False
return gaussian_filter
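if __name__ == '__main__':
    # Hedged usage sketch (not part of the original file): blur a random 3-channel image.
    # The shapes below are illustrative; with no padding the 15x15 kernel shrinks each
    # spatial dimension by 14 pixels.
    blur = gaussian_filter(in_channel=3, out_channel=3, kernel_size=15, sigma=3)
    image = torch.rand(1, 3, 64, 64)       # N x C x H x W
    smoothed = blur(image)
    print(smoothed.shape)                  # expected: torch.Size([1, 3, 50, 50])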
| 34.98
| 83
| 0.675815
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 594
| 0.339623
|
6c6ff29fbade9a404f47dd54164a91e8e0704f4b
| 664
|
py
|
Python
|
opfu/stock.py
|
XavierDingRotman/OptionsFutures
|
bab0de0d66efe39f05e9ddf59460ec76547d9ada
|
[
"Apache-2.0"
] | 1
|
2020-07-05T20:54:15.000Z
|
2020-07-05T20:54:15.000Z
|
opfu/stock.py
|
XavierDingRotman/OptionsFutures
|
bab0de0d66efe39f05e9ddf59460ec76547d9ada
|
[
"Apache-2.0"
] | null | null | null |
opfu/stock.py
|
XavierDingRotman/OptionsFutures
|
bab0de0d66efe39f05e9ddf59460ec76547d9ada
|
[
"Apache-2.0"
] | null | null | null |
from opfu.security import Security
class Stock(Security):
def __init__(self, S, T, is_short=False):
self.S = S
self.K = self.S
self.T = T
Security.__init__(self, is_short, price=0)
def payoff_long(self, P):
return P - self.S
def graph_payoff(self, start=0, end=None, num=100):
if end == None:
end = self.S * 2
Security.graph_payoff(self, start, end, num)
def get_bsm_price(self):
return self.S
def greek_letter(self, greek, dd=0, method="BSM"):
if greek == "delta":
return 1
if greek == "gamma":
return 0
return 0
| 23.714286
| 55
| 0.554217
| 626
| 0.942771
| 0
| 0
| 0
| 0
| 0
| 0
| 19
| 0.028614
|
6c7066dd2f2223bc38f4edca28dbdaad3e0c39bc
| 172
|
py
|
Python
|
ABC103/ABC103a.py
|
VolgaKurvar/AtCoder
|
21acb489f1594bbb1cdc64fbf8421d876b5b476d
|
[
"Unlicense"
] | null | null | null |
ABC103/ABC103a.py
|
VolgaKurvar/AtCoder
|
21acb489f1594bbb1cdc64fbf8421d876b5b476d
|
[
"Unlicense"
] | null | null | null |
ABC103/ABC103a.py
|
VolgaKurvar/AtCoder
|
21acb489f1594bbb1cdc64fbf8421d876b5b476d
|
[
"Unlicense"
] | null | null | null |
# ABC103a
import sys
input = sys.stdin.readline
sys.setrecursionlimit(10**6)
a = list(map(int, input().split()))
a.sort(reverse=True)
print(abs(a[1]-a[0])+abs(a[1]-a[2]))
| 19.111111
| 36
| 0.674419
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 9
| 0.052326
|
6c707e42c5437ebc563efca0ace739aedca17496
| 283
|
py
|
Python
|
awsthreatprep/config.py
|
cclauss/ThreatPrep
|
b1881be239e7b86d86acc70a207989d459bd9d79
|
[
"MIT"
] | 50
|
2016-08-05T03:33:00.000Z
|
2022-02-16T13:52:15.000Z
|
awsthreatprep/config.py
|
cclauss/ThreatPrep
|
b1881be239e7b86d86acc70a207989d459bd9d79
|
[
"MIT"
] | null | null | null |
awsthreatprep/config.py
|
cclauss/ThreatPrep
|
b1881be239e7b86d86acc70a207989d459bd9d79
|
[
"MIT"
] | 14
|
2017-06-26T02:54:43.000Z
|
2021-11-17T07:38:52.000Z
|
import os
config = {
#iam
'ACCOUNT_INACTIVE_DAYS': 30, #Accounts are inactive if not used for 30 days
    'PASSWORD_ROTATION_DAYS': 90, #Passwords should be rotated every 90 days
'ACCESS_KEY_ROTATION_DAYS': 90 #Access Keys should be rotated every 90 days
}
| 31.444444
| 85
| 0.696113
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 208
| 0.734982
|
6c72586f407f6e08ecae9c71f47245060e33b3dd
| 28,356
|
py
|
Python
|
widgets/RichTextCtrl.py
|
iubica/wx-portfolio
|
12101986db72bcaffd9b744d514d6f9f651ad5a1
|
[
"MIT"
] | 3
|
2018-03-19T07:57:10.000Z
|
2021-07-05T08:55:14.000Z
|
widgets/RichTextCtrl.py
|
iubica/wx-portfolio
|
12101986db72bcaffd9b744d514d6f9f651ad5a1
|
[
"MIT"
] | 6
|
2020-03-24T15:40:18.000Z
|
2021-12-13T19:46:09.000Z
|
widgets/RichTextCtrl.py
|
iubica/wx-portfolio
|
12101986db72bcaffd9b744d514d6f9f651ad5a1
|
[
"MIT"
] | 4
|
2018-03-29T21:59:55.000Z
|
2019-12-16T14:56:38.000Z
|
#!/usr/bin/env python
from six import BytesIO
import wx
import wx.richtext as rt
import images
#----------------------------------------------------------------------
class RichTextFrame(wx.Frame):
def __init__(self, *args, **kw):
wx.Frame.__init__(self, *args, **kw)
self.MakeMenuBar()
self.MakeToolBar()
self.CreateStatusBar()
self.SetStatusText("Welcome to wx.richtext.RichTextCtrl!")
        self.rtc = rt.RichTextCtrl(self, style=wx.VSCROLL|wx.HSCROLL|wx.NO_BORDER)
wx.CallAfter(self.rtc.SetFocus)
self.rtc.Freeze()
self.rtc.BeginSuppressUndo()
self.rtc.BeginParagraphSpacing(0, 20)
self.rtc.BeginAlignment(wx.TEXT_ALIGNMENT_CENTRE)
self.rtc.BeginBold()
self.rtc.BeginFontSize(14)
self.rtc.WriteText("Welcome to wxRichTextCtrl, a wxWidgets control for editing and presenting styled text and images")
self.rtc.EndFontSize()
self.rtc.Newline()
self.rtc.BeginItalic()
self.rtc.WriteText("by Julian Smart")
self.rtc.EndItalic()
self.rtc.EndBold()
self.rtc.Newline()
self.rtc.WriteImage(images._rt_zebra.GetImage())
self.rtc.EndAlignment()
self.rtc.Newline()
self.rtc.Newline()
self.rtc.WriteText("What can you do with this thing? ")
self.rtc.WriteImage(images._rt_smiley.GetImage())
self.rtc.WriteText(" Well, you can change text ")
self.rtc.BeginTextColour((255, 0, 0))
self.rtc.WriteText("colour, like this red bit.")
self.rtc.EndTextColour()
self.rtc.BeginTextColour((0, 0, 255))
self.rtc.WriteText(" And this blue bit.")
self.rtc.EndTextColour()
self.rtc.WriteText(" Naturally you can make things ")
self.rtc.BeginBold()
self.rtc.WriteText("bold ")
self.rtc.EndBold()
self.rtc.BeginItalic()
self.rtc.WriteText("or italic ")
self.rtc.EndItalic()
self.rtc.BeginUnderline()
self.rtc.WriteText("or underlined.")
self.rtc.EndUnderline()
self.rtc.BeginFontSize(14)
self.rtc.WriteText(" Different font sizes on the same line is allowed, too.")
self.rtc.EndFontSize()
self.rtc.WriteText(" Next we'll show an indented paragraph.")
self.rtc.BeginLeftIndent(60)
self.rtc.Newline()
self.rtc.WriteText("It was in January, the most down-trodden month of an Edinburgh winter. An attractive woman came into the cafe, which is nothing remarkable.")
self.rtc.EndLeftIndent()
self.rtc.Newline()
self.rtc.WriteText("Next, we'll show a first-line indent, achieved using BeginLeftIndent(100, -40).")
self.rtc.BeginLeftIndent(100, -40)
self.rtc.Newline()
self.rtc.WriteText("It was in January, the most down-trodden month of an Edinburgh winter. An attractive woman came into the cafe, which is nothing remarkable.")
self.rtc.EndLeftIndent()
self.rtc.Newline()
self.rtc.WriteText("Numbered bullets are possible, again using sub-indents:")
self.rtc.BeginNumberedBullet(1, 100, 60)
self.rtc.Newline()
self.rtc.WriteText("This is my first item. Note that wxRichTextCtrl doesn't automatically do numbering, but this will be added later.")
self.rtc.EndNumberedBullet()
self.rtc.BeginNumberedBullet(2, 100, 60)
self.rtc.Newline()
self.rtc.WriteText("This is my second item.")
self.rtc.EndNumberedBullet()
self.rtc.Newline()
self.rtc.WriteText("The following paragraph is right-indented:")
self.rtc.BeginRightIndent(200)
self.rtc.Newline()
self.rtc.WriteText("It was in January, the most down-trodden month of an Edinburgh winter. An attractive woman came into the cafe, which is nothing remarkable.")
self.rtc.EndRightIndent()
self.rtc.Newline()
self.rtc.WriteText("The following paragraph is right-aligned with 1.5 line spacing:")
self.rtc.BeginAlignment(wx.TEXT_ALIGNMENT_RIGHT)
self.rtc.BeginLineSpacing(wx.TEXT_ATTR_LINE_SPACING_HALF)
self.rtc.Newline()
self.rtc.WriteText("It was in January, the most down-trodden month of an Edinburgh winter. An attractive woman came into the cafe, which is nothing remarkable.")
self.rtc.EndLineSpacing()
self.rtc.EndAlignment()
self.rtc.Newline()
self.rtc.WriteText("Other notable features of wxRichTextCtrl include:")
self.rtc.BeginSymbolBullet('*', 100, 60)
self.rtc.Newline()
self.rtc.WriteText("Compatibility with wxTextCtrl API")
self.rtc.EndSymbolBullet()
self.rtc.BeginSymbolBullet('*', 100, 60)
self.rtc.Newline()
self.rtc.WriteText("Easy stack-based BeginXXX()...EndXXX() style setting in addition to SetStyle()")
self.rtc.EndSymbolBullet()
self.rtc.BeginSymbolBullet('*', 100, 60)
self.rtc.Newline()
self.rtc.WriteText("XML loading and saving")
self.rtc.EndSymbolBullet()
self.rtc.BeginSymbolBullet('*', 100, 60)
self.rtc.Newline()
self.rtc.WriteText("Undo/Redo, with batching option and Undo suppressing")
self.rtc.EndSymbolBullet()
self.rtc.BeginSymbolBullet('*', 100, 60)
self.rtc.Newline()
self.rtc.WriteText("Clipboard copy and paste")
self.rtc.EndSymbolBullet()
self.rtc.BeginSymbolBullet('*', 100, 60)
self.rtc.Newline()
self.rtc.WriteText("wxRichTextStyleSheet with named character and paragraph styles, and control for applying named styles")
self.rtc.EndSymbolBullet()
self.rtc.BeginSymbolBullet('*', 100, 60)
self.rtc.Newline()
self.rtc.WriteText("A design that can easily be extended to other content types, ultimately with text boxes, tables, controls, and so on")
self.rtc.EndSymbolBullet()
self.rtc.Newline()
self.rtc.WriteText("Note: this sample content was generated programmatically from within the MyFrame constructor in the demo. The images were loaded from inline XPMs. Enjoy wxRichTextCtrl!")
self.rtc.Newline()
self.rtc.Newline()
self.rtc.BeginFontSize(12)
self.rtc.BeginBold()
self.rtc.WriteText("Additional comments by David Woods:")
self.rtc.EndBold()
self.rtc.EndFontSize()
self.rtc.Newline()
self.rtc.WriteText("I find some of the RichTextCtrl method names, as used above, to be misleading. Some character styles are stacked in the RichTextCtrl, and they are removed in the reverse order from how they are added, regardless of the method called. Allow me to demonstrate what I mean.")
self.rtc.Newline()
self.rtc.WriteText('Start with plain text. ')
self.rtc.BeginBold()
self.rtc.WriteText('BeginBold() makes it bold. ')
self.rtc.BeginItalic()
self.rtc.WriteText('BeginItalic() makes it bold-italic. ')
self.rtc.EndBold()
self.rtc.WriteText('EndBold() should make it italic but instead makes it bold. ')
self.rtc.EndItalic()
self.rtc.WriteText('EndItalic() takes us back to plain text. ')
self.rtc.Newline()
self.rtc.WriteText('Start with plain text. ')
self.rtc.BeginBold()
self.rtc.WriteText('BeginBold() makes it bold. ')
self.rtc.BeginUnderline()
self.rtc.WriteText('BeginUnderline() makes it bold-underline. ')
self.rtc.EndBold()
self.rtc.WriteText('EndBold() should make it underline but instead makes it bold. ')
self.rtc.EndUnderline()
self.rtc.WriteText('EndUnderline() takes us back to plain text. ')
self.rtc.Newline()
self.rtc.WriteText('According to Julian, this functions "as expected" because of the way the RichTextCtrl is written. I wrote the SetFontStyle() method here to demonstrate a way to work with overlapping styles that solves this problem.')
self.rtc.Newline()
# Create and initialize text attributes
self.textAttr = rt.RichTextAttr()
self.SetFontStyle(fontColor=wx.Colour(0, 0, 0), fontBgColor=wx.Colour(255, 255, 255), fontFace='Times New Roman', fontSize=10, fontBold=False, fontItalic=False, fontUnderline=False)
self.rtc.WriteText('Start with plain text. ')
self.SetFontStyle(fontBold=True)
self.rtc.WriteText('Bold. ')
self.SetFontStyle(fontItalic=True)
self.rtc.WriteText('Bold-italic. ')
self.SetFontStyle(fontBold=False)
self.rtc.WriteText('Italic. ')
self.SetFontStyle(fontItalic=False)
self.rtc.WriteText('Back to plain text. ')
self.rtc.Newline()
self.rtc.WriteText('Start with plain text. ')
self.SetFontStyle(fontBold=True)
self.rtc.WriteText('Bold. ')
self.SetFontStyle(fontUnderline=True)
self.rtc.WriteText('Bold-Underline. ')
self.SetFontStyle(fontBold=False)
self.rtc.WriteText('Underline. ')
self.SetFontStyle(fontUnderline=False)
self.rtc.WriteText('Back to plain text. ')
self.rtc.Newline()
self.rtc.EndParagraphSpacing()
self.rtc.EndSuppressUndo()
self.rtc.Thaw()
def SetFontStyle(self, fontColor = None, fontBgColor = None, fontFace = None, fontSize = None,
fontBold = None, fontItalic = None, fontUnderline = None):
if fontColor:
self.textAttr.SetTextColour(fontColor)
if fontBgColor:
self.textAttr.SetBackgroundColour(fontBgColor)
if fontFace:
self.textAttr.SetFontFaceName(fontFace)
if fontSize:
self.textAttr.SetFontSize(fontSize)
if fontBold != None:
if fontBold:
self.textAttr.SetFontWeight(wx.FONTWEIGHT_BOLD)
else:
self.textAttr.SetFontWeight(wx.FONTWEIGHT_NORMAL)
if fontItalic != None:
if fontItalic:
self.textAttr.SetFontStyle(wx.FONTSTYLE_ITALIC)
else:
self.textAttr.SetFontStyle(wx.FONTSTYLE_NORMAL)
if fontUnderline != None:
if fontUnderline:
self.textAttr.SetFontUnderlined(True)
else:
self.textAttr.SetFontUnderlined(False)
self.rtc.SetDefaultStyle(self.textAttr)
def OnURL(self, evt):
wx.MessageBox(evt.GetString(), "URL Clicked")
def OnFileOpen(self, evt):
# This gives us a string suitable for the file dialog based on
# the file handlers that are loaded
wildcard, types = rt.RichTextBuffer.GetExtWildcard(save=False)
dlg = wx.FileDialog(self, "Choose a filename",
wildcard=wildcard,
style=wx.FD_OPEN)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
if path:
fileType = types[dlg.GetFilterIndex()]
self.rtc.LoadFile(path, fileType)
dlg.Destroy()
def OnFileSave(self, evt):
if not self.rtc.GetFilename():
self.OnFileSaveAs(evt)
return
self.rtc.SaveFile()
def OnFileSaveAs(self, evt):
wildcard, types = rt.RichTextBuffer.GetExtWildcard(save=True)
dlg = wx.FileDialog(self, "Choose a filename",
wildcard=wildcard,
style=wx.FD_SAVE)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
if path:
fileType = types[dlg.GetFilterIndex()]
ext = rt.RichTextBuffer.FindHandlerByType(fileType).GetExtension()
if not path.endswith(ext):
path += '.' + ext
self.rtc.SaveFile(path, fileType)
dlg.Destroy()
def OnFileViewHTML(self, evt):
# Get an instance of the html file handler, use it to save the
# document to a StringIO stream, and then display the
# resulting html text in a dialog with a HtmlWindow.
handler = rt.RichTextHTMLHandler()
handler.SetFlags(rt.RICHTEXT_HANDLER_SAVE_IMAGES_TO_MEMORY)
handler.SetFontSizeMapping([7,9,11,12,14,22,100])
stream = BytesIO()
if not handler.SaveStream(self.rtc.GetBuffer(), stream):
return
import wx.html
dlg = wx.Dialog(self, title="HTML", style=wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER)
html = wx.html.HtmlWindow(dlg, size=(500,400), style=wx.BORDER_SUNKEN)
html.SetPage(stream.getvalue())
btn = wx.Button(dlg, wx.ID_CANCEL)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(html, 1, wx.ALL|wx.EXPAND, 5)
sizer.Add(btn, 0, wx.ALL|wx.CENTER, 10)
dlg.SetSizer(sizer)
sizer.Fit(dlg)
dlg.ShowModal()
handler.DeleteTemporaryImages()
def OnFileExit(self, evt):
self.Close(True)
def OnBold(self, evt):
self.rtc.ApplyBoldToSelection()
def OnItalic(self, evt):
self.rtc.ApplyItalicToSelection()
def OnUnderline(self, evt):
self.rtc.ApplyUnderlineToSelection()
def OnAlignLeft(self, evt):
self.rtc.ApplyAlignmentToSelection(wx.TEXT_ALIGNMENT_LEFT)
def OnAlignRight(self, evt):
self.rtc.ApplyAlignmentToSelection(wx.TEXT_ALIGNMENT_RIGHT)
def OnAlignCenter(self, evt):
self.rtc.ApplyAlignmentToSelection(wx.TEXT_ALIGNMENT_CENTRE)
def OnIndentMore(self, evt):
attr = wx.TextAttr()
attr.SetFlags(wx.TEXT_ATTR_LEFT_INDENT)
ip = self.rtc.GetInsertionPoint()
if self.rtc.GetStyle(ip, attr):
r = rt.RichTextRange(ip, ip)
if self.rtc.HasSelection():
r = self.rtc.GetSelectionRange()
attr.SetLeftIndent(attr.GetLeftIndent() + 100)
attr.SetFlags(wx.TEXT_ATTR_LEFT_INDENT)
self.rtc.SetStyle(r, attr)
def OnIndentLess(self, evt):
attr = wx.TextAttr()
attr.SetFlags(wx.TEXT_ATTR_LEFT_INDENT)
ip = self.rtc.GetInsertionPoint()
if self.rtc.GetStyle(ip, attr):
r = rt.RichTextRange(ip, ip)
if self.rtc.HasSelection():
r = self.rtc.GetSelectionRange()
if attr.GetLeftIndent() >= 100:
attr.SetLeftIndent(attr.GetLeftIndent() - 100)
attr.SetFlags(wx.TEXT_ATTR_LEFT_INDENT)
self.rtc.SetStyle(r, attr)
def OnParagraphSpacingMore(self, evt):
attr = wx.TextAttr()
attr.SetFlags(wx.TEXT_ATTR_PARA_SPACING_AFTER)
ip = self.rtc.GetInsertionPoint()
if self.rtc.GetStyle(ip, attr):
r = rt.RichTextRange(ip, ip)
if self.rtc.HasSelection():
r = self.rtc.GetSelectionRange()
            attr.SetParagraphSpacingAfter(attr.GetParagraphSpacingAfter() + 20)
attr.SetFlags(wx.TEXT_ATTR_PARA_SPACING_AFTER)
self.rtc.SetStyle(r, attr)
def OnParagraphSpacingLess(self, evt):
attr = wx.TextAttr()
attr.SetFlags(wx.TEXT_ATTR_PARA_SPACING_AFTER)
ip = self.rtc.GetInsertionPoint()
if self.rtc.GetStyle(ip, attr):
r = rt.RichTextRange(ip, ip)
if self.rtc.HasSelection():
r = self.rtc.GetSelectionRange()
if attr.GetParagraphSpacingAfter() >= 20:
                attr.SetParagraphSpacingAfter(attr.GetParagraphSpacingAfter() - 20)
attr.SetFlags(wx.TEXT_ATTR_PARA_SPACING_AFTER)
self.rtc.SetStyle(r, attr)
def OnLineSpacingSingle(self, evt):
attr = wx.TextAttr()
attr.SetFlags(wx.TEXT_ATTR_LINE_SPACING)
ip = self.rtc.GetInsertionPoint()
if self.rtc.GetStyle(ip, attr):
r = rt.RichTextRange(ip, ip)
if self.rtc.HasSelection():
r = self.rtc.GetSelectionRange()
attr.SetFlags(wx.TEXT_ATTR_LINE_SPACING)
attr.SetLineSpacing(10)
self.rtc.SetStyle(r, attr)
def OnLineSpacingHalf(self, evt):
attr = wx.TextAttr()
attr.SetFlags(wx.TEXT_ATTR_LINE_SPACING)
ip = self.rtc.GetInsertionPoint()
if self.rtc.GetStyle(ip, attr):
r = rt.RichTextRange(ip, ip)
if self.rtc.HasSelection():
r = self.rtc.GetSelectionRange()
attr.SetFlags(wx.TEXT_ATTR_LINE_SPACING)
attr.SetLineSpacing(15)
self.rtc.SetStyle(r, attr)
def OnLineSpacingDouble(self, evt):
attr = wx.TextAttr()
attr.SetFlags(wx.TEXT_ATTR_LINE_SPACING)
ip = self.rtc.GetInsertionPoint()
if self.rtc.GetStyle(ip, attr):
r = rt.RichTextRange(ip, ip)
if self.rtc.HasSelection():
r = self.rtc.GetSelectionRange()
attr.SetFlags(wx.TEXT_ATTR_LINE_SPACING)
attr.SetLineSpacing(20)
self.rtc.SetStyle(r, attr)
def OnFont(self, evt):
if not self.rtc.HasSelection():
return
r = self.rtc.GetSelectionRange()
fontData = wx.FontData()
fontData.EnableEffects(False)
attr = wx.TextAttr()
attr.SetFlags(wx.TEXT_ATTR_FONT)
if self.rtc.GetStyle(self.rtc.GetInsertionPoint(), attr):
fontData.SetInitialFont(attr.GetFont())
dlg = wx.FontDialog(self, fontData)
if dlg.ShowModal() == wx.ID_OK:
fontData = dlg.GetFontData()
font = fontData.GetChosenFont()
if font:
attr.SetFlags(wx.TEXT_ATTR_FONT)
attr.SetFont(font)
self.rtc.SetStyle(r, attr)
dlg.Destroy()
def OnColour(self, evt):
colourData = wx.ColourData()
attr = wx.TextAttr()
attr.SetFlags(wx.TEXT_ATTR_TEXT_COLOUR)
if self.rtc.GetStyle(self.rtc.GetInsertionPoint(), attr):
colourData.SetColour(attr.GetTextColour())
dlg = wx.ColourDialog(self, colourData)
if dlg.ShowModal() == wx.ID_OK:
colourData = dlg.GetColourData()
colour = colourData.GetColour()
if colour:
if not self.rtc.HasSelection():
self.rtc.BeginTextColour(colour)
else:
r = self.rtc.GetSelectionRange()
attr.SetFlags(wx.TEXT_ATTR_TEXT_COLOUR)
attr.SetTextColour(colour)
self.rtc.SetStyle(r, attr)
dlg.Destroy()
def OnUpdateBold(self, evt):
evt.Check(self.rtc.IsSelectionBold())
def OnUpdateItalic(self, evt):
evt.Check(self.rtc.IsSelectionItalics())
def OnUpdateUnderline(self, evt):
evt.Check(self.rtc.IsSelectionUnderlined())
def OnUpdateAlignLeft(self, evt):
evt.Check(self.rtc.IsSelectionAligned(wx.TEXT_ALIGNMENT_LEFT))
def OnUpdateAlignCenter(self, evt):
evt.Check(self.rtc.IsSelectionAligned(wx.TEXT_ALIGNMENT_CENTRE))
def OnUpdateAlignRight(self, evt):
evt.Check(self.rtc.IsSelectionAligned(wx.TEXT_ALIGNMENT_RIGHT))
def ForwardEvent(self, evt):
# The RichTextCtrl can handle menu and update events for undo,
# redo, cut, copy, paste, delete, and select all, so just
# forward the event to it.
self.rtc.ProcessEvent(evt)
def MakeMenuBar(self):
def doBind(item, handler, updateUI=None):
self.Bind(wx.EVT_MENU, handler, item)
if updateUI is not None:
self.Bind(wx.EVT_UPDATE_UI, updateUI, item)
fileMenu = wx.Menu()
doBind( fileMenu.Append(-1, "&Open\tCtrl+O", "Open a file"),
self.OnFileOpen )
doBind( fileMenu.Append(-1, "&Save\tCtrl+S", "Save a file"),
self.OnFileSave )
doBind( fileMenu.Append(-1, "&Save As...\tF12", "Save to a new file"),
self.OnFileSaveAs )
fileMenu.AppendSeparator()
doBind( fileMenu.Append(-1, "&View as HTML", "View HTML"),
self.OnFileViewHTML)
fileMenu.AppendSeparator()
doBind( fileMenu.Append(-1, "E&xit\tCtrl+Q", "Quit this program"),
self.OnFileExit )
editMenu = wx.Menu()
doBind( editMenu.Append(wx.ID_UNDO, "&Undo\tCtrl+Z"),
self.ForwardEvent, self.ForwardEvent)
doBind( editMenu.Append(wx.ID_REDO, "&Redo\tCtrl+Y"),
self.ForwardEvent, self.ForwardEvent )
editMenu.AppendSeparator()
doBind( editMenu.Append(wx.ID_CUT, "Cu&t\tCtrl+X"),
self.ForwardEvent, self.ForwardEvent )
doBind( editMenu.Append(wx.ID_COPY, "&Copy\tCtrl+C"),
self.ForwardEvent, self.ForwardEvent)
doBind( editMenu.Append(wx.ID_PASTE, "&Paste\tCtrl+V"),
self.ForwardEvent, self.ForwardEvent)
doBind( editMenu.Append(wx.ID_CLEAR, "&Delete\tDel"),
self.ForwardEvent, self.ForwardEvent)
editMenu.AppendSeparator()
doBind( editMenu.Append(wx.ID_SELECTALL, "Select A&ll\tCtrl+A"),
self.ForwardEvent, self.ForwardEvent )
#doBind( editMenu.AppendSeparator(), )
#doBind( editMenu.Append(-1, "&Find...\tCtrl+F"), )
#doBind( editMenu.Append(-1, "&Replace...\tCtrl+R"), )
formatMenu = wx.Menu()
doBind( formatMenu.AppendCheckItem(-1, "&Bold\tCtrl+B"),
self.OnBold, self.OnUpdateBold)
doBind( formatMenu.AppendCheckItem(-1, "&Italic\tCtrl+I"),
self.OnItalic, self.OnUpdateItalic)
doBind( formatMenu.AppendCheckItem(-1, "&Underline\tCtrl+U"),
self.OnUnderline, self.OnUpdateUnderline)
formatMenu.AppendSeparator()
doBind( formatMenu.AppendCheckItem(-1, "L&eft Align"),
self.OnAlignLeft, self.OnUpdateAlignLeft)
doBind( formatMenu.AppendCheckItem(-1, "&Centre"),
self.OnAlignCenter, self.OnUpdateAlignCenter)
doBind( formatMenu.AppendCheckItem(-1, "&Right Align"),
self.OnAlignRight, self.OnUpdateAlignRight)
formatMenu.AppendSeparator()
doBind( formatMenu.Append(-1, "Indent &More"), self.OnIndentMore)
doBind( formatMenu.Append(-1, "Indent &Less"), self.OnIndentLess)
formatMenu.AppendSeparator()
doBind( formatMenu.Append(-1, "Increase Paragraph &Spacing"), self.OnParagraphSpacingMore)
doBind( formatMenu.Append(-1, "Decrease &Paragraph Spacing"), self.OnParagraphSpacingLess)
formatMenu.AppendSeparator()
doBind( formatMenu.Append(-1, "Normal Line Spacing"), self.OnLineSpacingSingle)
doBind( formatMenu.Append(-1, "1.5 Line Spacing"), self.OnLineSpacingHalf)
doBind( formatMenu.Append(-1, "Double Line Spacing"), self.OnLineSpacingDouble)
formatMenu.AppendSeparator()
doBind( formatMenu.Append(-1, "&Font..."), self.OnFont)
mb = wx.MenuBar()
mb.Append(fileMenu, "&File")
mb.Append(editMenu, "&Edit")
mb.Append(formatMenu, "F&ormat")
self.SetMenuBar(mb)
def MakeToolBar(self):
def doBind(item, handler, updateUI=None):
self.Bind(wx.EVT_TOOL, handler, item)
if updateUI is not None:
self.Bind(wx.EVT_UPDATE_UI, updateUI, item)
tbar = self.CreateToolBar()
doBind( tbar.AddTool(-1, '', images._rt_open.GetBitmap(),
shortHelp="Open"), self.OnFileOpen)
doBind( tbar.AddTool(-1, '', images._rt_save.GetBitmap(),
shortHelp="Save"), self.OnFileSave)
tbar.AddSeparator()
doBind( tbar.AddTool(wx.ID_CUT, '', images._rt_cut.GetBitmap(),
shortHelp="Cut"), self.ForwardEvent, self.ForwardEvent)
doBind( tbar.AddTool(wx.ID_COPY, '', images._rt_copy.GetBitmap(),
shortHelp="Copy"), self.ForwardEvent, self.ForwardEvent)
doBind( tbar.AddTool(wx.ID_PASTE, '', images._rt_paste.GetBitmap(),
shortHelp="Paste"), self.ForwardEvent, self.ForwardEvent)
tbar.AddSeparator()
doBind( tbar.AddTool(wx.ID_UNDO, '', images._rt_undo.GetBitmap(),
shortHelp="Undo"), self.ForwardEvent, self.ForwardEvent)
doBind( tbar.AddTool(wx.ID_REDO, '', images._rt_redo.GetBitmap(),
shortHelp="Redo"), self.ForwardEvent, self.ForwardEvent)
tbar.AddSeparator()
doBind( tbar.AddCheckTool(-1, '', images._rt_bold.GetBitmap(),
shortHelp="Bold"), self.OnBold, self.OnUpdateBold)
doBind( tbar.AddCheckTool(-1, '', images._rt_italic.GetBitmap(),
shortHelp="Italic"), self.OnItalic, self.OnUpdateItalic)
doBind( tbar.AddCheckTool(-1, '', images._rt_underline.GetBitmap(),
shortHelp="Underline"), self.OnUnderline, self.OnUpdateUnderline)
tbar.AddSeparator()
doBind( tbar.AddCheckTool(-1, '', images._rt_alignleft.GetBitmap(),
shortHelp="Align Left"), self.OnAlignLeft, self.OnUpdateAlignLeft)
doBind( tbar.AddCheckTool(-1, '', images._rt_centre.GetBitmap(),
shortHelp="Center"), self.OnAlignCenter, self.OnUpdateAlignCenter)
doBind( tbar.AddCheckTool(-1, '', images._rt_alignright.GetBitmap(),
shortHelp="Align Right"), self.OnAlignRight, self.OnUpdateAlignRight)
tbar.AddSeparator()
doBind( tbar.AddTool(-1, '', images._rt_indentless.GetBitmap(),
shortHelp="Indent Less"), self.OnIndentLess)
doBind( tbar.AddTool(-1, '', images._rt_indentmore.GetBitmap(),
shortHelp="Indent More"), self.OnIndentMore)
tbar.AddSeparator()
doBind( tbar.AddTool(-1, '', images._rt_font.GetBitmap(),
shortHelp="Font"), self.OnFont)
doBind( tbar.AddTool(-1, '', images._rt_colour.GetBitmap(),
shortHelp="Font Colour"), self.OnColour)
tbar.Realize()
#----------------------------------------------------------------------
class TestPanel(wx.Panel):
def __init__(self, parent, log):
self.log = log
wx.Panel.__init__(self, parent, -1)
b = wx.Button(self, -1, "Show the RichTextCtrl sample", (50,50))
self.Bind(wx.EVT_BUTTON, self.OnButton, b)
self.AddRTCHandlers()
def AddRTCHandlers(self):
# make sure we haven't already added them.
if rt.RichTextBuffer.FindHandlerByType(rt.RICHTEXT_TYPE_HTML) is not None:
return
# This would normally go in your app's OnInit method. I'm
# not sure why these file handlers are not loaded by
# default by the C++ richtext code, I guess it's so you
# can change the name or extension if you wanted...
rt.RichTextBuffer.AddHandler(rt.RichTextHTMLHandler())
rt.RichTextBuffer.AddHandler(rt.RichTextXMLHandler())
# ...like this
rt.RichTextBuffer.AddHandler(rt.RichTextXMLHandler(name="Other XML",
ext="ox",
type=99))
# This is needed for the view as HTML option since we tell it
# to store the images in the memory file system.
wx.FileSystem.AddHandler(wx.MemoryFSHandler())
def OnButton(self, evt):
win = RichTextFrame(self, -1, "wx.richtext.RichTextCtrl",
size=(700, 500),
style = wx.DEFAULT_FRAME_STYLE)
win.Show(True)
# give easy access to the demo's PyShell if it's running
self.rtfrm = win
self.rtc = win.rtc
#----------------------------------------------------------------------
def runTest(frame, nb, log):
win = TestPanel(nb, log)
return win
#----------------------------------------------------------------------
overview = """<html><body>
<h2><center>wx.richtext.RichTextCtrl</center></h2>
</body></html>
"""
if __name__ == '__main__':
import sys,os
import run
run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
| 38.684857
| 302
| 0.608513
| 27,654
| 0.975243
| 0
| 0
| 0
| 0
| 0
| 0
| 5,456
| 0.192411
|
6c74c309dcd00dafc4c1aae00a0c378fd733102d
| 1,105
|
py
|
Python
|
src/user/models.py
|
fga-gpp-mds/2017.2-Grupo12
|
a90f94d0d497f625ab82ef44a907561f3bfa835f
|
[
"MIT"
] | 6
|
2017-10-02T12:07:40.000Z
|
2017-12-14T11:40:07.000Z
|
src/user/models.py
|
fga-gpp-mds/2017.2-Grupo12
|
a90f94d0d497f625ab82ef44a907561f3bfa835f
|
[
"MIT"
] | 92
|
2017-09-30T19:14:21.000Z
|
2017-12-14T04:41:16.000Z
|
src/user/models.py
|
fga-gpp-mds/2017.2-Grupo12
|
a90f94d0d497f625ab82ef44a907561f3bfa835f
|
[
"MIT"
] | 3
|
2017-09-06T00:49:38.000Z
|
2018-07-13T00:32:37.000Z
|
from django.db import models
from django.contrib.auth.models import User
class Person(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
name = models.CharField(max_length=64, null=False)
email = models.EmailField(max_length=100, null=False)
class Advisor(Person):
cpf = models.CharField(max_length=14, null=False)
tipo_cae = models.CharField(default='Municipal', max_length=9, null=False)
nome_cae = models.CharField(default='CAE', max_length=50, null=False)
cep = models.CharField(max_length=10, null=False)
bairro = models.CharField(max_length=30, null=False)
municipio = models.CharField(max_length=30, null=False)
uf = models.CharField(max_length=2, null=False)
class Meta:
permissions = (
('advisor', 'Advisor permissions'),
)
class President(Advisor):
class Meta:
permissions = (
('president', 'President permissions'),
)
class Administrator(Person):
class Meta:
permissions = (
('administrator', 'Administrator permissions'),
)
| 29.078947
| 78
| 0.673303
| 1,020
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 122
| 0.110407
|
6c768de90390e5fd0ea2640bab37a8869d234309
| 1,784
|
py
|
Python
|
lcd/nodemcu_gpio_lcd_test.py
|
petrkr/python_lcd
|
92e5d0211e5cef4dcc9078905f4bd53dc2cc78b4
|
[
"MIT"
] | 237
|
2015-07-19T21:33:01.000Z
|
2022-03-30T00:19:46.000Z
|
lcd/nodemcu_gpio_lcd_test.py
|
petrkr/python_lcd
|
92e5d0211e5cef4dcc9078905f4bd53dc2cc78b4
|
[
"MIT"
] | 25
|
2015-07-19T20:44:31.000Z
|
2022-01-26T10:42:07.000Z
|
lcd/nodemcu_gpio_lcd_test.py
|
petrkr/python_lcd
|
92e5d0211e5cef4dcc9078905f4bd53dc2cc78b4
|
[
"MIT"
] | 107
|
2015-09-05T12:54:55.000Z
|
2022-03-28T15:36:13.000Z
|
"""Implements a HD44780 character LCD connected via NodeMCU GPIO pins."""
from machine import Pin
from utime import sleep, ticks_ms
from nodemcu_gpio_lcd import GpioLcd
# Wiring used for this example:
#
# 1 - Vss (aka Ground) - Connect to one of the ground pins on you NodeMCU board.
# 2 - VDD - Connect to 3V
# 3 - VE (Contrast voltage) - I'll discuss this below
# 4 - RS (Register Select) connect to D0 (as per call to GpioLcd)
# 5 - RW (Read/Write) - connect to ground
# 6 - EN (Enable) connect to D1 (as per call to GpioLcd)
# 7 - D0 - leave unconnected
# 8 - D1 - leave unconnected
# 9 - D2 - leave unconnected
# 10 - D3 - leave unconnected
# 11 - D4 - connect to D2 (as per call to GpioLcd)
# 12 - D5 - connect to D3 (as per call to GpioLcd)
# 13 - D6 - connect to D4 (as per call to GpioLcd)
# 14 - D7 - connect to D5 (as per call to GpioLcd)
# 15 - A (BackLight Anode) - Connect to 3V
# 16 - K (Backlight Cathode) - Connect to Ground
#
# On 14-pin LCDs, there is no backlight, so pins 15 & 16 don't exist.
#
# The Contrast line (pin 3) typically connects to the center tap of a
# 10K potentiometer, and the other 2 legs of the 10K potentiometer are
# connected to pins 1 and 2 (Ground and VDD)
def test_main():
"""Test function for verifying basic functionality."""
print("Running test_main")
lcd = GpioLcd(rs_pin=Pin(16),
enable_pin=Pin(5),
d4_pin=Pin(4),
d5_pin=Pin(0),
d6_pin=Pin(2),
d7_pin=Pin(14),
num_lines=2, num_columns=20)
lcd.putstr("It Works!\nSecond Line")
sleep(3)
lcd.clear()
count = 0
while True:
lcd.move_to(0, 0)
lcd.putstr("%7d" % (ticks_ms() // 1000))
sleep(1)
count += 1
| 34.307692
| 81
| 0.627242
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,193
| 0.668722
|
6c77d3d22c710ab0e8e3582be4b79df9edb68531
| 11,579
|
py
|
Python
|
apps/life_sci/examples/reaction_prediction/rexgen_direct/utils.py
|
LunaBlack/dgl
|
bd1e48a51e348b0e8e25622325adeb5ddea1c0ea
|
[
"Apache-2.0"
] | 2
|
2021-12-09T12:36:13.000Z
|
2022-03-01T21:22:36.000Z
|
apps/life_sci/examples/reaction_prediction/rexgen_direct/utils.py
|
sherry-1001/dgl
|
60d2e7d3c928d43bbb18e7ab17c066451c49f649
|
[
"Apache-2.0"
] | null | null | null |
apps/life_sci/examples/reaction_prediction/rexgen_direct/utils.py
|
sherry-1001/dgl
|
60d2e7d3c928d43bbb18e7ab17c066451c49f649
|
[
"Apache-2.0"
] | 2
|
2020-12-07T09:34:01.000Z
|
2020-12-13T06:18:58.000Z
|
import dgl
import errno
import numpy as np
import os
import random
import torch
from collections import defaultdict
from rdkit import Chem
def mkdir_p(path):
"""Create a folder for the given path.
Parameters
----------
path: str
Folder to create
"""
try:
os.makedirs(path)
print('Created directory {}'.format(path))
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
print('Directory {} already exists.'.format(path))
else:
raise
def setup(args, seed=0):
"""Setup for the experiment:
1. Decide whether to use CPU or GPU for training
2. Fix random seed for python, NumPy and PyTorch.
Parameters
----------
seed : int
Random seed to use.
Returns
-------
args
Updated configuration
"""
assert args['max_k'] >= max(args['top_ks']), \
'Expect max_k to be no smaller than the possible options ' \
'of top_ks, got {:d} and {:d}'.format(args['max_k'], max(args['top_ks']))
if torch.cuda.is_available():
args['device'] = 'cuda:0'
else:
args['device'] = 'cpu'
# Set random seed
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
mkdir_p(args['result_path'])
return args
def collate(data):
"""Collate multiple datapoints
Parameters
----------
data : list of 7-tuples
Each tuple is for a single datapoint, consisting of
a reaction, graph edits in the reaction, an RDKit molecule instance for all reactants,
a DGLGraph for all reactants, a complete graph for all reactants, the features for each
pair of atoms and the labels for each pair of atoms.
Returns
-------
reactions : list of str
List of reactions.
graph_edits : list of str
List of graph edits in the reactions.
mols : list of rdkit.Chem.rdchem.Mol
List of RDKit molecule instances for the reactants.
batch_mol_graphs : DGLGraph
DGLGraph for a batch of molecular graphs.
batch_complete_graphs : DGLGraph
DGLGraph for a batch of complete graphs.
batch_atom_pair_labels : float32 tensor of shape (V, 10)
Labels of atom pairs in the batch of graphs.
"""
reactions, graph_edits, mols, mol_graphs, complete_graphs, \
atom_pair_feats, atom_pair_labels = map(list, zip(*data))
batch_mol_graphs = dgl.batch(mol_graphs)
batch_mol_graphs.set_n_initializer(dgl.init.zero_initializer)
batch_mol_graphs.set_e_initializer(dgl.init.zero_initializer)
batch_complete_graphs = dgl.batch(complete_graphs)
batch_complete_graphs.set_n_initializer(dgl.init.zero_initializer)
batch_complete_graphs.set_e_initializer(dgl.init.zero_initializer)
batch_complete_graphs.edata['feats'] = torch.cat(atom_pair_feats, dim=0)
batch_atom_pair_labels = torch.cat(atom_pair_labels, dim=0)
return reactions, graph_edits, mols, batch_mol_graphs, \
batch_complete_graphs, batch_atom_pair_labels
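# Usage sketch (not part of the original file; 'train_set' is an assumed dataset object
# yielding the 7-tuples documented above):
#   from torch.utils.data import DataLoader
#   train_loader = DataLoader(train_set, batch_size=20, collate_fn=collate)
# Each batch then unpacks into the 6-tuple returned above: reactions, graph_edits, mols,
# batch_mol_graphs, batch_complete_graphs and batch_atom_pair_labels.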
def reaction_center_prediction(device, model, mol_graphs, complete_graphs):
"""Perform a soft prediction on reaction center.
Parameters
----------
device : str
Device to use for computation, e.g. 'cpu', 'cuda:0'
model : nn.Module
Model for prediction.
mol_graphs : DGLGraph
DGLGraph for a batch of molecular graphs
complete_graphs : DGLGraph
DGLGraph for a batch of complete graphs
Returns
-------
scores : float32 tensor of shape (E_full, 5)
Predicted scores for each pair of atoms to perform one of the following
5 actions in reaction:
* The bond between them gets broken
* Forming a single bond
* Forming a double bond
* Forming a triple bond
* Forming an aromatic bond
biased_scores : float32 tensor of shape (E_full, 5)
Comparing to scores, a bias is added if the pair is for a same atom.
"""
node_feats = mol_graphs.ndata.pop('hv').to(device)
edge_feats = mol_graphs.edata.pop('he').to(device)
node_pair_feats = complete_graphs.edata.pop('feats').to(device)
return model(mol_graphs, complete_graphs, node_feats, edge_feats, node_pair_feats)
def rough_eval(complete_graphs, preds, labels, num_correct):
batch_size = complete_graphs.batch_size
start = 0
for i in range(batch_size):
end = start + complete_graphs.batch_num_edges[i]
preds_i = preds[start:end, :].flatten()
labels_i = labels[start:end, :].flatten()
for k in num_correct.keys():
topk_values, topk_indices = torch.topk(preds_i, k)
is_correct = labels_i[topk_indices].sum() == labels_i.sum().float().cpu().data.item()
num_correct[k].append(is_correct)
start = end
def rough_eval_on_a_loader(args, model, data_loader):
"""A rough evaluation of model performance in the middle of training.
For final evaluation, we will eliminate some possibilities based on prior knowledge.
Parameters
----------
args : dict
        Configurations for the experiment.
model : nn.Module
Model for reaction center prediction.
data_loader : torch.utils.data.DataLoader
Loader for fetching and batching data.
Returns
-------
str
        Message for the evaluation result.
"""
model.eval()
num_correct = {k: [] for k in args['top_ks']}
for batch_id, batch_data in enumerate(data_loader):
batch_reactions, batch_graph_edits, batch_mols, batch_mol_graphs, \
batch_complete_graphs, batch_atom_pair_labels = batch_data
with torch.no_grad():
pred, biased_pred = reaction_center_prediction(
args['device'], model, batch_mol_graphs, batch_complete_graphs)
rough_eval(batch_complete_graphs, biased_pred, batch_atom_pair_labels, num_correct)
msg = '|'
for k, correct_count in num_correct.items():
msg += ' acc@{:d} {:.4f} |'.format(k, np.mean(correct_count))
return msg
def eval(complete_graphs, preds, reactions, graph_edits, num_correct, max_k, easy):
"""Evaluate top-k accuracies for reaction center prediction.
Parameters
----------
complete_graphs : DGLGraph
DGLGraph for a batch of complete graphs
preds : float32 tensor of shape (E_full, 5)
Soft predictions for reaction center, E_full being the number of possible
atom-pairs and 5 being the number of possible bond changes
reactions : list of str
List of reactions.
graph_edits : list of str
List of graph edits in the reactions.
num_correct : dict
Counting the number of datapoints for meeting top-k accuracies.
max_k : int
Maximum number of atom pairs to be selected. This is intended to be larger
than max(num_correct.keys()) as we will filter out many atom pairs due to
considerations such as avoiding duplicates.
easy : bool
If True, reactants not contributing atoms to the product will be excluded in
top-k atom pair selection, which will make the task easier.
"""
# 0 for losing the bond
    # 1, 2, 3, 1.5 respectively for forming a single, double, triple or aromatic bond.
bond_change_to_id = {0.0: 0, 1:1, 2:2, 3:3, 1.5:4}
id_to_bond_change = {v: k for k, v in bond_change_to_id.items()}
num_change_types = len(bond_change_to_id)
batch_size = complete_graphs.batch_size
start = 0
for i in range(batch_size):
# Decide which atom-pairs will be considered.
reaction_i = reactions[i]
reaction_atoms_i = []
reaction_bonds_i = defaultdict(bool)
reactants_i, _, product_i = reaction_i.split('>')
product_mol_i = Chem.MolFromSmiles(product_i)
product_atoms_i = set([atom.GetAtomMapNum() for atom in product_mol_i.GetAtoms()])
for reactant in reactants_i.split('.'):
reactant_mol = Chem.MolFromSmiles(reactant)
reactant_atoms = [atom.GetAtomMapNum() for atom in reactant_mol.GetAtoms()]
if (len(set(reactant_atoms) & product_atoms_i) > 0) or (not easy):
reaction_atoms_i.extend(reactant_atoms)
for bond in reactant_mol.GetBonds():
end_atoms = sorted([bond.GetBeginAtom().GetAtomMapNum(),
bond.GetEndAtom().GetAtomMapNum()])
bond = tuple(end_atoms + [bond.GetBondTypeAsDouble()])
reaction_bonds_i[bond] = True
num_nodes = complete_graphs.batch_num_nodes[i]
end = start + complete_graphs.batch_num_edges[i]
preds_i = preds[start:end, :].flatten()
candidate_bonds = []
topk_values, topk_indices = torch.topk(preds_i, max_k)
for j in range(max_k):
preds_i_j = topk_indices[j].cpu().item()
# A bond change can be either losing the bond or forming a
# single, double, triple or aromatic bond
change_id = preds_i_j % num_change_types
change_type = id_to_bond_change[change_id]
pair_id = preds_i_j // num_change_types
atom1 = pair_id // num_nodes + 1
atom2 = pair_id % num_nodes + 1
# Avoid duplicates and an atom cannot form a bond with itself
if atom1 >= atom2:
continue
if atom1 not in reaction_atoms_i:
continue
if atom2 not in reaction_atoms_i:
continue
candidate = (int(atom1), int(atom2), float(change_type))
if reaction_bonds_i[candidate]:
continue
candidate_bonds.append(candidate)
gold_bonds = []
gold_edits = graph_edits[i]
for edit in gold_edits.split(';'):
atom1, atom2, change_type = edit.split('-')
atom1, atom2 = int(atom1), int(atom2)
gold_bonds.append((min(atom1, atom2), max(atom1, atom2), float(change_type)))
for k in num_correct.keys():
if set(gold_bonds) <= set(candidate_bonds[:k]):
num_correct[k] += 1
start = end
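# Worked decoding sketch (illustrative numbers, not part of the original file): with
# num_change_types = 5 and num_nodes = 10, a flattened prediction index of 137 decodes as
#   change_id = 137 % 5  = 2   -> id_to_bond_change[2] = 2 (forming a double bond)
#   pair_id   = 137 // 5 = 27
#   atom1     = 27 // 10 + 1 = 3,  atom2 = 27 % 10 + 1 = 8
# giving the candidate bond change (3, 8, 2.0).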
def reaction_center_final_eval(args, model, data_loader, easy):
"""Final evaluation of model performance.
args : dict
        Configurations for the experiment.
model : nn.Module
Model for reaction center prediction.
data_loader : torch.utils.data.DataLoader
Loader for fetching and batching data.
easy : bool
If True, reactants not contributing atoms to the product will be excluded in
top-k atom pair selection, which will make the task easier.
Returns
-------
msg : str
Summary of the top-k evaluation.
"""
model.eval()
num_correct = {k: 0 for k in args['top_ks']}
for batch_id, batch_data in enumerate(data_loader):
batch_reactions, batch_graph_edits, batch_mols, batch_mol_graphs, \
batch_complete_graphs, batch_atom_pair_labels = batch_data
with torch.no_grad():
pred, biased_pred = reaction_center_prediction(
args['device'], model, batch_mol_graphs, batch_complete_graphs)
eval(batch_complete_graphs, biased_pred, batch_reactions,
batch_graph_edits, num_correct, args['max_k'], easy)
msg = '|'
for k, correct_count in num_correct.items():
msg += ' acc@{:d} {:.4f} |'.format(k, correct_count / len(data_loader.dataset))
return msg
| 37.112179
| 97
| 0.648847
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,876
| 0.421107
|
6c7908e8770d3b372e9f758cbbc3bb105b2fcb1e
| 8,602
|
py
|
Python
|
scripts/CMU/preprocess.py
|
Vidhan/allennlp
|
3f360d6da2b06ecb8afe03e7802791b9c5cd74d1
|
[
"Apache-2.0"
] | null | null | null |
scripts/CMU/preprocess.py
|
Vidhan/allennlp
|
3f360d6da2b06ecb8afe03e7802791b9c5cd74d1
|
[
"Apache-2.0"
] | null | null | null |
scripts/CMU/preprocess.py
|
Vidhan/allennlp
|
3f360d6da2b06ecb8afe03e7802791b9c5cd74d1
|
[
"Apache-2.0"
] | 1
|
2018-04-30T08:46:34.000Z
|
2018-04-30T08:46:34.000Z
|
import json
import os
import re
import uuid
from knowledge_graph_attr import KnowledgeGraph, Dijkstra
total = 0.0
ignored = 0.0
class Preprocessor(object):
def __init__(self):
pass
def _word_cleanup(self, lookups):
result = []
for word in lookups:
try:
assert word[0] == '['
except AssertionError as e:
                print(word)
raise e
cleaned = ""
for char in word[1:]:
if char == '(' or char == ']':
break
cleaned += char
result.append(cleaned)
return result
def _get_indices(self, line, lookups):
result = []
for lookup in lookups:
start = line.find(lookup)
end = start + len(lookup)
result.append((start, end))
return result
def _line_cleanup(self, line):
ent_pattern = r"\[[a-zA-Z0-9' ]+\([a-zA-Z]+\[\d+\]\)\]"
ref_pattern = r"\[[a-zA-Z0-9' ]+\]"
entities = re.findall(ref_pattern + "|" + ent_pattern, line)
entities_clean = self._word_cleanup(entities)
for index, entity in enumerate(entities):
line = line.replace(entities[index], entities_clean[index])
return line.strip()
def _remove_brace(self, line):
open_braces = [index for index, c in enumerate(line) if c == '(']
close_braces = [index for index, c in enumerate(line) if c == ')']
ignore_ranges = list(zip(open_braces, close_braces))
next_index = 0
result = ""
for ignore_range in ignore_ranges:
result += line[next_index:ignore_range[0]]
next_index = ignore_range[1] + 1
result += line[next_index:]
result = result.replace("', '", "\t")
return result.strip("'").split("\t")
def generate_qas(self, question, answers, context, tags, qas=[]):
qa = {"question": question, "answers": [], "id": str(uuid.uuid4()) + "," + ",".join(tags)}
for ans in answers:
span_start = context.find(ans)
if span_start == -1:
continue
entry = {
'answer_start': span_start,
'text': ans,
}
qa['answers'].append(entry)
result = (len(qa['answers']) != 0)
if result:
qas.append(qa)
return result
def _get_tags(self, line):
ent_pattern = r"\[[a-zA-Z0-9' ]+\([a-zA-Z]+\[\d+\]\)\]"
ref_pattern = r"\[[a-zA-Z0-9' ]+\]"
words = r"[a-zA-Z0-9' ]+"
entities = re.findall(ent_pattern, line)
new_line = line
for entity in entities:
new_line = new_line.replace(entity, '')
attributes = re.findall(ref_pattern, new_line)
tags = []
for entity in entities:
name, class_type, identifier = re.findall(words, entity)
tags.append(class_type + "@" + identifier)
for attr in attributes:
attr = re.findall(words, attr)
tags.append(attr[0])
return tags
def preprocess(self, filename):
result = []
context_so_far = ""
entry = {"title": filename, "paragraphs": []}
kg = KnowledgeGraph()
graph = kg.prepare(filename)
nodes, edges, sorted_nodes = kg.prepare_edges(graph)
dj = Dijkstra(nodes, edges)
shortest_path = dj.shortestPath(sorted_nodes)
entry["dijkstra"] = shortest_path
global ignored
global total
with open(filename) as f:
context_changed = True
qas = []
paragraphs = entry["paragraphs"]
para = {}
for line in f:
if "question_" in line:
if context_changed:
qas = []
line = line.split('\t')
tags = self._get_tags(line[0])
try:
",".join(tags)
except Exception as e:
print(tags)
print(line[0])
raise e
question = self._line_cleanup(line[0])
answers = self._remove_brace(line[1])
context_so_far = " ".join(result)
updated = self.generate_qas(question, answers, context_so_far, tags, qas)
context_changed = False
else:
if not context_changed:
if len(qas) != 0:
para["context"] = context_so_far
para["qas"] = qas
paragraphs.append(para)
else:
ignored += 1
print("Ignoring paragraph", qas)
para = {}
total += 1
context_changed = True
line = self._line_cleanup(line) + "."
result.append(line)
return entry
def save_json(json_obj, filename):
with open(filename, 'w') as f:
json.dump(json_obj, f)
if __name__ == "__main__":
p = Preprocessor()
path = "/Users/prasoon/Desktop/train"
files = os.listdir(path)
student_train_json = {'data': []}
bug_train_json = {'data': []}
dept_train_json = {'data': []}
meet_train_json = {'data': []}
shop_train_json = {'data': []}
student_dev_json = {'data': []}
bug_dev_json = {'data': []}
dept_dev_json = {'data': []}
meet_dev_json = {'data': []}
shop_dev_json = {'data': []}
for index, each_file in enumerate(files):
if not os.path.isfile(each_file):
print("Dir", each_file)
inner_files = os.listdir(path + "/" + each_file)
for filename in inner_files:
if not filename.endswith("with_hints"):
print("Ignored file", filename)
continue
if filename.startswith('student'):
train_json = student_train_json
elif filename.startswith('bug'):
train_json = bug_train_json
elif filename.startswith('department'):
train_json = dept_train_json
elif filename.startswith('meetings'):
train_json = meet_train_json
elif filename.startswith('shopping'):
train_json = shop_train_json
else:
print("Ignored file", filename)
continue
if len(train_json['data']) > 100:
if filename.startswith('student'):
train_json = student_dev_json
elif filename.startswith('bug'):
train_json = bug_dev_json
elif filename.startswith('department'):
train_json = dept_dev_json
elif filename.startswith('meetings'):
train_json = meet_dev_json
elif filename.startswith('shopping'):
train_json = shop_dev_json
else:
print("Ignored file", filename)
continue
if len(train_json['data']) > 20:
continue
real_path = path + "/" + each_file + "/" + filename
print("Preprocessing:", index, filename)
train_json['data'].append(p.preprocess(real_path))
path += "/"
print(ignored, "/", total)
save_json(student_train_json, path + 'final/student_train.json')
save_json(bug_train_json, path + 'final/bug_train.json')
save_json(dept_train_json, path + 'final/department_train.json')
save_json(meet_train_json, path + 'final/meetings_train.json')
save_json(shop_train_json, path + 'final/shopping_train.json')
save_json(student_dev_json, path + 'final/student_dev.json')
save_json(bug_dev_json, path + 'final/bug_dev.json')
save_json(dept_dev_json, path + 'final/department_dev.json')
save_json(meet_dev_json, path + 'final/meetings_dev.json')
save_json(shop_dev_json, path + 'final/shopping_dev.json')
train = {'data': student_train_json['data'] + bug_train_json['data'] + dept_train_json['data'] +
meet_train_json['data']}
dev = shop_train_json
save_json(train, path + 'final/train.json')
save_json(dev, path + 'final/dev.json')
| 34.546185
| 100
| 0.514299
| 4,980
| 0.578935
| 0
| 0
| 0
| 0
| 0
| 0
| 972
| 0.112997
|
6c79a93effba00b7b6196ac9c718d0c037c656b9
| 5,168
|
py
|
Python
|
src/figures/violin_plot_sp_performance.py
|
espottesmith/hydrobench
|
e117774c94cff11debd764d231757174ec211e99
|
[
"MIT"
] | 1
|
2022-03-16T19:19:15.000Z
|
2022-03-16T19:19:15.000Z
|
src/figures/violin_plot_sp_performance.py
|
espottesmith/hydrobench
|
e117774c94cff11debd764d231757174ec211e99
|
[
"MIT"
] | null | null | null |
src/figures/violin_plot_sp_performance.py
|
espottesmith/hydrobench
|
e117774c94cff11debd764d231757174ec211e99
|
[
"MIT"
] | null | null | null |
import csv
import os
import difflib
import statistics
import numpy as np
import matplotlib.pyplot as plt
SMALL_SIZE = 12
MEDIUM_SIZE = 14
LARGE_SIZE = 18
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
# plt.rc('title', titlesize=MEDIUM_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=LARGE_SIZE, titlesize=LARGE_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels
def adjacent_values(vals, q1, q3):
upper_adjacent_value = q3 + (q3 - q1) * 1.5
upper_adjacent_value = np.clip(upper_adjacent_value, q3, vals[-1])
lower_adjacent_value = q1 - (q3 - q1) * 1.5
lower_adjacent_value = np.clip(lower_adjacent_value, vals[0], q1)
return lower_adjacent_value, upper_adjacent_value
base_dir = "/Users/ewcss/data/ssbt/20220211_benchmark"
methods = {"GGA": ["PBE", "PBE-D3(BJ)", "BLYP", "BLYP-D3(BJ)", "B97-D", "B97-D3", "mPW91", "mPW91-D3(BJ)", "VV10", "rVV10"],
"meta-GGA": ["M06-L", "M06-L-D3(0)", "SCAN", "SCAN-D3(BJ)", "TPSS", "TPSS-D3(BJ)", "MN12-L", "MN12-L-D3(BJ)", "B97M-rV"],
"hybrid GGA": ["PBE0", "PBE0-D3(BJ)", "B3LYP", "B3LYP-D3(BJ)", "CAM-B3LYP", "CAM-B3LYP-D3(0)", "mPW1PW91", "mPW1PW91-D3(BJ)", "wB97X", "wB97XD", "wB97XD3", "wB97XV"],
"hybrid meta-GGA": ["M06-2X", "M06-2X-D3(0)", "M06-HF", "M08-SO", "M11", "MN15", "BMK", "BMK-D3(BJ)", "TPSSh", "TPSSh-D3(BJ)", "SCAN0", "mPWB1K", "mPWB1K-D3(BJ)", "wB97M-V"]}
vac_mae = {x: dict() for x in methods}
vac_rel = {x: dict() for x in methods}
pcm_mae = {x: dict() for x in methods}
pcm_rel = {x: dict() for x in methods}
with open(os.path.join(base_dir, "abserrs_vacuum.csv")) as file:
reader = csv.reader(file)
for i, row in enumerate(reader):
if i == 0:
continue
elif row[0].lower() == "average" or "3c" in row[0].lower():
continue
funct = row[0]
# if funct == "M06-HF":
# continue
avg = float(row[-1])
for group, functs in methods.items():
if funct in functs:
vac_mae[group][funct] = avg
with open(os.path.join(base_dir, "abserrs_rel_vacuum.csv")) as file:
reader = csv.reader(file)
for i, row in enumerate(reader):
if i == 0:
continue
elif row[0].lower() == "average" or "3c" in row[0].lower():
continue
funct = row[0]
avg = float(row[-1])
# if funct == "M06-HF":
# continue
for group, functs in methods.items():
if funct in functs:
vac_rel[group][funct] = avg
# with open(os.path.join(base_dir, "abserrs_IEF-PCM.csv")) as file:
# reader = csv.reader(file)
# for i, row in enumerate(reader):
# if i == 0:
# continue
# elif row[0].lower() == "average" or "3c" in row[0].lower():
# continue
# funct = row[0]
# avg = float(row[-1])
#
# # if funct == "M06-HF":
# # continue
#
# for group, functs in methods.items():
# if funct in functs:
# pcm_mae[group][funct] = avg
#
# with open(os.path.join(base_dir, "abserrs_rel_IEF-PCM.csv")) as file:
# reader = csv.reader(file)
# for i, row in enumerate(reader):
# if i == 0:
# continue
# elif row[0].lower() == "average" or "3c" in row[0].lower():
# continue
# funct = row[0]
# avg = float(row[-1])
#
# # if funct == "M06-HF":
# # continue
#
# for group, functs in methods.items():
# if funct in functs:
# pcm_rel[group][funct] = avg
fig, axs = plt.subplots(2, 1, figsize=(14, 6), sharex=True)
for i, dset in enumerate([vac_mae, vac_rel]):
ax = axs[i]
if i == 0:
ax.set_ylabel("MAE (eV)")
else:
ax.set_ylabel("MRAE (unitless)")
xs = ["GGA", "meta-GGA", "hybrid GGA", "hybrid meta-GGA"]
avgs = list()
lowlims = list()
uplims = list()
data = list()
for group in xs:
data.append(np.array(sorted(list(dset[group].values()))))
ax.violinplot(data, [1,2,3,4], showmeans=False, showmedians=False, showextrema=False)
quartile1 = np.zeros(4)
medians = np.zeros(4)
quartile3 = np.zeros(4)
for i, d in enumerate(data):
q1, m, q3 = np.percentile(d, [25, 50, 75])
quartile1[i] = q1
medians[i] = m
quartile3[i] = q3
whiskers = np.array([adjacent_values(sorted_array, q1, q3)
for sorted_array, q1, q3 in zip(data, quartile1, quartile3)])
whiskers_min, whiskers_max = whiskers[:, 0], whiskers[:, 1]
inds = np.arange(1, len(medians) + 1)
ax.scatter(inds, medians, marker='o', color='white', s=30, zorder=3)
ax.vlines(inds, quartile1, quartile3, color='k', linestyle='-', lw=5)
ax.vlines(inds, whiskers_min, whiskers_max, color='k', linestyle='-', lw=1)
ax.set_xticks([1, 2, 3, 4])
ax.set_xticklabels(xs)
plt.tight_layout()
fig.savefig("sp_performance_violin.png", dpi=150)
plt.show()
| 32.917197
| 186
| 0.572175
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,016
| 0.390093
|
6c7aa53b02ade1969b440eeb2dca4bdd3802359c
| 205
|
py
|
Python
|
submissions/abc083/a.py
|
m-star18/atcoder
|
08e475810516602fa088f87daf1eba590b4e07cc
|
[
"Unlicense"
] | 1
|
2021-05-10T01:16:28.000Z
|
2021-05-10T01:16:28.000Z
|
submissions/abc083/a.py
|
m-star18/atcoder
|
08e475810516602fa088f87daf1eba590b4e07cc
|
[
"Unlicense"
] | 3
|
2021-05-11T06:14:15.000Z
|
2021-06-19T08:18:36.000Z
|
submissions/abc083/a.py
|
m-star18/atcoder
|
08e475810516602fa088f87daf1eba590b4e07cc
|
[
"Unlicense"
] | null | null | null |
# sys.stdin.readline()
import sys
input = sys.stdin.readline
a, b, c, d = map(int, input().split())
if a+b > c+d:
ans = 'Left'
elif a+b == c+d:
ans = 'Balanced'
else:
ans = 'Right'
print(ans)
| 15.769231
| 38
| 0.57561
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 45
| 0.219512
|
6c7b5575e035c24915e3b04e46105e06901e65b5
| 255
|
py
|
Python
|
tensorstream/helpers/flatten.py
|
clems4ever/tensorstream
|
61bff14f65f71bdd4ab58aefbd6eda79ec5863cb
|
[
"Apache-2.0"
] | 5
|
2019-04-10T03:51:13.000Z
|
2020-07-12T10:50:24.000Z
|
tensorstream/helpers/flatten.py
|
clems4ever/tensorstream
|
61bff14f65f71bdd4ab58aefbd6eda79ec5863cb
|
[
"Apache-2.0"
] | null | null | null |
tensorstream/helpers/flatten.py
|
clems4ever/tensorstream
|
61bff14f65f71bdd4ab58aefbd6eda79ec5863cb
|
[
"Apache-2.0"
] | null | null | null |
def flatten(elems):
stack = []
if isinstance(elems, (list, tuple)):
for x in elems:
stack += flatten(x)
elif isinstance(elems, (dict)):
for x in elems.values():
stack += flatten(x)
else:
stack.append(elems)
return stack
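# Example usage (added for illustration; not part of the original module):
# nested lists, tuples and dicts are collapsed into a flat list of leaf values.
if __name__ == "__main__":
    nested = {"a": [1, (2, 3)], "b": {"c": 4}}
    assert flatten(nested) == [1, 2, 3, 4]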
| 19.615385
| 38
| 0.603922
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
6c7b64c2f62afaf0967618c5e7f57887d35fa040
| 3,972
|
py
|
Python
|
dafny_comparison/print_table.py
|
gleissen/goolong
|
2bc38024204f9747ed9818502c5df3d36b96dd7d
|
[
"Apache-2.0"
] | 1
|
2019-05-21T18:16:58.000Z
|
2019-05-21T18:16:58.000Z
|
dafny_comparison/print_table.py
|
gleissen/goolong
|
2bc38024204f9747ed9818502c5df3d36b96dd7d
|
[
"Apache-2.0"
] | 2
|
2020-08-06T15:19:12.000Z
|
2020-08-06T15:23:19.000Z
|
dafny_comparison/print_table.py
|
gokhankici/goolong
|
ac5689c374ddaa0156693f234be392059f318b3a
|
[
"Apache-2.0"
] | 2
|
2020-10-27T09:06:58.000Z
|
2021-12-07T16:30:38.000Z
|
#!/usr/bin/env python
import copy
import os.path as op
NAME_FMT = "%-20s"
class FileStats(object):
ANNOTS = ['code', 'annot', 'inv', 'harness']
def __init__(self, code=0, annot=0, inv=0, harness=0, comment='//'):
self.code = code
self.annot = annot
self.inv = inv
self.harness = harness
self.comment = comment
def __str__(self):
return ", ".join(map(lambda k: "%s = %s" % (k, self[k]), self.ANNOTS))
def __getitem__(self, key):
assert key in self.ANNOTS
return self.__dict__[key]
def __setitem__(self, key, item):
assert key in self.ANNOTS
self.__dict__[key] = item
def __add__(self, obj):
r = copy.deepcopy(self)
for k in self.ANNOTS:
r[k] = self[k] + obj[k]
return r
def __iadd__(self, d2):
return self + d2
class GlobalStats(object):
FIELDS = ['icet_stats', 'dafny_stats']
def __init__(self, icet_rw=0, icet_vc=0, dafny_t=0):
self.icet_stats = FileStats(comment='%%')
self.icet_rw = icet_rw
self.icet_vc = icet_vc
self.dafny_stats = FileStats(comment='//')
self.dafny_t = dafny_t
def column_values(self):
return [('# Lines' , "%10s", self.icet_stats.code),
('# Anns' , "%10s", "%d" % (self.icet_stats.annot)),
('# Invs' , "%10s", "%d" % (self.icet_stats.inv)),
('# Lines' , "%10s", self.dafny_stats.code),
('# Anns' , "%10s", "%d" % (self.dafny_stats.annot)),
('# Invs' , "%10s", "%d" % (self.dafny_stats.inv)),
('# Harness', "%10s", self.dafny_stats.harness)
]
def header(self):
return " | ".join(map(lambda (k,fmt,v): "%-10s" % k, self.column_values()))
def row(self):
return " | ".join(map(lambda (k,fmt,v): fmt % str(v), self.column_values()))
def __str__(self):
return ", ".join(map(lambda k: "%s = %s" % (k, self[k]), self.FIELDS))
def __getitem__(self, key):
assert key in self.FIELDS
return self.__dict__[key]
def __setitem__(self, key, item):
assert key in self.FIELDS
self.__dict__[key] = item
def __add__(self, obj):
r = copy.deepcopy(self)
for k in self.FIELDS:
r[k] = self[k] + obj[k]
return r
def __iadd__(self, d2):
return self + d2
def update_stats(filename, stat):
if not op.isfile(filename):
return
with open(filename, 'r') as f:
for line in f:
l = line.rstrip()
for c in FileStats.ANNOTS:
if l.endswith("%s %s" % (stat.comment, c)):
stat[c] += 1
break
if __name__ == '__main__':
THIS_FOLDER = op.dirname(op.abspath(op.realpath(__file__)))
ICET_FOLDER = op.join(THIS_FOLDER, 'icet')
DAFNY_FOLDER = op.join(THIS_FOLDER, 'dafny')
FILES = [(('twophase.icet', 'twophase.dfy'),
'Two-Phase Commit',
GlobalStats()),
(('raft.icet', 'raft.dfy'),
'Raft Leader Election',
GlobalStats()),
(('paxos.icet', 'paxos.dfy'),
'Single-Decree Paxos',
GlobalStats())]
stat_total = GlobalStats()
print " | ".join([" " * 20, "%-36s" % "IceT", "%-49s" % "Dafny"])
print " " * 20, "-" * 90
print " | ".join(["%-20s" % "Name", stat_total.header()])
print "-" * 111
for ((icet_filename, dafny_filename), name, both_stat) in FILES:
update_stats(op.join(ICET_FOLDER, icet_filename), both_stat.icet_stats)
update_stats(op.join(DAFNY_FOLDER, dafny_filename), both_stat.dafny_stats)
print " | ".join([NAME_FMT % name, both_stat.row()])
stat_total += both_stat
print "-" * 111
print " | ".join([NAME_FMT % "Total", stat_total.row()])
| 30.790698
| 84
| 0.527442
| 2,420
| 0.609265
| 0
| 0
| 0
| 0
| 0
| 0
| 503
| 0.126636
|
6c7c6ab3c977d309a6e23ab36c08b279c63de1a3
| 3,822
|
py
|
Python
|
src/po_utils/common_actions/element_interactions.py
|
matthew-bahloul/browser-utils
|
22372d1a6718d8a7fd4eebf116c728aaa06e68ee
|
[
"MIT"
] | null | null | null |
src/po_utils/common_actions/element_interactions.py
|
matthew-bahloul/browser-utils
|
22372d1a6718d8a7fd4eebf116c728aaa06e68ee
|
[
"MIT"
] | null | null | null |
src/po_utils/common_actions/element_interactions.py
|
matthew-bahloul/browser-utils
|
22372d1a6718d8a7fd4eebf116c728aaa06e68ee
|
[
"MIT"
] | null | null | null |
"""
by_locator : tuple --> (<selenium By object>, <selector string>)
x_offset : int --> integer value of x offset in pixels
y_offset : int --> integer value of y offset in pixels
x_destination : int --> integer value of x location on page
y_desitination : int --> integer value of y location on page
by_locator_source : tuple --> (<selenium By object>, <selector string>)
by_locator_target : tuple --> (<selenium By object>, <selector string>)
clear_first : bool --> toggle for clearing input field before writing text to it
press_enter : bool --> toggle for sending the ENTER key to an input field after writing to it
"""
from selenium.webdriver import ActionChains
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from po_utils.common_actions.waits import wait_for_page_to_load, wait_until_displayed, wait_until_not_displayed
@wait_until_displayed
@wait_for_page_to_load
def click_element(self, by_locator:tuple, x_offset:int=0, y_offset:int=0) -> None:
# I hate clicking
element = WebDriverWait(self._driver, self._driver_wait_time).until(EC.visibility_of_element_located(by_locator))
scroll_height = self._driver.execute_script('return document.body.scrollHeight')
window_size = self._driver.get_window_size()['height']
if element.location['y'] > (scroll_height - .5 * window_size):
self._driver.execute_script('window.scrollTo(0, document.body.scrollHeight)')
elif element.location['y'] < (.5 * window_size):
self._driver.execute_script('window.scrollTo(0, 0)')
else:
self._driver.execute_script(f"window.scrollTo({element.location['x']}, {element.location['y'] - .5 * window_size});")
if x_offset == 0 and y_offset == 0:
try:
WebDriverWait(self._driver, self._driver_wait_time).until(EC.element_to_be_clickable(by_locator)).click()
except:
WebDriverWait(self._driver, self._driver_wait_time).until(EC.element_to_be_clickable(by_locator)).click()
else:
ActionChains(self._driver).move_to_element_with_offset(WebDriverWait(self._driver, self._driver_wait_time).until(EC.visibility_of_element_located(by_locator)), x_offset, y_offset).click().perform()
@wait_until_displayed
@wait_for_page_to_load
def click_and_drag_element_by_offset(self, by_locator:tuple, x_destination:int, y_desitination:int) -> None:
element = WebDriverWait(self._driver, self._driver_wait_time).until(EC.visibility_of_element_located(by_locator))
ActionChains(self._driver).drag_and_drop_by_offset(element, x_destination, y_desitination).perform()
@wait_until_displayed
@wait_for_page_to_load
def click_and_drag_element(self, by_locator_source:tuple, by_locator_target:tuple) -> None:
source = WebDriverWait(self._driver, self._driver_wait_time).until(EC.visibility_of_element_located(by_locator_source))
target = WebDriverWait(self._driver, self._driver_wait_time).until(EC.visibility_of_element_located(by_locator_target))
ActionChains(self._driver).drag_and_drop(source, target).perform()
@wait_until_displayed
@wait_for_page_to_load
def send_text_to_element(self, by_locator:tuple, text:str, clear_first:bool=True, press_enter:bool=False) -> None:
if clear_first:
self._driver.find_element(*by_locator).clear()
self._driver.find_element(*by_locator).send_keys(text)
if press_enter:
self._driver.find_element(*by_locator).send_keys(Keys.ENTER)
@wait_until_displayed
@wait_for_page_to_load
def hover_over_element(self, by_locator:tuple) -> None:
element = self._driver.find_element(*by_locator)
ActionChains(self._driver).move_to_element(element).perform()
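# Hypothetical usage sketch (added for illustration; not part of the original
# module): these helpers are written to be bound to a page-object class that
# exposes `_driver` and `_driver_wait_time`. The class below is an assumed
# binding for demonstration, not the package's real API.
class _ExamplePage:
    # Bind the module-level helpers as instance methods.
    click_element = click_element
    send_text_to_element = send_text_to_element
    hover_over_element = hover_over_element

    def __init__(self, driver, wait_time=10):
        self._driver = driver
        self._driver_wait_time = wait_time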
| 53.083333
| 205
| 0.75641
| 0
| 0
| 0
| 0
| 2,767
| 0.723967
| 0
| 0
| 938
| 0.245421
|
6c7e366d11f836cc2b4028018db9d96639fae992
| 174
|
py
|
Python
|
Topics/Custom generators/Even numbers/main.py
|
valenciarichards/hypernews-portal
|
0b6c4d8aefe4f8fc7dc90d6542716e98f52515b3
|
[
"MIT"
] | 1
|
2021-07-26T03:06:14.000Z
|
2021-07-26T03:06:14.000Z
|
Topics/Custom generators/Even numbers/main.py
|
valenciarichards/hypernews-portal
|
0b6c4d8aefe4f8fc7dc90d6542716e98f52515b3
|
[
"MIT"
] | null | null | null |
Topics/Custom generators/Even numbers/main.py
|
valenciarichards/hypernews-portal
|
0b6c4d8aefe4f8fc7dc90d6542716e98f52515b3
|
[
"MIT"
] | null | null | null |
n = int(input())
def even(x):
yield x * 2
for number in range(n):
print(next(even(number)))
# Don't forget to print out the first n numbers one by one here
| 13.384615
| 63
| 0.62069
| 0
| 0
| 28
| 0.16092
| 0
| 0
| 0
| 0
| 63
| 0.362069
|
6c7f914b76e891552a3b496827a2a433ae7084c1
| 2,096
|
py
|
Python
|
cronman/cron_jobs/run_cron_tasks.py
|
ryancheley/django-cronman
|
5be5d9d5eecba0f110808c9e7a97ef89ef620ade
|
[
"BSD-3-Clause"
] | 17
|
2018-09-25T16:28:36.000Z
|
2022-01-31T14:43:24.000Z
|
cronman/cron_jobs/run_cron_tasks.py
|
ryancheley/django-cronman
|
5be5d9d5eecba0f110808c9e7a97ef89ef620ade
|
[
"BSD-3-Clause"
] | 14
|
2018-11-04T14:45:14.000Z
|
2022-02-01T04:02:47.000Z
|
cronman/cron_jobs/run_cron_tasks.py
|
ryancheley/django-cronman
|
5be5d9d5eecba0f110808c9e7a97ef89ef620ade
|
[
"BSD-3-Clause"
] | 3
|
2018-09-25T16:28:44.000Z
|
2022-02-01T04:08:23.000Z
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
from __future__ import unicode_literals
from django.db import connections
from django.utils import timezone
from django.utils.functional import cached_property
from cronman.config import app_settings
from cronman.job import BaseCronJob
from cronman.models import CronTask
from cronman.spawner import CronSpawner
from cronman.utils import cron_jobs_module_config
class RunCronTasks(BaseCronJob):
"""Starts worker processes for cron jobs requested to run in Admin
via CronTask model.
"""
lock_ignore_errors = True
cronitor_id = app_settings.CRONMAN_RUN_CRON_TASKS_CRONITOR_ID
@cached_property
def cron_spawner(self):
"""Cron Spawner instance"""
return CronSpawner(logger=self.logger)
def run(self):
"""Main logic"""
cron_tasks = self.get_pending_cron_tasks()
num_cron_tasks = len(cron_tasks)
num_started = 0
for i, cron_task in enumerate(cron_tasks, 1):
self.logger.info(
"Starting worker for CronTask {} ({}/{})".format(
cron_task, i, num_cron_tasks
)
)
pid = self.start_cron_task(cron_task)
if pid is not None:
num_started += 1
if num_started:
status_message = "Started {} CronTask(s).".format(num_started)
else:
status_message = "No CronTasks started."
self.logger.info(status_message)
def get_pending_cron_tasks(self):
"""Retrieve pending CronTasks"""
allowed_tasks = cron_jobs_module_config(
"ALLOWED_CRON_TASKS", default=()
)
cron_tasks = list(
CronTask.objects.pending()
.filter(start_at__lte=timezone.now())
.filter(cron_job__in=allowed_tasks)
)
connections.close_all() # close db connections
return cron_tasks
def start_cron_task(self, cron_task):
"""Starts worker for given CronTask"""
return self.cron_spawner.start_worker(cron_task.job_spec())
| 31.757576
| 74
| 0.648378
| 1,677
| 0.800095
| 0
| 0
| 127
| 0.060592
| 0
| 0
| 391
| 0.186546
|
6c8301238acf3bc4525ac9e26175e629b0f3e112
| 2,893
|
py
|
Python
|
day23.py
|
alexa-infra/advent-of-code-2018
|
f14e8c87b655c479097ae713572bb0260ec993fc
|
[
"MIT"
] | null | null | null |
day23.py
|
alexa-infra/advent-of-code-2018
|
f14e8c87b655c479097ae713572bb0260ec993fc
|
[
"MIT"
] | null | null | null |
day23.py
|
alexa-infra/advent-of-code-2018
|
f14e8c87b655c479097ae713572bb0260ec993fc
|
[
"MIT"
] | null | null | null |
import re
parse_re = re.compile(
r'pos\=\<(?P<x>-?\d+),(?P<y>-?\d+),(?P<z>-?\d+)\>, r\=(?P<r>\d+)'
)
def parse(text):
m = parse_re.match(text)
d = m.groupdict()
pos = int(d['x']), int(d['y']), int(d['z'])
r = int(d['r'])
return pos, r
def dist(a, b):
return sum(abs(x1-x2) for x1, x2 in zip(a, b))
def getinrange(data):
mmax = max(data, key=lambda d: d[1])
maxp, maxr = mmax
pp = [(p, r) for p, r in data if dist(p, maxp) <= maxr]
return len(pp)
def solve(data):
pp = [p for p, r in data]
xs = [x for x, y, z in pp]
ys = [y for x, y, z in pp]
zs = [z for x, y, z in pp]
xmin, xmax = min(xs), max(xs)
ymin, ymax = min(ys), max(ys)
zmin, zmax = min(zs), max(zs)
dd = 1
zero = (0, 0, 0)
while dd < xmax - xmin:
dd *= 2
while True:
tcount = 0
best = None
bestVal = None
for x in range(xmin, xmax+1, dd):
for y in range(ymin, ymax+1, dd):
for z in range(zmin, zmax+1, dd):
pos = (x, y, z)
count = 0
for p, r in data:
if (dist(pos, p) - r) / dd <= 0:
count += 1
if count > tcount:
tcount = count
best = pos
bestVal = dist(best, zero)
elif count == tcount:
if best is None or dist(pos, zero) < bestVal:
best = pos
bestVal = dist(best, zero)
if dd > 1:
x, y, z = best
xx = (x - dd, x + dd)
yy = (y - dd, y + dd)
zz = (z - dd, z + dd)
xmin, xmax = min(xx), max(xx)
ymin, ymax = min(yy), max(yy)
zmin, zmax = min(zz), max(zz)
dd = dd // 2
else:
return best, bestVal
def test1():
data = [
"pos=<0,0,0>, r=4",
"pos=<1,0,0>, r=1",
"pos=<4,0,0>, r=3",
"pos=<0,2,0>, r=1",
"pos=<0,5,0>, r=3",
"pos=<0,0,3>, r=1",
"pos=<1,1,1>, r=1",
"pos=<1,1,2>, r=1",
"pos=<1,3,1>, r=1",
]
data = [parse(d) for d in data]
assert getinrange(data) == 7
def test2():
data = [
"pos=<10,12,12>, r=2",
"pos=<12,14,12>, r=2",
"pos=<16,12,12>, r=4",
"pos=<14,14,14>, r=6",
"pos=<50,50,50>, r=200",
"pos=<10,10,10>, r=5",
]
data = [parse(d) for d in data]
best, bestVal = solve(data)
assert bestVal == 36
def main():
with open('day23.txt', 'r') as f:
data = f.readlines()
data = [parse(d) for d in data]
print('Part 1:', getinrange(data))
best, bestVal = solve(data)
print('Part 2:', bestVal)
if __name__ == '__main__':
test1()
test2()
main()
| 27.037383
| 69
| 0.412375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 409
| 0.141376
|
6c83ee69fde6360b183bb19fa3bcf09e78de7fd6
| 381
|
py
|
Python
|
setup.py
|
connormullett/lib_elo_calculator
|
1a699f233dd440b4295e8958b02422ce64b27c70
|
[
"MIT"
] | null | null | null |
setup.py
|
connormullett/lib_elo_calculator
|
1a699f233dd440b4295e8958b02422ce64b27c70
|
[
"MIT"
] | null | null | null |
setup.py
|
connormullett/lib_elo_calculator
|
1a699f233dd440b4295e8958b02422ce64b27c70
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from setuptools import find_packages, setup
setup(
name="lib_elo_calculator",
packages=find_packages(include=['lib_elo_calculator']),
version='0.1.0',
description='contains functions and formulas for calculating elo',
author='Connor Mullett',
license='MIT',
setup_requires=['pytest-runner'],
tests_require=['pytest'],
test_suite='tests'
)
| 23.8125
| 68
| 0.737533
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 172
| 0.451444
|
6c85751be92171445c98d3494d9be709e143efc5
| 1,526
|
py
|
Python
|
examples/intro-example/dags/tutorial.py
|
rfim/QoalaMoviesKaggle
|
3cf5486f012487c5585bbe86d3a2bc1c58979bac
|
[
"MIT"
] | null | null | null |
examples/intro-example/dags/tutorial.py
|
rfim/QoalaMoviesKaggle
|
3cf5486f012487c5585bbe86d3a2bc1c58979bac
|
[
"MIT"
] | null | null | null |
examples/intro-example/dags/tutorial.py
|
rfim/QoalaMoviesKaggle
|
3cf5486f012487c5585bbe86d3a2bc1c58979bac
|
[
"MIT"
] | null | null | null |
tot_name = os.path.join(os.path.dirname(__file__),'src/data', file_name)
# open the json datafile and read it in
with open(tot_name, 'r') as inputfile:
doc = json.load(inputfile)
    # transform the data to the correct types
id_movie = int(doc['id'])
movie_name = str(doc['original_title'])
year = str(doc['production_companies']['production_countries']['release date'])
country_origin = str(doc['production_companies']['origin_country'])
category_1 = str(doc['genres']['name'])
category_2 = str(doc['genres']['name'])
movie_rating = float(doc['popularity'])
avg_rating = float(doc['production_companies']['production_countries']['vote_average'])
total_clicks = float(doc['production_companies']['production_countries']['vote_count'])
# check for nan's in the numeric values and then enter into the database
valid_data = True
#for valid in np.isnan([lat, lon, humid, press, min_temp, max_temp, temp]):
# if valid is False:
# valid_data = False
# break;
row = (id_movie, movie_name, year, country_origin, category_1, category_2, movie_rating,
avg_rating, total_clicks)
insert_cmd = """INSERT INTO movies
(id_movie, movie_name, year,
country_origin, category_1, category_2,
movie_rating, avg_rating, total_clicks)
VALUES
                        (%s, %s, %s, %s, %s, %s, %s, %s, %s);"""
print(insert_cmd,row)
if valid_data is True:
pg_hook.run(insert_cmd, parameters=row)
| 40.157895
| 92
| 0.671035
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 850
| 0.557012
|
6c8601c43b4ff494fe3c99410a606a7250f4d9f9
| 20,189
|
py
|
Python
|
hclf/multiclass.py
|
tfmortie/hclf
|
68bdb61c12c4b8fefbb94f1ac8aa30baed8077c5
|
[
"MIT"
] | null | null | null |
hclf/multiclass.py
|
tfmortie/hclf
|
68bdb61c12c4b8fefbb94f1ac8aa30baed8077c5
|
[
"MIT"
] | null | null | null |
hclf/multiclass.py
|
tfmortie/hclf
|
68bdb61c12c4b8fefbb94f1ac8aa30baed8077c5
|
[
"MIT"
] | null | null | null |
"""
Code for hierarchical multi-class classifiers.
Author: Thomas Mortier
Date: Feb. 2021
TODO:
* Add option for set-valued prediction
* Feature: allow tree structures with non-unique node labels (currently, warning is thrown)
"""
import time
import warnings
import numpy as np
from .utils import HLabelEncoder, PriorityQueue
from sklearn.base import BaseEstimator, ClassifierMixin, clone
from sklearn.utils import _message_with_time
from sklearn.utils.validation import check_X_y, check_array, check_random_state
from sklearn.exceptions import NotFittedError, FitFailedWarning
from sklearn.metrics import accuracy_score
from joblib import Parallel, delayed, parallel_backend
from collections import ChainMap
class LCPN(BaseEstimator, ClassifierMixin):
"""Local classifier per parent node (LCPN) classifier.
Parameters
----------
estimator : scikit-learn base estimator
Represents the base estimator for the classification task in each node.
sep : str, default=';'
Path separator used for processing the hierarchical labels. If set to None,
        a random hierarchy is created and the provided flat labels are converted accordingly.
k : tuple of int, default=(2,2)
        Min and max number of children a node can have in the randomly generated tree. Ignored when
sep is not set to None.
n_jobs : int, default=None
The number of jobs to run in parallel. Currently this applies to fit,
and predict.
random_state : RandomState or an int seed, default=None
A random number generator instance to define the state of the
random permutations generator.
verbose : int, default=0
Controls the verbosity: the higher, the more messages
Examples
--------
>>> from hclf.multiclass import LCPN
>>> from sklearn.linear_model import LogisticRegression
>>>
>>> clf = LCPN(LogisticRegression(random_state=0),
    ...            sep=";",
    ...            n_jobs=4,
    ...            random_state=0,
    ...            verbose=1)
>>> clf.fit(X, y)
>>> clf.score(X, y)
"""
def __init__(self, estimator, sep=';', k=(2,2), n_jobs=None, random_state=None, verbose=0):
self.estimator = clone(estimator)
self.sep = sep
self.k = k
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.tree = {}
def _add_path(self, path):
current_node = path[0]
add_node = path[1]
# check if add_node is already registred
if add_node not in self.tree:
# check if add_node is terminal
if len(path) > 2:
# register add_node to the tree
self.tree[add_node] = {
"lbl": add_node,
"estimator": None,
"children": [],
"parent": current_node}
# add add_node to current_node's children (if not yet in list of children)
if add_node not in self.tree[current_node]["children"]:
self.tree[current_node]["children"].append(add_node)
# set estimator when num. of children for current_node is higher than 1 and if not yet set
if len(self.tree[current_node]["children"]) > 1 and self.tree[current_node]["estimator"] is None:
self.tree[current_node]["estimator"] = clone(self.estimator)
else:
# check for duplicate node labels
if self.tree[add_node]["parent"] != current_node:
warnings.warn("Duplicate node label {0} detected in hierarchy with parents {1}, {2}!".format(add_node, self.tree[add_node]["parent"], current_node), FitFailedWarning)
# process next couple of nodes in path
if len(path) > 2:
path = path[1:]
self._add_path(path)
def _fit_node(self, node):
# check if node has estimator
if node["estimator"] is not None:
# transform data for node
y_transform = []
sel_ind = []
for i,y in enumerate(self.y_):
if node["lbl"] in y.split(self.sep):
# need to include current label and sample (as long as it's "complete")
y_split = y.split(self.sep)
if y_split.index(node["lbl"]) < len(y_split)-1:
y_transform.append(y_split[y_split.index(node["lbl"])+1])
sel_ind.append(i)
X_transform = self.X_[sel_ind,:]
node["estimator"].fit(X_transform, y_transform)
if self.verbose >= 2:
print("Model {0} fitted!".format(node["lbl"]))
# now make sure that the order of labels correspond to the order of children
node["children"] = node["estimator"].classes_
return {node["lbl"]: node}
def fit(self, X, y):
"""Implementation of the fitting function for the LCPN classifier.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The training input samples.
y : array-like, shape (n_samples,) or (n_samples, n_outputs)
The class labels
Returns
-------
self : object
Returns self.
"""
self.random_state_ = check_random_state(self.random_state)
# need to make sure that X and y have the correct shape
X, y = check_X_y(X, y, multi_output=False) # multi-output not supported (yet)
# check if n_jobs is integer
if not self.n_jobs is None:
if not isinstance(self.n_jobs, int):
raise TypeError("Parameter n_jobs must be of type int.")
# store number of outputs and complete data seen during fit
self.n_outputs_ = 1
self.X_ = X
self.y_ = y
# store label of root node
self.rlbl = self.y_[0].split(self.sep)[0]
# init tree
self.tree = {self.rlbl: {
"lbl": self.rlbl,
"estimator": None,
"children": [],
"parent": None}}
# check if sep is None or str
if type(self.sep) != str and self.sep is not None:
raise TypeError("Parameter sep must be of type str or None.")
# init and fit the hierarchical model
start_time = time.time()
# first init the tree
try:
if self.sep is None:
# transform labels to labels in some random hierarchy
self.sep = ';'
self.label_encoder_ = HLabelEncoder(k=self.k,random_state=self.random_state_)
self.y_ = self.label_encoder_.fit_transform(self.y_)
else:
self.label_encoder_ = None
for lbl in self.y_:
self._add_path(lbl.split(self.sep))
# now proceed to fitting
with parallel_backend("loky"):
fitted_tree = Parallel(n_jobs=self.n_jobs)(delayed(self._fit_node)(self.tree[node]) for node in self.tree)
self.tree = {k: v for d in fitted_tree for k, v in d.items()}
except NotFittedError as e:
raise NotFittedError("Tree fitting failed! Make sure that the provided data is in the correct format.")
# now store classes (leaf nodes) seen during fit
cls = []
nodes_to_visit = [self.tree[self.rlbl]]
while len(nodes_to_visit) > 0:
curr_node = nodes_to_visit.pop()
for c in curr_node["children"]:
# check if child is leaf node
if c not in self.tree:
cls.append(c)
else:
# add child to nodes_to_visit
nodes_to_visit.append(self.tree[c])
self.classes_ = cls
# make sure that classes_ are in same format of original labels
if self.label_encoder_ is not None:
self.classes_ = self.label_encoder_.inverse_transform(self.classes_)
else:
# construct dict with leaf node lbls -> path mappings
lbl_to_path = {yi.split(self.sep)[-1]: yi for yi in self.y_}
self.classes_ = [lbl_to_path[cls] for cls in self.classes_]
stop_time = time.time()
if self.verbose >= 1:
print(_message_with_time("LCPN", "fitting", stop_time-start_time))
return self
def _predict_nbop(self, i, X):
preds = []
# run over all samples
for x in X:
x = x.reshape(1,-1)
pred = self.rlbl
pred_path = [pred]
while pred in self.tree:
curr_node = self.tree[pred]
# check if we have a node with single path
if curr_node["estimator"] is not None:
pred = curr_node["estimator"].predict(x)[0]
else:
pred = curr_node["children"][0]
pred_path.append(pred)
preds.append(self.sep.join(pred_path))
return {i: preds}
def _predict_bop(self, i, X, scores):
preds = []
# run over all samples
for x in X:
x = x.reshape(1,-1)
nodes_to_visit = PriorityQueue()
nodes_to_visit.push(1.,self.rlbl)
pred = None
while not nodes_to_visit.is_empty():
curr_node_prob, curr_node = nodes_to_visit.pop()
curr_node_lbl = curr_node.split(self.sep)[-1]
curr_node_prob = 1-curr_node_prob
# check if we are at a leaf node
if curr_node_lbl not in self.tree:
pred = curr_node
break
else:
curr_node_v = self.tree[curr_node_lbl]
# check if we have a node with single path
if curr_node_v["estimator"] is not None:
# get probabilities
curr_node_ch_probs = self._predict_proba(curr_node_v["estimator"], x, scores)
# apply chain rule of probability
curr_node_ch_probs = curr_node_ch_probs*curr_node_prob
# add children to queue
for j,c in enumerate(curr_node_v["children"]):
prob_child = curr_node_ch_probs[:,j][0]
nodes_to_visit.push(prob_child, curr_node+self.sep+c)
else:
c = curr_node_v["children"][0]
nodes_to_visit.push(curr_node_prob,curr_node+self.sep+c)
preds.append(pred)
return {i: preds}
def predict(self, X, bop=False):
"""Return class predictions.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input samples.
bop : boolean, default=False
            If True, return the Bayes-optimal solution; otherwise, return the solution
            obtained by following the path of maximum probability in each node.
Returns
-------
preds : ndarray
Returns an array of predicted class labels.
"""
# check input
X = check_array(X)
scores = False
preds = []
start_time = time.time()
# check whether the base estimator supports probabilities
if not hasattr(self.estimator, 'predict_proba'):
# check whether the base estimator supports class scores
if not hasattr(self.estimator, 'decision_function'):
raise NotFittedError("{0} does not support \
probabilistic predictions nor scores.".format(self.estimator))
else:
scores = True
try:
# now proceed to predicting
with parallel_backend("loky"):
if not bop:
d_preds = Parallel(n_jobs=self.n_jobs)(delayed(self._predict_nbop)(i,X[ind]) for i,ind in enumerate(np.array_split(range(X.shape[0]), self.n_jobs)))
else:
d_preds = Parallel(n_jobs=self.n_jobs)(delayed(self._predict_bop)(i,X[ind],scores) for i,ind in enumerate(np.array_split(range(X.shape[0]), self.n_jobs)))
# collect predictions
preds_dict = dict(ChainMap(*d_preds))
for k in np.sort(list(preds_dict.keys())):
preds.extend(preds_dict[k])
# in case of no predefined hierarchy, backtransform to original labels
if self.label_encoder_ is not None:
preds = self.label_encoder_.inverse_transform([p.split(self.sep)[-1] for p in preds])
except NotFittedError as e:
raise NotFittedError("This model is not fitted yet. Cal 'fit' \
with appropriate arguments before using this \
method.")
stop_time = time.time()
if self.verbose >= 1:
print(_message_with_time("LCPN", "predicting", stop_time-start_time))
return preds
def _predict_proba(self, estimator, X, scores=False):
if not scores:
return estimator.predict_proba(X)
else:
# get scores
scores = estimator.decision_function(X)
scores = np.exp(scores)
# check if we only have one score (ie, when K=2)
if len(scores.shape) == 2:
# softmax evaluation
scores = scores/np.sum(scores,axis=1).reshape(scores.shape[0],1)
else:
# sigmoid evaluation
scores = 1/(1+np.exp(-scores))
scores = scores.reshape(-1,1)
scores = np.hstack([1-scores,scores])
return scores
def predict_proba(self, X):
"""Return probability estimates.
Important: the returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input samples.
Returns
-------
probs : ndarray
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in self.classes_.
"""
# check input
X = check_array(X)
scores = False
probs = []
start_time = time.time()
# check whether the base estimator supports probabilities
if not hasattr(self.estimator, 'predict_proba'):
# check whether the base estimator supports class scores
if not hasattr(self.estimator, 'decision_function'):
raise NotFittedError("{0} does not support \
probabilistic predictions nor scores.".format(self.estimator))
else:
scores = True
try:
nodes_to_visit = [(self.tree[self.rlbl], np.ones((X.shape[0],1)))]
while len(nodes_to_visit) > 0:
curr_node, parent_prob = nodes_to_visit.pop()
# check if we have a node with single path
if curr_node["estimator"] is not None:
# get probabilities
curr_node_probs = self._predict_proba(curr_node["estimator"], X, scores)
# apply chain rule of probability
curr_node_probs = curr_node_probs*parent_prob
for i,c in enumerate(curr_node["children"]):
# check if child is leaf node
prob_child = curr_node_probs[:,i].reshape(-1,1)
if c not in self.tree:
probs.append(prob_child)
else:
# add child to nodes_to_visit
nodes_to_visit.append((self.tree[c],prob_child))
else:
c = curr_node["children"][0]
# check if child is leaf node
if c not in self.tree:
probs.append(parent_prob)
else:
# add child to nodes_to_visit
nodes_to_visit.append((self.tree[c],parent_prob))
except NotFittedError as e:
raise NotFittedError("This model is not fitted yet. Cal 'fit' \
with appropriate arguments before using this \
method.")
stop_time = time.time()
if self.verbose >= 1:
print(_message_with_time("LCPN", "predicting probabilities", stop_time-start_time))
return np.hstack(probs)
def score(self, X, y):
"""Return mean accuracy score.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test samples.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
True labels for X.
Returns
-------
score : float
Mean accuracy of self.predict(X) wrt. y.
"""
# check input and outputs
X, y = check_X_y(X, y, multi_output=False)
start_time = time.time()
try:
preds = self.predict(X)
except NotFittedError as e:
raise NotFittedError("This model is not fitted yet. Cal 'fit' \
with appropriate arguments before using this \
method.")
stop_time = time.time()
if self.verbose >= 1:
print(_message_with_time("LCPN", "calculating score", stop_time-start_time))
score = accuracy_score(y, preds)
return score
def score_nodes(self, X, y):
"""Return mean accuracy score for each node in the hierarchy.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test samples.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
True labels for X.
Returns
-------
score_dict : dict
Mean accuracy of self.predict(X) wrt. y for each node in the hierarchy.
"""
# check input and outputs
X, y = check_X_y(X, y, multi_output=False)
start_time = time.time()
score_dict = {}
try:
# transform the flat labels, in case of no predefined hierarchy
if self.label_encoder_ is not None:
y = self.label_encoder_.transform(y)
for node in self.tree:
node = self.tree[node]
# check if node has estimator
if node["estimator"] is not None:
# transform data for node
y_transform = []
sel_ind = []
for i, yi in enumerate(y):
if node["lbl"] in yi.split(self.sep):
# need to include current label and sample (as long as it's "complete")
y_split = yi.split(self.sep)
if y_split.index(node["lbl"]) < len(y_split)-1:
y_transform.append(y_split[y_split.index(node["lbl"])+1])
sel_ind.append(i)
X_transform = X[sel_ind,:]
if len(sel_ind) != 0:
# obtain predictions
node_preds = node["estimator"].predict(X_transform)
acc = accuracy_score(y_transform, node_preds)
score_dict[node["lbl"]] = acc
except NotFittedError as e:
raise NotFittedError("This model is not fitted yet. Cal 'fit' \
with appropriate arguments before using this \
method.")
stop_time = time.time()
if self.verbose >= 1:
print(_message_with_time("LCPN", "calculating node scores", stop_time-start_time))
return score_dict
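# Illustrative usage sketch (added for clarity; not part of the original
# module). X is assumed to be a feature matrix and y hierarchical labels of
# the form "root;child;leaf"; LogisticRegression is just one possible base.
def _example_lcpn_usage(X, y):
    from sklearn.linear_model import LogisticRegression
    clf = LCPN(LogisticRegression(random_state=0), sep=";", n_jobs=2)
    clf.fit(X, y)
    return {
        "bayes_optimal": clf.predict(X, bop=True),
        "probabilities": clf.predict_proba(X),   # ordered as clf.classes_
        "node_scores": clf.score_nodes(X, y),
    }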
| 42.864119
| 182
| 0.553668
| 19,464
| 0.964089
| 0
| 0
| 0
| 0
| 0
| 0
| 7,803
| 0.386498
|
6c86a24e42a439643a1c92f29bdfc4a1de454d48
| 964
|
py
|
Python
|
tests/conftest.py
|
jwizzle/nerdchess
|
045726326abc3ff94af30bda0c66beff1ca52978
|
[
"WTFPL"
] | null | null | null |
tests/conftest.py
|
jwizzle/nerdchess
|
045726326abc3ff94af30bda0c66beff1ca52978
|
[
"WTFPL"
] | null | null | null |
tests/conftest.py
|
jwizzle/nerdchess
|
045726326abc3ff94af30bda0c66beff1ca52978
|
[
"WTFPL"
] | null | null | null |
"""Fixtures for pytest."""
import pytest
from nerdchess.board import Board
from nerdchess import pieces
@pytest.fixture
def board_fixt():
"""Wrap the boardfixt class as a pytest fixture."""
return BoardFixt(Board())
class BoardFixt():
"""Helper functions to manipulate a board passed as fixture."""
def __init__(self, board):
"""Init."""
self.board = board
def place_piece(self, piece, position):
"""Place a piece or pawn on the board."""
letter = position[0]
number = int(position[1])
self.board.squares[letter][number].occupant = piece
piece.position = position
def default_setup(self):
"""Set the board in default game start position.
Returns:
board: The new board object
"""
boardpieces = pieces.create_pieces()
pawns = pieces.create_pawns()
self.board.setup_board(boardpieces, pawns)
return self.board
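# Illustrative test (added as an example; not part of the original fixtures
# module). It relies only on behaviour visible above: default_setup() returns
# the fixture's own board after placing the standard pieces.
def test_default_setup_returns_board(board_fixt):
    board = board_fixt.default_setup()
    assert board is board_fixt.board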
| 25.368421
| 67
| 0.631743
| 734
| 0.761411
| 0
| 0
| 119
| 0.123444
| 0
| 0
| 310
| 0.321577
|
6c86a8871548627b9a0755d57d564bc3d174dbdd
| 2,649
|
py
|
Python
|
imports/language_check.py
|
ahmed-amr1/schtabtag
|
d5f1e550fccaf58cbcf9fac39528b921659cec7c
|
[
"MIT"
] | null | null | null |
imports/language_check.py
|
ahmed-amr1/schtabtag
|
d5f1e550fccaf58cbcf9fac39528b921659cec7c
|
[
"MIT"
] | null | null | null |
imports/language_check.py
|
ahmed-amr1/schtabtag
|
d5f1e550fccaf58cbcf9fac39528b921659cec7c
|
[
"MIT"
] | null | null | null |
def Check(src):
lang = None
if src == "auto":
lang = "Auto detect language"
if src == "en":
lang = "English - English"
if src == "de":
lang = "German - Deutsch"
if src == "ar":
lang = "Arabic - عربي"
if src == "es":
lang = "Spanish - español, castellano"
if src == "ru":
lang = "Russian - русский"
if src == "pl":
lang = "Polish - Polski"
if src == "it":
lang = "Italian - Italiano"
if src == "ja":
lang = "Japanese - 日本語"
if src == "ga":
lang = "Irish - Gaeilge"
if src == "hi":
lang = "Hindi - हिन्दी, हिंदी"
if src == "he":
lang = "Hebrew - עברית"
if src == "fr":
lang = "French - Français"
if src == "nl":
lang = "Dutch - Nederlands"
if src == "cs":
lang = "Czech - česky, čeština"
if src == "da":
lang = "Danish - Dansk"
if src == "zh":
lang = "Chinese - 中文, Zhōngwén"
if src == "fa":
lang = "Persian - فارسی"
return lang
"""
if src == "auto":
src = "Auto detect language"
if src == "en":
src = "English - English"
if src == "de":
src = "German - Deutsch"
if src == "ar":
src = "Arabic - عربي"
if src == "es":
src = "Spanish - español, castellano"
if src == "ru":
src = "Russian - русский"
if src == "pl":
src = "Polish - Polski"
if src == "it":
src = "Italian - Italiano"
if src == "ja":
src = "Japanese - 日本語"
if src == "ga":
src = "Irish - Gaeilge"
if src == "hi":
src = "Hindi - हिन्दी, हिंदी"
if src == "he":
src = "Hebrew - עברית"
if src == "fr":
src = "French - Français"
if src == "nl":
src = "Dutch - Nederlands"
if src == "cs":
src = "Czech - česky, čeština"
if src == "da":
src = "Danish - Dansk"
if src == "zh":
src = "Chinese - 中文, Zhōngwén"
if src == "fa":
src = "Persian - فارسی"
if dst == "en":
dst = "English - English"
if dst == "de":
dst = "German - Deutsch"
if dst == "ar":
dst = "Arabic - عربي"
if dst == "es":
dst = "Spanish - español, castellano"
if dst == "ru":
dst = "Russian - русский"
if dst == "pl":
dst = "Polish - Polski"
if dst == "it":
dst = "Italian - Italiano"
if dst == "ja":
dst = "Japanese - 日本語"
if dst == "ga":
dst = "Irish - Gaeilge"
if dst == "hi":
dst = "Hindi - हिन्दी, हिंदी"
if dst == "he":
dst = "Hebrew - עברית"
if dst == "fr":
dst = "French - Français"
if dst == "nl":
dst = "Dutch - Nederlands"
if dst == "cs":
dst = "Czech - česky, čeština"
if dst == "da":
dst = "Danish - Dansk"
if dst == "zh":
dst = "Chinese - 中文, Zhōngwén"
if dst == "fa":
dst = "Persian - فارسی"
"""
| 23.442478
| 42
| 0.495659
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,315
| 0.81831
|
6c86adac816e4b256e05f833e885292823f8146c
| 1,003
|
py
|
Python
|
puppo/decorator_functions/display_decorators.py
|
JHowell45/Pupper
|
5c863eba8651a5b1130c04321cc6cefacb71c7b2
|
[
"MIT"
] | null | null | null |
puppo/decorator_functions/display_decorators.py
|
JHowell45/Pupper
|
5c863eba8651a5b1130c04321cc6cefacb71c7b2
|
[
"MIT"
] | 1
|
2021-06-01T21:54:15.000Z
|
2021-06-01T21:54:15.000Z
|
puppo/decorator_functions/display_decorators.py
|
JHowell45/Pupper
|
5c863eba8651a5b1130c04321cc6cefacb71c7b2
|
[
"MIT"
] | null | null | null |
"""Decorator unctions for displaying commands."""
from functools import wraps
from shutil import get_terminal_size
import click
def command_handler(command_title, colour='green'):
"""Use this decorator for surrounding the functions with banners."""
def decorator(function):
"""Nested decorator function."""
terminal_width = int(get_terminal_size()[0])
title = ' {} '.format(command_title)
banner_length = int((terminal_width - len(title)) / 2)
banner = '-' * banner_length
command_banner = '|{0}{1}{0}|'.format(
banner, title.title())
lower_banner = '|{}|'.format('-' * int(len(command_banner) - 2))
@wraps(function)
def wrapper(*args, **kwargs):
"""Nested wrapper function."""
click.secho(command_banner, fg=colour)
result = function(*args, **kwargs)
click.secho(lower_banner, fg=colour)
return result
return wrapper
return decorator
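# Example usage (added for illustration; not part of the original module):
# any function can be wrapped so its terminal output is framed by a coloured
# banner carrying the given title.
if __name__ == "__main__":
    @command_handler("list files", colour="blue")
    def list_files():
        click.echo("file_a.txt")
        click.echo("file_b.txt")

    list_files()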
| 34.586207
| 72
| 0.617149
| 0
| 0
| 0
| 0
| 270
| 0.269192
| 0
| 0
| 217
| 0.216351
|
6c87ac082f2ea2bf7c87cad18eaf0cdd7451709c
| 869
|
py
|
Python
|
opennem/api/schema.py
|
paulculmsee/opennem
|
9ebe4ab6d3b97bdeebc352e075bbd5c22a8ddea1
|
[
"MIT"
] | 22
|
2020-06-30T05:27:21.000Z
|
2022-02-21T12:13:51.000Z
|
opennem/api/schema.py
|
paulculmsee/opennem
|
9ebe4ab6d3b97bdeebc352e075bbd5c22a8ddea1
|
[
"MIT"
] | 71
|
2020-08-07T13:06:30.000Z
|
2022-03-15T06:44:49.000Z
|
opennem/api/schema.py
|
paulculmsee/opennem
|
9ebe4ab6d3b97bdeebc352e075bbd5c22a8ddea1
|
[
"MIT"
] | 13
|
2020-06-30T03:28:32.000Z
|
2021-12-30T08:17:16.000Z
|
from typing import List, Optional
from pydantic import BaseModel, Field
class ApiBase(BaseModel):
class Config:
orm_mode = True
anystr_strip_whitespace = True
use_enum_values = True
arbitrary_types_allowed = True
validate_assignment = True
class UpdateResponse(BaseModel):
success: bool = True
records: List = []
class FueltechResponse(ApiBase):
success: bool = True
# @TODO fix circular references
# records: List[FueltechSchema]
class APINetworkRegion(ApiBase):
code: str
timezone: Optional[str]
class APINetworkSchema(ApiBase):
code: str
country: str
label: str
regions: Optional[List[APINetworkRegion]]
timezone: Optional[str] = Field(None, description="Network timezone")
interval_size: int = Field(..., description="Size of network interval in minutes")
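# Illustrative example (added for clarity; not part of the original module).
# The field values below are made up to show how the schema validates input;
# they are not real OpenNEM data.
if __name__ == "__main__":
    region = APINetworkRegion(code="NSW1", timezone="Australia/Sydney")
    network = APINetworkSchema(
        code="NEM", country="au", label="NEM",
        regions=[region], timezone="Australia/Brisbane", interval_size=5,
    )
    print(network.json())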
| 21.725
| 86
| 0.696203
| 781
| 0.898734
| 0
| 0
| 0
| 0
| 0
| 0
| 117
| 0.134638
|
6c88a8da20ae18c022b5a983db40aed8a4ffb346
| 304
|
py
|
Python
|
test-examples/issue_678_reproduce.py
|
tlambert03/image-demos
|
a2974bcc7f040fd4d14e659c4cbfeabcf726c707
|
[
"BSD-3-Clause"
] | null | null | null |
test-examples/issue_678_reproduce.py
|
tlambert03/image-demos
|
a2974bcc7f040fd4d14e659c4cbfeabcf726c707
|
[
"BSD-3-Clause"
] | null | null | null |
test-examples/issue_678_reproduce.py
|
tlambert03/image-demos
|
a2974bcc7f040fd4d14e659c4cbfeabcf726c707
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Test adding 4D followed by 5D image layers to the viewer
Initially only 2 sliders should be present, then a third slider should be
created.
"""
import numpy as np
from skimage import data
import napari
with napari.gui_qt():
viewer = napari.view_image(np.random.random((2, 10, 50, 100, 100)))
| 19
| 72
| 0.733553
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 147
| 0.483553
|
6c8a1fd6e1a402d55f7841fa1d528a488bdf0b86
| 49,401
|
py
|
Python
|
tests/subsystem_tests.py
|
Goodpaster/QSoME
|
8b24d58dfab5ac0d90fd84b8519b25864eee6f74
|
[
"Apache-2.0"
] | 7
|
2018-09-28T21:40:08.000Z
|
2021-06-10T10:44:39.000Z
|
tests/subsystem_tests.py
|
Goodpaster/QSoME
|
8b24d58dfab5ac0d90fd84b8519b25864eee6f74
|
[
"Apache-2.0"
] | 1
|
2021-07-06T12:28:32.000Z
|
2021-07-29T20:34:13.000Z
|
tests/subsystem_tests.py
|
Goodpaster/QSoME
|
8b24d58dfab5ac0d90fd84b8519b25864eee6f74
|
[
"Apache-2.0"
] | 1
|
2021-04-08T12:28:44.000Z
|
2021-04-08T12:28:44.000Z
|
# A module to tests the methods of the Subsystem
import unittest
import os
import shutil
import re
from copy import copy
from qsome import cluster_subsystem, cluster_supersystem
from pyscf import gto, lib, scf, dft, cc, mp, mcscf, tools
from pyscf.cc import ccsd_t, uccsd_t
import numpy as np
import tempfile
class TestEnvSubsystemMethods(unittest.TestCase):
def setUp(self):
mol = gto.Mole()
mol.verbose = 3
mol.atom = '''
O 0.0 0.0 0.0
H 0.758602 0.00 0.504284
H 0.758602 0.00 -0.504284'''
mol.basis = '3-21g'
mol.build()
self.cs_mol = mol
os_mol = gto.Mole()
os_mol.verbose = 3
os_mol.atom = '''
Li 0.0 0.0 0.0
'''
os_mol.basis = '3-21g'
os_mol.spin = 1
os_mol.build()
self.os_mol = os_mol
self.env_method = 'lda'
#@unittest.skip
def test_init_density(self):
subsys = cluster_subsystem.ClusterEnvSubSystem(self.cs_mol, self.env_method)
subsys.init_density()
init_dmat = scf.get_init_guess(self.cs_mol)
self.assertTrue(np.allclose(init_dmat, subsys.get_dmat()))
subsys = cluster_subsystem.ClusterEnvSubSystem(self.cs_mol, self.env_method, init_guess='atom')
subsys.init_density()
init_dmat = scf.get_init_guess(self.cs_mol, key='atom')
self.assertTrue(np.allclose(init_dmat, subsys.get_dmat()))
subsys = cluster_subsystem.ClusterEnvSubSystem(self.cs_mol, self.env_method, init_guess='supmol')
subsys.init_density()
init_dmat = scf.get_init_guess(self.cs_mol)
self.assertTrue(np.allclose(init_dmat, subsys.get_dmat()))
subsys = cluster_subsystem.ClusterEnvSubSystem(self.cs_mol, self.env_method, init_guess='submol')
subsys.init_density()
scf_obj = subsys.env_scf
scf_obj.kernel()
init_dmat = scf_obj.make_rdm1()
self.assertTrue(np.allclose(init_dmat, subsys.get_dmat()))
#Test Unrestricted Open Shell.
subsys = cluster_subsystem.ClusterEnvSubSystem(self.os_mol, self.env_method, unrestricted=True)
subsys.init_density()
init_dmat = scf.uhf.get_init_guess(self.os_mol)
self.assertTrue(np.allclose(init_dmat, subsys.get_dmat()))
subsys = cluster_subsystem.ClusterEnvSubSystem(self.os_mol, self.env_method, init_guess='submol', unrestricted=True)
subsys.init_density()
scf_obj = subsys.env_scf
scf_obj.kernel()
init_dmat = scf_obj.make_rdm1()
self.assertTrue(np.allclose(init_dmat, subsys.get_dmat()))
#Test Restricted Open Shell.
subsys = cluster_subsystem.ClusterEnvSubSystem(self.os_mol, self.env_method)
subsys.init_density()
init_dmat = scf.rhf.get_init_guess(self.os_mol)
init_dmat = [init_dmat/2., init_dmat/2.]
self.assertTrue(np.allclose(init_dmat, subsys.get_dmat()))
subsys = cluster_subsystem.ClusterEnvSubSystem(self.os_mol, self.env_method, init_guess='submol')
subsys.init_density()
scf_obj = subsys.env_scf
scf_obj.kernel()
init_dmat = scf_obj.make_rdm1()
self.assertTrue(np.allclose(init_dmat, subsys.get_dmat()))
#@unittest.skip
def test_update_emb_pot(self):
#Closed Shell
subsys = cluster_subsystem.ClusterEnvSubSystem(self.cs_mol, self.env_method)
subsys.init_density()
subsys.update_subsys_fock()
subsys.emb_fock[0] = subsys.subsys_fock[0]
subsys.emb_fock[1] = subsys.subsys_fock[1]
dim0 = subsys.emb_fock[0].shape[0]
dim1 = subsys.emb_fock[1].shape[1]
emb_fock = np.array([np.random.rand(dim0, dim1), np.random.rand(dim0, dim1)])
subsys.emb_fock = emb_fock
subsys.update_emb_pot()
true_emb_pot = [emb_fock[0] - subsys.subsys_fock[0],
emb_fock[1] - subsys.subsys_fock[1]]
self.assertTrue(np.array_equal(true_emb_pot, subsys.emb_pot))
#Unrestricted
subsys = cluster_subsystem.ClusterEnvSubSystem(self.os_mol, self.env_method, unrestricted=True)
subsys.init_density()
subsys.update_subsys_fock()
subsys.emb_fock[0] = subsys.subsys_fock[0]
subsys.emb_fock[1] = subsys.subsys_fock[1]
dim0 = subsys.emb_fock[0].shape[0]
dim1 = subsys.emb_fock[1].shape[1]
emb_fock = np.array([np.random.rand(dim0, dim1), np.random.rand(dim0, dim1)])
subsys.emb_fock = emb_fock
subsys.update_emb_pot()
true_emb_pot = [emb_fock[0] - subsys.subsys_fock[0],
emb_fock[1] - subsys.subsys_fock[1]]
self.assertTrue(np.array_equal(true_emb_pot, subsys.emb_pot))
#Restricted Open Shell
subsys = cluster_subsystem.ClusterEnvSubSystem(self.os_mol, self.env_method)
subsys.init_density()
subsys.update_subsys_fock()
subsys.emb_fock[0] = subsys.subsys_fock[0]
subsys.emb_fock[1] = subsys.subsys_fock[1]
dim0 = subsys.emb_fock[0].shape[0]
dim1 = subsys.emb_fock[1].shape[1]
emb_fock = np.array([np.random.rand(dim0, dim1), np.random.rand(dim0, dim1)])
subsys.emb_fock = emb_fock
subsys.update_emb_pot()
true_emb_pot = [emb_fock[0] - subsys.subsys_fock[0],
emb_fock[1] - subsys.subsys_fock[1]]
self.assertTrue(np.array_equal(true_emb_pot, subsys.emb_pot))
#@unittest.skip
def test_env_proj_e(self):
subsys = cluster_subsystem.ClusterEnvSubSystem(self.cs_mol, self.env_method)
subsys.init_density()
sub_dmat = subsys.get_dmat()
# With 0 potential.
no_proj_e = subsys.get_env_proj_e()
self.assertEqual(no_proj_e, 0.0)
# With potential
dim0 = subsys.emb_pot[0].shape[0]
dim1 = subsys.emb_pot[1].shape[1]
proj_potent = [np.random.rand(dim0, dim1), np.random.rand(dim0, dim1)]
test_proj_e = np.einsum('ij,ji', proj_potent[0],
(sub_dmat/2.)).real
test_proj_e += np.einsum('ij,ji', proj_potent[1],
(sub_dmat/2.)).real
subsys.proj_pot = proj_potent
proj_e = subsys.get_env_proj_e()
self.assertEqual(test_proj_e, proj_e)
# Unrestricted Open Shell
subsys = cluster_subsystem.ClusterEnvSubSystem(self.os_mol, self.env_method, unrestricted=True)
subsys.init_density()
sub_dmat = subsys.get_dmat()
# With 0 potential.
no_proj_e = subsys.get_env_proj_e()
self.assertEqual(no_proj_e, 0.0)
# With potential
dim0 = subsys.emb_pot[0].shape[0]
dim1 = subsys.emb_pot[1].shape[1]
proj_potent = [np.random.rand(dim0, dim1), np.random.rand(dim0, dim1)]
test_proj_e = np.einsum('ij,ji', proj_potent[0],
sub_dmat[0]).real
test_proj_e += np.einsum('ij,ji', proj_potent[1],
sub_dmat[1]).real
subsys.proj_pot = proj_potent
proj_e = subsys.get_env_proj_e()
self.assertEqual(test_proj_e, proj_e)
# Restricted Open Shell
subsys = cluster_subsystem.ClusterEnvSubSystem(self.os_mol, self.env_method)
subsys.init_density()
sub_dmat = subsys.get_dmat()
# With 0 potential.
no_proj_e = subsys.get_env_proj_e()
self.assertEqual(no_proj_e, 0.0)
# With potential
dim0 = subsys.emb_pot[0].shape[0]
dim1 = subsys.emb_pot[1].shape[1]
proj_potent = [np.random.rand(dim0, dim1), np.random.rand(dim0, dim1)]
test_proj_e = np.einsum('ij,ji', proj_potent[0],
sub_dmat[0]).real
test_proj_e += np.einsum('ij,ji', proj_potent[1],
sub_dmat[1]).real
subsys.proj_pot = proj_potent
proj_e = subsys.get_env_proj_e()
self.assertEqual(test_proj_e, proj_e)
#@unittest.skip
def test_env_embed_e(self):
# Closed Shell
subsys = cluster_subsystem.ClusterEnvSubSystem(self.cs_mol, self.env_method)
subsys.init_density()
sub_dmat = subsys.get_dmat()
# With 0 potential.
no_embed_e = subsys.get_env_emb_e()
self.assertEqual(no_embed_e, 0.0)
# With potential
dim0 = subsys.emb_pot[0].shape[0]
dim1 = subsys.emb_pot[1].shape[1]
emb_fock = [np.random.rand(dim0, dim1), np.random.rand(dim0, dim1)]
emb_pot = [emb_fock[0] - subsys.subsys_fock[0],
emb_fock[1] - subsys.subsys_fock[1]]
true_emb_e = np.einsum('ij,ji', emb_pot[0],
(sub_dmat/2.)).real
true_emb_e += np.einsum('ij,ji', emb_pot[1],
(sub_dmat/2.)).real
subsys.emb_fock = emb_fock
emb_e = subsys.get_env_emb_e()
self.assertEqual(true_emb_e, emb_e)
# Unrestricted Open Shell
subsys = cluster_subsystem.ClusterEnvSubSystem(self.os_mol, self.env_method, unrestricted=True)
subsys.init_density()
sub_dmat = subsys.get_dmat()
# With 0 potential.
no_embed_e = subsys.get_env_emb_e()
self.assertEqual(no_embed_e, 0.0)
# With potential
dim0 = subsys.emb_pot[0].shape[0]
dim1 = subsys.emb_pot[1].shape[1]
emb_fock = [np.random.rand(dim0, dim1), np.random.rand(dim0, dim1)]
        emb_pot = [emb_fock[0] - subsys.subsys_fock[0],
                   emb_fock[1] - subsys.subsys_fock[1]]
true_emb_e = np.einsum('ij,ji', emb_pot[0],
sub_dmat[0]).real
true_emb_e += np.einsum('ij,ji', emb_pot[1],
sub_dmat[1]).real
subsys.emb_fock = emb_fock
embed_e = subsys.get_env_emb_e()
self.assertEqual(true_emb_e, embed_e)
# Restricted Open Shell
subsys = cluster_subsystem.ClusterEnvSubSystem(self.os_mol, self.env_method)
subsys.init_density()
sub_dmat = subsys.get_dmat()
# With 0 potential.
no_embed_e = subsys.get_env_emb_e()
self.assertEqual(no_embed_e, 0.0)
# With potential
dim0 = subsys.emb_pot[0].shape[0]
dim1 = subsys.emb_pot[1].shape[1]
emb_fock = [np.random.rand(dim0, dim1), np.random.rand(dim0, dim1)]
emb_pot = [emb_fock[0] - subsys.subsys_fock[0],
emb_fock[1] - subsys.subsys_fock[1]]
true_emb_e = np.einsum('ij,ji', emb_pot[0],
sub_dmat[0]).real
true_emb_e += np.einsum('ij,ji', emb_pot[1],
sub_dmat[1]).real
subsys.emb_fock = emb_fock
embed_e = subsys.get_env_emb_e()
self.assertEqual(true_emb_e, embed_e)
#@unittest.skip
def test_get_env_elec_energy(self):
# Closed Shell
subsys = cluster_subsystem.ClusterEnvSubSystem(self.cs_mol, self.env_method)
subsys.init_density()
# Default test
def_elec_e = subsys.get_env_elec_energy()
sub_dmat = subsys.get_dmat()
test_scf = dft.RKS(self.cs_mol)
test_scf.xc = self.env_method
test_elec_e = test_scf.energy_elec(dm=sub_dmat)
self.assertAlmostEqual(test_elec_e[0], def_elec_e, delta=1e-10)
# With just embedding potential
dim0 = subsys.emb_pot[0].shape[0]
dim1 = subsys.emb_pot[1].shape[1]
emb_fock = [np.random.rand(dim0, dim1), np.random.rand(dim0, dim1)]
emb_pot = [emb_fock[0] - subsys.subsys_fock[0],
emb_fock[1] - subsys.subsys_fock[1]]
test_embed_e = np.einsum('ij,ji', (emb_pot[0] + emb_pot[1])/2.,
(sub_dmat)).real
def_elec_e_embed = subsys.get_env_elec_energy(emb_pot=emb_pot)
def_emb_e = def_elec_e_embed - def_elec_e
self.assertAlmostEqual(test_embed_e, def_emb_e, delta=1e-10)
# With just projection potential
proj_potent = [np.random.rand(dim0, dim1), np.random.rand(dim0, dim1)]
test_proj_e = np.einsum('ij,ji', (proj_potent[0] + proj_potent[1])/2.,
(sub_dmat)).real
def_elec_e_proj = subsys.get_env_elec_energy(proj_pot=proj_potent)
def_proj_e = def_elec_e_proj - def_elec_e
self.assertAlmostEqual(test_proj_e, def_proj_e, delta=1e-10)
# With both.
emb_fock = [np.random.rand(dim0, dim1), np.random.rand(dim0, dim1)]
emb_pot = [emb_fock[0] - subsys.subsys_fock[0],
emb_fock[1] - subsys.subsys_fock[1]]
proj_potent = [np.random.rand(dim0, dim1), np.random.rand(dim0, dim1)]
test_proj_e = np.einsum('ij,ji', (proj_potent[0] + proj_potent[1])/2.,
(sub_dmat)).real
test_embed_e = np.einsum('ij,ji', (emb_pot[0] + emb_pot[1])/2.,
(sub_dmat)).real
def_elec_e_tot = subsys.get_env_elec_energy(emb_pot=emb_pot, proj_pot=proj_potent)
def_proj_e = def_elec_e_tot - def_elec_e
self.assertAlmostEqual(test_proj_e + test_embed_e, def_proj_e, delta=1e-10)
# Unrestricted Open Shell
subsys = cluster_subsystem.ClusterEnvSubSystem(self.os_mol, self.env_method, unrestricted=True)
subsys.init_density()
# Default test
def_elec_e = subsys.get_env_elec_energy()
sub_dmat = subsys.env_dmat
test_scf = dft.UKS(self.os_mol)
test_scf.xc = self.env_method
test_elec_e = test_scf.energy_elec(dm=sub_dmat)
self.assertAlmostEqual(test_elec_e[0], def_elec_e, delta=1e-10)
# With just embedding potential
dim0 = subsys.emb_pot[0].shape[0]
dim1 = subsys.emb_pot[1].shape[1]
emb_fock = [np.random.rand(dim0, dim1), np.random.rand(dim0, dim1)]
        emb_pot = [emb_fock[0] - subsys.subsys_fock[0],
                   emb_fock[1] - subsys.subsys_fock[1]]
test_embed_e = np.einsum('ij,ji', emb_pot[0],
sub_dmat[0]).real
test_embed_e += np.einsum('ij,ji', emb_pot[1],
sub_dmat[1]).real
def_elec_e_embed = subsys.get_env_elec_energy(emb_pot=emb_pot)
def_emb_e = def_elec_e_embed - def_elec_e
self.assertAlmostEqual(test_embed_e, def_emb_e, delta=1e-10)
# With just projection potential
proj_potent = [np.random.rand(dim0, dim1), np.random.rand(dim0, dim1)]
test_proj_e = np.einsum('ij,ji', proj_potent[0],
sub_dmat[0]).real
test_proj_e += np.einsum('ij,ji', proj_potent[1],
sub_dmat[1]).real
def_elec_e_proj = subsys.get_env_elec_energy(proj_pot=proj_potent)
def_proj_e = def_elec_e_proj - def_elec_e
self.assertAlmostEqual(test_proj_e, def_proj_e, delta=1e-10)
# With both.
emb_fock = [np.random.rand(dim0, dim1), np.random.rand(dim0, dim1)]
        emb_pot = [emb_fock[0] - subsys.subsys_fock[0],
                   emb_fock[1] - subsys.subsys_fock[1]]
proj_potent = [np.random.rand(dim0, dim1), np.random.rand(dim0, dim1)]
test_proj_e = np.einsum('ij,ji', proj_potent[0],
sub_dmat[0]).real
test_proj_e += np.einsum('ij,ji', proj_potent[1],
sub_dmat[1]).real
test_embed_e = np.einsum('ij,ji', emb_pot[0],
sub_dmat[0]).real
test_embed_e += np.einsum('ij,ji', emb_pot[1],
sub_dmat[1]).real
def_elec_e_tot = subsys.get_env_elec_energy(emb_pot=emb_pot, proj_pot=proj_potent)
def_proj_emb_e = def_elec_e_tot - def_elec_e
self.assertAlmostEqual(test_proj_e + test_embed_e, def_proj_emb_e, delta=1e-10)
# Restricted Open Shell
subsys = cluster_subsystem.ClusterEnvSubSystem(self.os_mol, self.env_method)
subsys.init_density()
# Default test
def_elec_e = subsys.get_env_elec_energy()
sub_dmat = subsys.env_dmat
test_scf = dft.ROKS(self.os_mol)
test_scf.xc = self.env_method
test_elec_e = test_scf.energy_elec(dm=sub_dmat)
self.assertAlmostEqual(test_elec_e[0], def_elec_e, delta=1e-10)
# With just embedding potential
dim0 = subsys.emb_pot[0].shape[0]
dim1 = subsys.emb_pot[1].shape[1]
emb_fock = [np.random.rand(dim0, dim1), np.random.rand(dim0, dim1)]
emb_pot = [emb_fock[0] - subsys.subsys_fock[0],
emb_fock[1] - subsys.subsys_fock[1]]
test_embed_e = np.einsum('ij,ji', emb_pot[0],
sub_dmat[0]).real
test_embed_e += np.einsum('ij,ji', emb_pot[1],
sub_dmat[1]).real
def_elec_e_embed = subsys.get_env_elec_energy(emb_pot=emb_pot)
def_emb_e = def_elec_e_embed - def_elec_e
self.assertAlmostEqual(test_embed_e, def_emb_e, delta=1e-10)
        # With just projection potential
proj_potent = [np.random.rand(dim0, dim1), np.random.rand(dim0, dim1)]
test_proj_e = np.einsum('ij,ji', proj_potent[0],
sub_dmat[0]).real
test_proj_e += np.einsum('ij,ji', proj_potent[1],
sub_dmat[1]).real
def_elec_e_proj = subsys.get_env_elec_energy(proj_pot=proj_potent)
def_proj_e = def_elec_e_proj - def_elec_e
self.assertAlmostEqual(test_proj_e, def_proj_e, delta=1e-10)
# With both.
emb_fock = [np.random.rand(dim0, dim1), np.random.rand(dim0, dim1)]
emb_pot = [emb_fock[0] - subsys.subsys_fock[0],
emb_fock[1] - subsys.subsys_fock[1]]
proj_potent = [np.random.rand(dim0, dim1), np.random.rand(dim0, dim1)]
test_proj_e = np.einsum('ij,ji', proj_potent[0],
sub_dmat[0]).real
test_proj_e += np.einsum('ij,ji', proj_potent[1],
sub_dmat[1]).real
test_embed_e = np.einsum('ij,ji', emb_pot[0],
sub_dmat[0]).real
test_embed_e += np.einsum('ij,ji', emb_pot[1],
sub_dmat[1]).real
def_elec_e_tot = subsys.get_env_elec_energy(emb_pot=emb_pot, proj_pot=proj_potent)
def_proj_emb_e = def_elec_e_tot - def_elec_e
self.assertAlmostEqual(test_proj_e + test_embed_e, def_proj_emb_e, delta=1e-10)
#@unittest.skip
def test_get_env_energy(self):
subsys = cluster_subsystem.ClusterEnvSubSystem(self.cs_mol, self.env_method)
subsys.init_density()
sub_dmat = subsys.get_dmat()
dim0 = subsys.emb_pot[0].shape[0]
dim1 = subsys.emb_pot[1].shape[1]
emb_fock = [np.random.rand(dim0, dim1), np.random.rand(dim0, dim1)]
emb_pot = [emb_fock[0] - subsys.subsys_fock[0],
emb_fock[1] - subsys.subsys_fock[1]]
proj_potent = [np.random.rand(dim0, dim1), np.random.rand(dim0, dim1)]
true_proj_e = np.einsum('ij,ji', (proj_potent[0] + proj_potent[1])/2.,
(sub_dmat)).real
true_embed_e = np.einsum('ij,ji', (emb_pot[0] + emb_pot[1])/2.,
(sub_dmat)).real
true_scf = dft.RKS(self.cs_mol)
true_scf.xc = self.env_method
true_subsys_e = true_scf.energy_tot(dm=sub_dmat)
subsys_e_tot = subsys.get_env_energy(emb_pot=emb_pot, proj_pot=proj_potent)
true_e_tot = true_subsys_e + true_proj_e + true_embed_e
self.assertAlmostEqual(true_e_tot, subsys_e_tot, delta=1e-10)
#Unrestricted
subsys = cluster_subsystem.ClusterEnvSubSystem(self.os_mol, self.env_method, unrestricted=True)
subsys.init_density()
sub_dmat = subsys.get_dmat()
dim0 = subsys.emb_pot[0].shape[0]
dim1 = subsys.emb_pot[1].shape[1]
emb_fock = [np.random.rand(dim0, dim1), np.random.rand(dim0, dim1)]
emb_pot = [emb_fock[0] - subsys.subsys_fock[0],
emb_fock[1] - subsys.subsys_fock[1]]
proj_potent = [np.random.rand(dim0, dim1), np.random.rand(dim0, dim1)]
true_proj_e = np.einsum('ij,ji', proj_potent[0],
sub_dmat[0].real)
true_proj_e += np.einsum('ij,ji', proj_potent[1],
sub_dmat[1].real)
true_embed_e = np.einsum('ij,ji', emb_pot[0],
(sub_dmat[0]).real)
true_embed_e += np.einsum('ij,ji', emb_pot[1],
(sub_dmat[1]).real)
true_scf = dft.UKS(self.os_mol)
true_scf.xc = self.env_method
true_subsys_e = true_scf.energy_tot(dm=sub_dmat)
subsys_e_tot = subsys.get_env_energy(emb_pot=emb_pot, proj_pot=proj_potent)
true_e_tot = true_subsys_e + true_proj_e + true_embed_e
self.assertAlmostEqual(true_e_tot, subsys_e_tot, delta=1e-10)
#Restricted Open Shell
subsys = cluster_subsystem.ClusterEnvSubSystem(self.os_mol, self.env_method)
subsys.init_density()
sub_dmat = subsys.get_dmat()
dim0 = subsys.emb_pot[0].shape[0]
dim1 = subsys.emb_pot[1].shape[1]
emb_fock = [np.random.rand(dim0, dim1), np.random.rand(dim0, dim1)]
emb_pot = [emb_fock[0] - subsys.subsys_fock[0],
emb_fock[1] - subsys.subsys_fock[1]]
proj_potent = [np.random.rand(dim0, dim1), np.random.rand(dim0, dim1)]
true_proj_e = np.einsum('ij,ji', proj_potent[0],
sub_dmat[0].real)
true_proj_e += np.einsum('ij,ji', proj_potent[1],
sub_dmat[1].real)
true_embed_e = np.einsum('ij,ji', emb_pot[0],
(sub_dmat[0]).real)
true_embed_e += np.einsum('ij,ji', emb_pot[1],
(sub_dmat[1]).real)
true_scf = dft.ROKS(self.os_mol)
true_scf.xc = self.env_method
true_subsys_e = true_scf.energy_tot(dm=sub_dmat)
subsys_e_tot = subsys.get_env_energy(emb_pot=emb_pot, proj_pot=proj_potent)
true_e_tot = true_subsys_e + true_proj_e + true_embed_e
self.assertAlmostEqual(true_e_tot, subsys_e_tot, delta=1e-10)
#@unittest.skip
def test_save_orbs(self):
import tempfile
from pyscf.tools import molden
t_file = tempfile.NamedTemporaryFile()
subsys = cluster_subsystem.ClusterEnvSubSystem(self.cs_mol, self.env_method, filename=t_file.name)
subsys.init_density()
subsys.chkfile_index = '0'
subsys.diagonalize()
sub_mo_coeff = subsys.env_mo_coeff
sub_mo_energy = subsys.env_mo_energy
sub_mo_occ = subsys.env_mo_occ
chkfile_index = subsys.chkfile_index
subsys.save_orbital_file()
true_ftmp = tempfile.NamedTemporaryFile()
molden.from_mo(self.cs_mol, true_ftmp.name, sub_mo_coeff[0], ene=sub_mo_energy[0], occ=(sub_mo_occ[0] * 2.))
with open(t_file.name + '_' + chkfile_index + '_subenv.molden', 'r') as fin:
test_den_data = fin.read()
with open(true_ftmp.name, 'r') as fin:
true_den_data = fin.read()
self.assertEqual(test_den_data, true_den_data)
#Unrestricted open shell
subsys = cluster_subsystem.ClusterEnvSubSystem(self.os_mol, self.env_method, unrestricted=True, filename=t_file.name)
subsys.init_density()
subsys.chkfile_index = '0'
subsys.diagonalize()
sub_mo_coeff = subsys.env_mo_coeff
sub_mo_energy = subsys.env_mo_energy
sub_mo_occ = subsys.env_mo_occ
chkfile_index = subsys.chkfile_index
subsys.save_orbital_file()
true_ftmp = tempfile.NamedTemporaryFile()
molden.from_mo(self.os_mol, true_ftmp.name, sub_mo_coeff[0], spin='Alpha', ene=sub_mo_energy[0], occ=sub_mo_occ[0])
with open(t_file.name + '_' + chkfile_index + '_subenv_alpha.molden', 'r') as fin:
test_den_data = fin.read()
with open(true_ftmp.name, 'r') as fin:
true_den_data = fin.read()
self.assertEqual(test_den_data, true_den_data)
true_ftmp = tempfile.NamedTemporaryFile()
molden.from_mo(self.os_mol, true_ftmp.name, sub_mo_coeff[1], spin='Beta', ene=sub_mo_energy[1], occ=sub_mo_occ[1])
with open(t_file.name + '_' + chkfile_index + '_subenv_beta.molden', 'r') as fin:
test_den_data = fin.read()
with open(true_ftmp.name, 'r') as fin:
true_den_data = fin.read()
self.assertEqual(test_den_data, true_den_data)
#Restricted Open Shell
subsys = cluster_subsystem.ClusterEnvSubSystem(self.os_mol, self.env_method, filename=t_file.name)
subsys.init_density()
subsys.chkfile_index = '0'
subsys.diagonalize()
sub_mo_coeff = subsys.env_mo_coeff
sub_mo_energy = subsys.env_mo_energy
sub_mo_occ = subsys.env_mo_occ
chkfile_index = subsys.chkfile_index
subsys.save_orbital_file()
true_ftmp = tempfile.NamedTemporaryFile()
molden.from_mo(self.os_mol, true_ftmp.name, sub_mo_coeff[0], ene=sub_mo_energy[0], occ=(sub_mo_occ[0] + sub_mo_occ[1]))
with open(t_file.name + '_' + chkfile_index + '_subenv.molden', 'r') as fin:
test_den_data = fin.read()
with open(true_ftmp.name, 'r') as fin:
true_den_data = fin.read()
self.assertEqual(test_den_data, true_den_data)
#@unittest.skip
def test_save_density(self):
import tempfile
from pyscf.tools import cubegen
t_file = tempfile.NamedTemporaryFile()
subsys = cluster_subsystem.ClusterEnvSubSystem(self.cs_mol, self.env_method, filename=t_file.name)
subsys.init_density()
subsys.chkfile_index = '0'
subsys.diagonalize()
subsys.save_density_file()
sub_dmat = subsys.get_dmat()
true_ftmp = tempfile.NamedTemporaryFile()
cubegen.density(self.cs_mol, true_ftmp.name, sub_dmat)
with open(t_file.name + '_' + subsys.chkfile_index + '_subenv.cube', 'r') as fin:
test_den_data = fin.read()
with open(true_ftmp.name, 'r') as fin:
true_den_data = fin.read()
self.assertEqual(test_den_data[99:], true_den_data[99:])
#Unrestricted open shell
t_file = tempfile.NamedTemporaryFile()
subsys = cluster_subsystem.ClusterEnvSubSystem(self.os_mol, self.env_method, filename=t_file.name, unrestricted=True)
subsys.init_density()
subsys.chkfile_index = '0'
subsys.diagonalize()
subsys.save_density_file()
sub_dmat = subsys.get_dmat()
true_ftmp = tempfile.NamedTemporaryFile()
cubegen.density(self.os_mol, true_ftmp.name, sub_dmat[0])
with open(t_file.name + '_' + subsys.chkfile_index + '_subenv_alpha.cube', 'r') as fin:
test_den_data = fin.read()
with open(true_ftmp.name, 'r') as fin:
true_den_data = fin.read()
true_ftmp = tempfile.NamedTemporaryFile()
cubegen.density(self.os_mol, true_ftmp.name, sub_dmat[1])
with open(t_file.name + '_' + subsys.chkfile_index + '_subenv_beta.cube', 'r') as fin:
test_den_data = fin.read()
with open(true_ftmp.name, 'r') as fin:
true_den_data = fin.read()
#Restricted open shell
t_file = tempfile.NamedTemporaryFile()
subsys = cluster_subsystem.ClusterEnvSubSystem(self.os_mol, self.env_method, filename=t_file.name)
subsys.init_density()
subsys.chkfile_index = '0'
subsys.diagonalize()
subsys.save_density_file()
sub_dmat = subsys.get_dmat()
true_ftmp = tempfile.NamedTemporaryFile()
cubegen.density(self.os_mol, true_ftmp.name, sub_dmat[0])
with open(t_file.name + '_' + subsys.chkfile_index + '_subenv_alpha.cube', 'r') as fin:
test_den_data = fin.read()
with open(true_ftmp.name, 'r') as fin:
true_den_data = fin.read()
true_ftmp = tempfile.NamedTemporaryFile()
cubegen.density(self.os_mol, true_ftmp.name, sub_dmat[1])
with open(t_file.name + '_' + subsys.chkfile_index + '_subenv_beta.cube', 'r') as fin:
test_den_data = fin.read()
with open(true_ftmp.name, 'r') as fin:
true_den_data = fin.read()
#@unittest.skip
def test_save_spin_density(self):
import tempfile
from pyscf.tools import cubegen
#Unrestricted Open Shell
t_file = tempfile.NamedTemporaryFile()
subsys = cluster_subsystem.ClusterEnvSubSystem(self.os_mol, self.env_method, filename=t_file.name, unrestricted=True)
subsys.init_density()
subsys.chkfile_index = '0'
subsys.diagonalize()
subsys.save_spin_density_file()
sub_dmat = subsys.get_dmat()
true_ftmp = tempfile.NamedTemporaryFile()
cubegen.density(self.os_mol, true_ftmp.name, np.subtract(sub_dmat[0],sub_dmat[1]))
with open(t_file.name + '_' + subsys.chkfile_index + '_subenv_spinden.cube', 'r') as fin:
test_den_data = fin.read()
with open(true_ftmp.name, 'r') as fin:
true_den_data = fin.read()
self.assertEqual(test_den_data[99:], true_den_data[99:])
#Restricted Open Shell
t_file = tempfile.NamedTemporaryFile()
subsys = cluster_subsystem.ClusterEnvSubSystem(self.os_mol, self.env_method, filename=t_file.name)
subsys.init_density()
subsys.chkfile_index = '0'
subsys.diagonalize()
subsys.save_spin_density_file()
sub_dmat = subsys.get_dmat()
true_ftmp = tempfile.NamedTemporaryFile()
cubegen.density(self.os_mol, true_ftmp.name, np.subtract(sub_dmat[0],sub_dmat[1]))
with open(t_file.name + '_' + subsys.chkfile_index + '_subenv_spinden.cube', 'r') as fin:
test_den_data = fin.read()
with open(true_ftmp.name, 'r') as fin:
true_den_data = fin.read()
self.assertEqual(test_den_data[99:], true_den_data[99:])
#@unittest.skip
def test_save_read_chkfile(self):
import h5py
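        # Round trip: save MO data to the HDF5 chkfile, check its raw contents,
        # then rebuild a second subsystem from that chkfile and compare.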
t_file = tempfile.NamedTemporaryFile()
subsys = cluster_subsystem.ClusterEnvSubSystem(self.cs_mol, self.env_method, filename=t_file.name)
subsys.chkfile_index = '0'
subsys.init_density()
subsys.diagonalize()
subsys.save_chkfile()
with h5py.File(t_file.name + '.hdf5', 'r') as hf:
subsys_coeff = hf[f'subsystem:0/mo_coeff']
sub_env_mo_coeff = subsys_coeff[:]
subsys_occ = hf[f'subsystem:0/mo_occ']
sub_env_mo_occ = subsys_occ[:]
subsys_energy = hf[f'subsystem:0/mo_energy']
sub_env_mo_energy = subsys_energy[:]
self.assertTrue(np.array_equal(subsys.env_mo_coeff, sub_env_mo_coeff))
self.assertTrue(np.array_equal(subsys.env_mo_occ, sub_env_mo_occ))
self.assertTrue(np.array_equal(subsys.env_mo_energy, sub_env_mo_energy))
subsys2 = cluster_subsystem.ClusterEnvSubSystem(self.cs_mol, self.env_method, filename=t_file.name, init_guess='chk')
subsys2.chkfile_index = '0'
subsys2.init_density()
self.assertTrue(np.array_equal(subsys.env_mo_coeff, subsys2.env_mo_coeff))
self.assertTrue(np.array_equal(subsys.env_mo_occ, subsys2.env_mo_occ))
self.assertTrue(np.array_equal(subsys.env_mo_energy, subsys2.env_mo_energy))
#@unittest.skip
def test_diagonalize(self):
# Closed Shell
# Unsure how to test this with embedding potential or projection pot.
subsys = cluster_subsystem.ClusterEnvSubSystem(self.cs_mol, self.env_method)
subsys.chkfile_index = '0'
subsys.init_density()
subsys.diagonalize()
test_scf = dft.RKS(self.cs_mol)
test_scf.max_cycle = 1
test_scf.xc = self.env_method
test_scf.kernel()
test_dmat = test_scf.make_rdm1()
self.assertTrue(np.allclose(test_dmat, subsys.get_dmat()))
# Unrestricted Open Shell
# Unsure how to test this with embedding potential or projection pot.
subsys = cluster_subsystem.ClusterEnvSubSystem(self.os_mol, self.env_method, unrestricted=True)
subsys.chkfile_index = '0'
subsys.init_density()
subsys.diagonalize()
test_scf = dft.UKS(self.os_mol)
test_scf.max_cycle = 1
test_scf.xc = self.env_method
test_scf.kernel()
test_dmat = test_scf.make_rdm1()
self.assertTrue(np.allclose(test_dmat[0], subsys.env_dmat[0]))
self.assertTrue(np.allclose(test_dmat[1], subsys.env_dmat[1]))
# Restricted Open Shell
# Unsure how to test this with embedding potential or projection pot.
subsys = cluster_subsystem.ClusterEnvSubSystem(self.os_mol, self.env_method)
subsys.chkfile_index = '0'
subsys.init_density()
subsys.diagonalize()
test_scf = dft.ROKS(self.os_mol)
test_scf.max_cycle = 1
test_scf.xc = self.env_method
test_scf.kernel()
test_dmat = test_scf.make_rdm1()
self.assertTrue(np.allclose(test_dmat[0], subsys.env_dmat[0]))
self.assertTrue(np.allclose(test_dmat[1], subsys.env_dmat[1]))
class TestHLSubsystemMethods(unittest.TestCase):
def setUp(self):
mol = gto.Mole()
mol.verbose = 3
mol.atom = '''
O 0.000000 0.000000 0.000000
H 0.758602 0.000000 0.504284
H 0.758602 0.000000 -0.504284'''
mol.basis = '3-21g'
mol.build()
self.cs_mol = mol
self.env_method = 'lda'
mol2 = gto.Mole()
mol2.verbose = 3
mol2.atom = '''
Li 0.0 0.0 0.0
'''
mol2.basis = '3-21g'
mol2.spin = 1
mol2.build()
self.os_mol = mol2
#@unittest.skip
def test_hl_init_guess(self):
hl_method = 'hf'
conv_param = 1e-10
subsys = cluster_subsystem.ClusterHLSubSystem(self.cs_mol, self.env_method, hl_method, hl_conv=conv_param, hl_cycles=0, hl_init_guess="1e")
subsys.init_density()
subsys_hl_e = subsys.get_hl_in_env_energy()
test_scf = scf.RHF(self.cs_mol)
correct_dmat = test_scf.get_init_guess(key="1e")
test_scf.max_cycle = 0
test_scf.kernel(dm0=correct_dmat)
correct_dmat = test_scf.make_rdm1()
self.assertTrue(np.allclose(correct_dmat, subsys.hl_sr_scf.make_rdm1()))
subsys = cluster_subsystem.ClusterHLSubSystem(self.cs_mol, self.env_method, hl_method, hl_conv=conv_param, hl_cycles=0, hl_init_guess="minao")
subsys.init_density()
subsys_hl_e = subsys.get_hl_in_env_energy()
test_scf = scf.RHF(self.cs_mol)
correct_dmat = test_scf.get_init_guess(key="minao")
test_scf.max_cycle = 0
test_scf.kernel(dm0=correct_dmat)
correct_dmat = test_scf.make_rdm1()
self.assertTrue(np.allclose(correct_dmat, subsys.hl_sr_scf.make_rdm1()))
subsys = cluster_subsystem.ClusterHLSubSystem(self.cs_mol, self.env_method, hl_method, hl_conv=conv_param, hl_cycles=0, hl_init_guess="atom")
subsys.init_density()
subsys_hl_e = subsys.get_hl_in_env_energy()
test_scf = scf.RHF(self.cs_mol)
correct_dmat = test_scf.get_init_guess(key="atom")
test_scf.max_cycle = 0
test_scf.kernel(dm0=correct_dmat)
correct_dmat = test_scf.make_rdm1()
self.assertTrue(np.allclose(correct_dmat, subsys.hl_sr_scf.make_rdm1()))
#Use the embedded density as the hl guess.
subsys = cluster_subsystem.ClusterHLSubSystem(self.cs_mol, self.env_method, hl_method, hl_conv=conv_param, hl_cycles=0, hl_init_guess="ft")
subsys.init_density()
subsys_hl_e = subsys.get_hl_in_env_energy()
test_scf = scf.RHF(self.cs_mol)
test_scf.max_cycle = 0
test_scf.kernel(dm0=subsys.get_dmat())
correct_dmat = test_scf.make_rdm1()
self.assertTrue(np.allclose(correct_dmat, subsys.hl_sr_scf.make_rdm1()))
#Unrestricted Open Shell
subsys = cluster_subsystem.ClusterHLSubSystem(self.os_mol, self.env_method, hl_method, hl_conv=conv_param, hl_cycles=0, hl_init_guess="1e", hl_unrestricted=True)
subsys.init_density()
subsys_hl_e = subsys.get_hl_in_env_energy()
test_scf = scf.UHF(self.os_mol)
correct_dmat = test_scf.get_init_guess(key="1e")
test_scf.max_cycle = 0
test_scf.kernel(dm0=correct_dmat)
correct_dmat = test_scf.make_rdm1()
self.assertTrue(np.allclose(correct_dmat, subsys.hl_sr_scf.make_rdm1()))
subsys = cluster_subsystem.ClusterHLSubSystem(self.os_mol, self.env_method, hl_method, hl_conv=conv_param, hl_cycles=0, hl_init_guess="minao", hl_unrestricted=True)
subsys.init_density()
subsys_hl_e = subsys.get_hl_in_env_energy()
test_scf = scf.UHF(self.os_mol)
correct_dmat = test_scf.get_init_guess(key="minao")
test_scf.max_cycle = 0
test_scf.kernel(dm0=correct_dmat)
correct_dmat = test_scf.make_rdm1()
self.assertTrue(np.allclose(correct_dmat, subsys.hl_sr_scf.make_rdm1()))
subsys = cluster_subsystem.ClusterHLSubSystem(self.os_mol, self.env_method, hl_method, hl_conv=conv_param, hl_cycles=0, hl_init_guess="atom", hl_unrestricted=True)
subsys.init_density()
subsys_hl_e = subsys.get_hl_in_env_energy()
test_scf = scf.UHF(self.os_mol)
correct_dmat = test_scf.get_init_guess(key="atom")
test_scf.max_cycle = 0
test_scf.kernel(dm0=correct_dmat)
correct_dmat = test_scf.make_rdm1()
self.assertTrue(np.allclose(correct_dmat, subsys.hl_sr_scf.make_rdm1()))
#Use the embedded density as the hl guess.
subsys = cluster_subsystem.ClusterHLSubSystem(self.os_mol, self.env_method, hl_method, hl_conv=conv_param, hl_cycles=0, hl_init_guess="ft", hl_unrestricted=True)
subsys.init_density()
subsys_hl_e = subsys.get_hl_in_env_energy()
test_scf = scf.UHF(self.os_mol)
test_scf.max_cycle = 0
test_scf.kernel(dm0=subsys.get_dmat())
correct_dmat = test_scf.make_rdm1()
self.assertTrue(np.allclose(correct_dmat, subsys.hl_sr_scf.make_rdm1()))
#Restricted Open Shell
subsys = cluster_subsystem.ClusterHLSubSystem(self.os_mol, self.env_method, hl_method, hl_conv=conv_param, hl_cycles=0, hl_init_guess="1e")
subsys.init_density()
subsys_hl_e = subsys.get_hl_in_env_energy()
test_scf = scf.ROHF(self.os_mol)
correct_dmat = test_scf.get_init_guess(key="1e")
test_scf.max_cycle = 0
test_scf.kernel(dm0=correct_dmat)
correct_dmat = test_scf.make_rdm1()
self.assertTrue(np.allclose(correct_dmat, subsys.hl_sr_scf.make_rdm1()))
subsys = cluster_subsystem.ClusterHLSubSystem(self.os_mol, self.env_method, hl_method, hl_conv=conv_param, hl_cycles=0, hl_init_guess="minao")
subsys.init_density()
subsys_hl_e = subsys.get_hl_in_env_energy()
test_scf = scf.ROHF(self.os_mol)
correct_dmat = test_scf.get_init_guess(key="minao")
test_scf.max_cycle = 0
test_scf.kernel(dm0=correct_dmat)
correct_dmat = test_scf.make_rdm1()
self.assertTrue(np.allclose(correct_dmat, subsys.hl_sr_scf.make_rdm1()))
subsys = cluster_subsystem.ClusterHLSubSystem(self.os_mol, self.env_method, hl_method, hl_conv=conv_param, hl_cycles=0, hl_init_guess="atom")
subsys.init_density()
subsys_hl_e = subsys.get_hl_in_env_energy()
test_scf = scf.ROHF(self.os_mol)
correct_dmat = test_scf.get_init_guess(key="atom")
test_scf.max_cycle = 0
test_scf.kernel(dm0=correct_dmat)
correct_dmat = test_scf.make_rdm1()
self.assertTrue(np.allclose(correct_dmat, subsys.hl_sr_scf.make_rdm1()))
subsys = cluster_subsystem.ClusterHLSubSystem(self.os_mol, self.env_method, hl_method, hl_conv=conv_param, hl_cycles=0, hl_init_guess="ft")
subsys.init_density()
subsys_hl_e = subsys.get_hl_in_env_energy()
test_scf = scf.ROHF(self.os_mol)
test_scf.max_cycle = 0
test_scf.kernel(dm0=subsys.get_dmat())
correct_dmat = test_scf.make_rdm1()
self.assertTrue(np.allclose(correct_dmat, subsys.hl_sr_scf.make_rdm1()))
#@unittest.skip
def test_hf_in_env_energy(self):
# Closed shell
hl_method = 'hf'
subsys = cluster_subsystem.ClusterHLSubSystem(self.cs_mol, self.env_method, hl_method)
subsys.init_density()
subsys_hl_e = subsys.get_hl_in_env_energy()
true_scf = scf.RHF(self.cs_mol)
true_e = true_scf.kernel()
self.assertAlmostEqual(subsys_hl_e, true_e, delta=1e-10)
# Unrestricted Open shell
hl_method = 'hf'
subsys = cluster_subsystem.ClusterHLSubSystem(self.os_mol, self.env_method, hl_method, hl_unrestricted=True)
subsys.init_density()
subsys_hl_e = subsys.get_hl_in_env_energy()
true_scf = scf.UHF(self.os_mol)
true_e = true_scf.kernel()
self.assertAlmostEqual(subsys_hl_e, true_e, delta=1e-8)
# Restricted Open shell
hl_method = 'hf'
subsys = cluster_subsystem.ClusterHLSubSystem(self.os_mol, self.env_method, hl_method)
subsys.init_density()
subsys_hl_e = subsys.get_hl_in_env_energy()
true_scf = scf.ROHF(self.os_mol)
true_e = true_scf.kernel()
self.assertAlmostEqual(subsys_hl_e, true_e, delta=1e-10)
def test_dft_in_env_energy(self):
# Closed shell
hl_method = 'm06'
subsys = cluster_subsystem.ClusterHLSubSystem(self.cs_mol, self.env_method, hl_method)
subsys.init_density()
subsys_hl_e = subsys.get_hl_in_env_energy()
true_scf = scf.RKS(self.cs_mol)
true_scf.xc = 'm06'
true_e = true_scf.kernel()
self.assertAlmostEqual(subsys_hl_e, true_e, delta=1e-10)
# Unrestricted Open shell
hl_method = 'm06'
subsys = cluster_subsystem.ClusterHLSubSystem(self.os_mol, self.env_method, hl_method, hl_unrestricted=True)
subsys.init_density()
subsys_hl_e = subsys.get_hl_in_env_energy()
true_scf = scf.UKS(self.os_mol)
true_scf.xc = 'm06'
true_e = true_scf.kernel()
self.assertAlmostEqual(subsys_hl_e, true_e, delta=1e-10)
# Restricted Open shell
hl_method = 'm06'
subsys = cluster_subsystem.ClusterHLSubSystem(self.os_mol, self.env_method, hl_method)
subsys.init_density()
subsys_hl_e = subsys.get_hl_in_env_energy()
true_scf = scf.ROKS(self.os_mol)
true_scf.xc = 'm06'
true_e = true_scf.kernel()
self.assertAlmostEqual(subsys_hl_e, true_e, delta=1e-10)
def test_ccsd_in_env_energy(self):
# Closed shell
hl_method = 'ccsd'
hl_dict = {'froz_core_orbs': 1}
subsys = cluster_subsystem.ClusterHLSubSystem(self.cs_mol, self.env_method, hl_method, hl_dict=hl_dict)
subsys.init_density()
subsys_hl_e = subsys.get_hl_in_env_energy()
true_scf = scf.RHF(self.cs_mol)
true_hf_e = true_scf.kernel()
true_cc = cc.CCSD(true_scf)
true_cc.frozen = 1
true_cc_e = true_cc.kernel()[0]
self.assertAlmostEqual(subsys_hl_e, true_hf_e + true_cc_e, delta=1e-7)
# Unrestricted Open shell
hl_method = 'ccsd'
subsys = cluster_subsystem.ClusterHLSubSystem(self.os_mol, self.env_method, hl_method, hl_unrestricted=True)
subsys.init_density()
subsys_hl_e = subsys.get_hl_in_env_energy()
true_scf = scf.UHF(self.os_mol)
true_hf_e = true_scf.kernel()
true_cc = cc.UCCSD(true_scf)
true_cc_e = true_cc.kernel()[0]
self.assertAlmostEqual(subsys_hl_e, true_hf_e + true_cc_e, delta=1e-8)
# Restricted Open shell
#hl_method = 'ccsd'
#subsys = cluster_subsystem.ClusterHLSubSystem(self.os_mol, self.env_method, hl_method)
#subsys.init_density()
#subsys_hl_e = subsys.get_hl_in_env_energy()
#true_scf = scf.ROHF(self.os_mol)
#true_hf_e = true_scf.kernel()
#true_cc = cc.UCCSD(true_scf)
#true_cc_e = true_cc.kernel()[0]
#self.assertAlmostEqual(subsys_hl_e, true_hf_e + true_cc_e, delta=1e-10)
def test_ccsdt_in_env_energy(self):
# Closed shell
hl_method = 'ccsd(t)'
subsys = cluster_subsystem.ClusterHLSubSystem(self.cs_mol, self.env_method, hl_method)
subsys.init_density()
subsys_hl_e = subsys.get_hl_in_env_energy()
true_scf = scf.RHF(self.cs_mol)
true_hf_e = true_scf.kernel()
true_cc = cc.CCSD(true_scf)
true_cc_e = true_cc.kernel()[0]
true_t_e = ccsd_t.kernel(true_cc, true_cc.ao2mo())
self.assertAlmostEqual(subsys_hl_e, true_hf_e + true_cc_e + true_t_e, delta=1e-7)
# Open shell
hl_method = 'ccsd(t)'
subsys = cluster_subsystem.ClusterHLSubSystem(self.os_mol, self.env_method, hl_method, hl_unrestricted=True)
subsys.init_density()
subsys_hl_e = subsys.get_hl_in_env_energy()
true_scf = scf.UHF(self.os_mol)
true_hf_e = true_scf.kernel()
true_cc = cc.UCCSD(true_scf)
true_cc_e = true_cc.kernel()[0]
true_t_e = uccsd_t.kernel(true_cc, true_cc.ao2mo())
self.assertAlmostEqual(subsys_hl_e, true_hf_e + true_cc_e + true_t_e, delta=1e-10)
def test_mp_in_env_energy(self):
# Closed shell
hl_method = 'mp2'
subsys = cluster_subsystem.ClusterHLSubSystem(self.cs_mol, self.env_method, hl_method)
subsys.init_density()
subsys_hl_e = subsys.get_hl_in_env_energy()
true_scf = scf.RHF(self.cs_mol)
true_hf_e = true_scf.kernel()
true_mp = mp.MP2(true_scf)
true_mp_e = true_mp.kernel()[0]
self.assertAlmostEqual(subsys_hl_e, true_hf_e + true_mp_e, delta=1e-10)
# Open shell
hl_method = 'mp2'
subsys = cluster_subsystem.ClusterHLSubSystem(self.os_mol, self.env_method, hl_method, hl_unrestricted=True)
subsys.init_density()
subsys_hl_e = subsys.get_hl_in_env_energy()
true_scf = scf.UHF(self.os_mol)
true_hf_e = true_scf.kernel()
true_mp = mp.UMP2(true_scf)
true_mp_e = true_mp.kernel()[0]
self.assertAlmostEqual(subsys_hl_e, true_hf_e + true_mp_e, delta=1e-8)
def test_casscf_in_env_energy(self):
# Closed shell
hl_method = 'cas[2,2]'
subsys = cluster_subsystem.ClusterHLSubSystem(self.cs_mol, self.env_method, hl_method)
subsys.init_density()
subsys_hl_e = subsys.get_hl_in_env_energy()
true_scf = scf.RHF(self.cs_mol)
true_hf_e = true_scf.kernel()
true_casscf = mcscf.CASSCF(true_scf, 2, 2)
true_casscf_e = true_casscf.kernel()[0]
self.assertAlmostEqual(subsys_hl_e, true_casscf_e, delta=1e-8)
def test_ci_in_env_energy(self):
pass
def test_dmrg_in_env_energy(self):
pass
def test_gw_in_env_energy(self):
pass
def test_hci_in_env_energy(self):
pass
def test_icmpspt_in_env_energy(self):
pass
def test_mrpt_in_env_energy(self):
pass
def test_shciscf_in_env_energy(self):
pass
def test_fci_in_env_energy(self):
pass
def test_fciqmc_in_env_energy(self):
pass
#@unittest.skip
def test_fcidump_in_env_energy(self):
#there is not a great way to test this. Pretty sure it's working.
# Closed shell
t_file = tempfile.NamedTemporaryFile()
hl_method = 'fcidump'
subsys = cluster_subsystem.ClusterHLSubSystem(self.cs_mol, self.env_method, hl_method, filename=t_file.name)
subsys.init_density()
subsys.chkfile_index = '0'
subsys_hl_e = subsys.get_hl_in_env_energy()
true_scf = scf.RHF(self.cs_mol)
true_hf_e = true_scf.kernel()
t_file = tempfile.NamedTemporaryFile()
fcidump_filename = t_file.name
tools.fcidump.from_scf(true_scf, fcidump_filename, tol=1e-200)
        with open(subsys.filename + '.fcidump', 'r') as fin:
            # Read each file once; repeated fin.read() calls return '' after EOF.
            test_data = fin.read()
        test_fcidump = test_data[:100].splitlines()
        test_fcidump += test_data[200:300].splitlines()
        test_fcidump += test_data[300:400].splitlines()
        test_fcidump += test_data[1000:1200].splitlines()
        test_fcidump += test_data[3000:3200].splitlines()
        with open(fcidump_filename, 'r') as fin:
            true_data = fin.read()
        true_fcidump = true_data[:100].splitlines()
        true_fcidump += true_data[200:300].splitlines()
        true_fcidump += true_data[300:400].splitlines()
        true_fcidump += true_data[1000:1200].splitlines()
        true_fcidump += true_data[3000:3200].splitlines()
self.assertEqual(test_fcidump[:4], true_fcidump[:4])
for i in range(4, len(test_fcidump)):
print (i)
print (test_fcidump[i])
print (true_fcidump[i])
test_fci_val = float(test_fcidump[i].split()[0])
true_fci_val = float(true_fcidump[i].split()[0])
self.assertAlmostEqual(test_fci_val, true_fci_val)
| 43.182692
| 172
| 0.638125
| 49,078
| 0.993462
| 0
| 0
| 0
| 0
| 0
| 0
| 3,663
| 0.074148
|
6c8c154f105569426c30727bc7ab8defbef28f73
| 1,051
|
py
|
Python
|
scripts/undeploy_service.py
|
Suremaker/consul-deployment-agent
|
466c36d3fcb9f8bfa144299dde7cb94f4341907b
|
[
"Apache-2.0"
] | 6
|
2016-10-10T09:26:07.000Z
|
2018-09-20T08:59:42.000Z
|
scripts/undeploy_service.py
|
Suremaker/consul-deployment-agent
|
466c36d3fcb9f8bfa144299dde7cb94f4341907b
|
[
"Apache-2.0"
] | 11
|
2016-10-10T12:11:07.000Z
|
2018-05-09T22:11:02.000Z
|
scripts/undeploy_service.py
|
Suremaker/consul-deployment-agent
|
466c36d3fcb9f8bfa144299dde7cb94f4341907b
|
[
"Apache-2.0"
] | 16
|
2016-09-28T16:00:58.000Z
|
2019-02-25T16:52:12.000Z
|
#!/usr/bin/env python
import argparse
import consulate
class Options(object):
pass
options = Options()
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--name', required=True, help='service name')
parser.add_argument('-s', '--slice', help='slice name (optional)')
parser.add_argument('-r', '--role', required=True, help='server role name')
parser.add_argument('-e', '--environment', required=True, help='environment name')
args = parser.parse_args(namespace=options)
print('[Initiating service removal]')
print(' Service: %s' % args.name)
print(' Slice: %s' % args.slice)
print(' Role: %s' % args.role)
print(' Environment: %s' % args.environment)
consul_session = consulate.Consul()
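# Consul KV key for this deployment entry; a slice, when provided, adds one
# more path segment (the 'enviroments' spelling is kept exactly as written).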
if args.slice is None:
deployment_key = 'enviroments/{0}/roles/{1}/services/{2}'.format(args.environment, args.role, args.name)
else:
deployment_key = 'enviroments/{0}/roles/{1}/services/{2}/{3}'.format(args.environment, args.role, args.name, args.slice)
del consul_session.kv[deployment_key]
print('Service removal triggered.')
| 30.911765
| 124
| 0.713606
| 31
| 0.029496
| 0
| 0
| 0
| 0
| 0
| 0
| 351
| 0.333968
|
6c8d08da4457f70f71f8796a1ee31a832ff90488
| 190
|
py
|
Python
|
day08/test04.py
|
jaywoong/python
|
99daedd5a9418b72b2d5c3b800080e730eb9b3ea
|
[
"Apache-2.0"
] | null | null | null |
day08/test04.py
|
jaywoong/python
|
99daedd5a9418b72b2d5c3b800080e730eb9b3ea
|
[
"Apache-2.0"
] | null | null | null |
day08/test04.py
|
jaywoong/python
|
99daedd5a9418b72b2d5c3b800080e730eb9b3ea
|
[
"Apache-2.0"
] | null | null | null |
from value import Account
acc1 = Account(10000, 3.2)
print(acc1)
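# Note: because of name mangling, the next assignment (made outside the class)
# creates a new public '__balance' attribute; assuming Account stores its
# balance in a private 'self.__balance', the real balance is left unchanged.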
acc1.__balance = 100000000
print(acc1)
print(acc1.getBalance())
print(acc1.getInterest())
acc1.setInterest(2.8)
print(acc1)
| 17.272727
| 26
| 0.763158
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
6c8e315e18d51be8398247d53085f6019815be6e
| 2,717
|
py
|
Python
|
tests/functional/conftest.py
|
charmed-kubernetes/ceph-csi-operator
|
06a6a9fed6055e3f0e0bfde835d7f607febcf6ea
|
[
"Apache-2.0"
] | null | null | null |
tests/functional/conftest.py
|
charmed-kubernetes/ceph-csi-operator
|
06a6a9fed6055e3f0e0bfde835d7f607febcf6ea
|
[
"Apache-2.0"
] | null | null | null |
tests/functional/conftest.py
|
charmed-kubernetes/ceph-csi-operator
|
06a6a9fed6055e3f0e0bfde835d7f607febcf6ea
|
[
"Apache-2.0"
] | 1
|
2022-03-24T19:17:47.000Z
|
2022-03-24T19:17:47.000Z
|
# Copyright 2021 Martin Kalcok
# See LICENSE file for licensing details.
"""Pytest fixtures for functional tests."""
# pylint: disable=W0621
import logging
import tempfile
from pathlib import Path
import pytest
from kubernetes import client, config
from pytest_operator.plugin import OpsTest
logger = logging.getLogger(__name__)
@pytest.fixture(scope="session")
def namespace() -> str:
"""Return namespace used for functional tests."""
return "default"
@pytest.fixture(scope="module")
async def kube_config(ops_test: OpsTest) -> Path:
"""Return path to the kube config of the tested Kubernetes cluster.
    Config file is fetched from the kubernetes-master unit and stored in a temporary file.
"""
k8s_master = ops_test.model.applications["kubernetes-master"].units[0]
with tempfile.TemporaryDirectory() as tmp_dir:
kube_config_file = Path(tmp_dir).joinpath("kube_config")
# This split is needed because `model_name` gets reported in format "<controller>:<model>"
model_name = ops_test.model_name.split(":", maxsplit=1)[-1]
cmd = "juju scp -m {} {}:config {}".format(
model_name, k8s_master.name, kube_config_file
).split()
return_code, _, std_err = await ops_test.run(*cmd)
assert return_code == 0, std_err
yield kube_config_file
@pytest.fixture()
async def cleanup_k8s(kube_config, namespace: str):
"""Cleanup kubernetes resources created during test."""
yield # act only on teardown
config.load_kube_config(str(kube_config))
pod_prefixes = ["read-test-ceph-", "write-test-ceph"]
pvc_prefix = "pvc-test-"
core_api = client.CoreV1Api()
for pod in core_api.list_namespaced_pod(namespace).items:
pod_name = pod.metadata.name
if any(pod_name.startswith(prefix) for prefix in pod_prefixes):
try:
logger.info("Removing Pod %s", pod_name)
core_api.delete_namespaced_pod(pod_name, namespace)
except client.ApiException as exc:
if exc.status != 404:
raise exc
logger.debug("Pod %s is already removed", pod_name)
for pvc in core_api.list_namespaced_persistent_volume_claim(namespace).items:
pvc_name = pvc.metadata.name
if pvc_name.startswith(pvc_prefix):
try:
logger.info("Removing PersistentVolumeClaim %s", pvc_name)
core_api.delete_namespaced_persistent_volume_claim(pvc_name, namespace)
except client.ApiException as exc:
if exc.status != 404:
raise exc
logger.debug("PersistentVolumeClaim %s is already removed.", pvc_name)
| 36.226667
| 98
| 0.670593
| 0
| 0
| 2,194
| 0.807508
| 2,375
| 0.874126
| 2,194
| 0.807508
| 779
| 0.286713
|
6657771a019db8ff3764b551b4d27a9c8de3eee0
| 3,922
|
py
|
Python
|
caronte/allauth/utils.py
|
simodalla/django-caronte
|
e47175849605924c26441c3a3d6d94f4340b9df7
|
[
"BSD-3-Clause"
] | null | null | null |
caronte/allauth/utils.py
|
simodalla/django-caronte
|
e47175849605924c26441c3a3d6d94f4340b9df7
|
[
"BSD-3-Clause"
] | null | null | null |
caronte/allauth/utils.py
|
simodalla/django-caronte
|
e47175849605924c26441c3a3d6d94f4340b9df7
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from django.contrib.auth import get_user_model
from django.contrib.admin.templatetags.admin_urls import admin_urlname
from django.core.mail import mail_admins
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from django.template import loader, Context
from django.utils.html import strip_tags
from allauth.exceptions import ImmediateHttpResponse
from ..models import LoginAuthorization, LogUnauthorizedLogin, AuthorizedDomain
User = get_user_model()
class AuthorizationService:
def __init__(self, user=None):
self._user = user
@property
def user(self):
return self._user
@user.setter
def user(self, user):
self._user = user
@property
def login_authorization(self):
try:
return LoginAuthorization.objects.get(
username=getattr(self.user, User.USERNAME_FIELD, ''))
except LoginAuthorization.DoesNotExist as exc:
raise exc
def make_unathorized_login(self, reason):
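        # Best-effort log of the rejected login, then an ImmediateHttpResponse
        # redirecting the user to the 'unauthorized_login' page.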
try:
LogUnauthorizedLogin.objects.create(
username=self.user.get_username(), reason=reason)
except Exception:
pass
return ImmediateHttpResponse(
redirect(reverse('caronte:unauthorized_login')))
def is_email_in_authorized_domain(self):
email = self.user.email
if not email or '@' not in email:
return False
domain = email.split('@')[1]
try:
AuthorizedDomain.objects.get(domain=domain)
return True
except AuthorizedDomain.DoesNotExist:
return False
def set_fields_from_authorized(self, authorized_user, fields=None):
if authorized_user:
fields = fields or ['is_staff', 'is_superuser']
for field in fields:
setattr(self.user,
field,
getattr(authorized_user, field, False))
return True
return False
def copy_fields(self, source_user, fields=None, dest_update=True):
"""
        Copy the attributes listed in 'fields' from 'source_user' onto this
        service's user, saving it when anything changed and 'dest_update' is
        True. Returns True if any field was updated.
"""
fields = fields or []
changed = False
for field in fields:
social_field = getattr(source_user, field)
if not (getattr(self.user, field) == social_field):
setattr(self.user, field, social_field)
changed = True
if changed and dest_update:
self.user.save()
return changed
@staticmethod
def _email_for_sociallogin(subject, template, context=None):
context = context or {}
message = loader.get_template(template).render(Context(context))
mail_admins(subject,
strip_tags(message).lstrip('\n'),
fail_silently=True,
html_message=message)
def email_new_sociallogin(self, request):
email = self.user.email
context = {'email': email,
'user_url': request.build_absolute_uri(
reverse(admin_urlname(self.user._meta, 'changelist')))
+ '?email={}'.format(email)}
subject = 'Nuovo socialaccount di {}'.format(email)
return self._email_for_sociallogin(
subject, "custom_email_user/email/new_sociallogin.html", context)
def email_link_sociallogin(self, request):
email = self.user.email
context = {'email': email,
'user_url': request.build_absolute_uri(
self.user.get_absolute_url())}
subject = 'Collegamento socialaccount di {}'.format(email)
return self._email_for_sociallogin(
subject, "custom_email_user/email/link_sociallogin.html", context)
| 35.017857
| 79
| 0.624936
| 3,351
| 0.854411
| 0
| 0
| 722
| 0.18409
| 0
| 0
| 413
| 0.105303
|
665885ddd8b1d1e99097726c1613e0a5986ad3d5
| 15,918
|
py
|
Python
|
Task/data.py
|
sndnyang/GMMC
|
e9cd85c9d55a7de411daad490c8db84dfe9c0455
|
[
"Apache-2.0"
] | 4
|
2021-05-09T16:00:12.000Z
|
2021-12-16T12:31:25.000Z
|
Task/data.py
|
sndnyang/GMMC
|
e9cd85c9d55a7de411daad490c8db84dfe9c0455
|
[
"Apache-2.0"
] | null | null | null |
Task/data.py
|
sndnyang/GMMC
|
e9cd85c9d55a7de411daad490c8db84dfe9c0455
|
[
"Apache-2.0"
] | null | null | null |
from tensorflow.python.platform import flags
from tensorflow.contrib.data.python.ops import batching
import tensorflow as tf
import json
from torch.utils.data import Dataset
import pickle
import os.path as osp
import os
import numpy as np
import time
from scipy.misc import imread, imresize
from torchvision.datasets import CIFAR10, MNIST, SVHN, CIFAR100, ImageFolder
from torchvision import transforms
import torch
import torchvision
FLAGS = flags.FLAGS
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Dataset Options
flags.DEFINE_string('dsprites_path',
'/root/data/dsprites-dataset/dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz',
'path to dsprites characters')
flags.DEFINE_string('imagenet_datadir', '/root/imagenet_big', 'whether cutoff should always in image')
flags.DEFINE_bool('dshape_only', False, 'fix all factors except for shapes')
flags.DEFINE_bool('dpos_only', False, 'fix all factors except for positions of shapes')
flags.DEFINE_bool('dsize_only', False, 'fix all factors except for size of objects')
flags.DEFINE_bool('drot_only', False, 'fix all factors except for rotation of objects')
flags.DEFINE_bool('dsprites_restrict', False, 'fix all factors except for rotation of objects')
flags.DEFINE_string('imagenet_path', '/root/imagenet', 'path to imagenet images')
flags.DEFINE_string('load_path', '/root/imagenet', 'path to imagenet images')
flags.DEFINE_string('load_type', 'npy', 'npy or png')
flags.DEFINE_bool('single', False, 'single ')
flags.DEFINE_string('datasource', 'random', 'default or noise or negative or single')
# Data augmentation options
# flags.DEFINE_bool('cutout_inside', False, 'whether cutoff should always in image')
# flags.DEFINE_float('cutout_prob', 1.0, 'probability of using cutout')
# flags.DEFINE_integer('cutout_mask_size', 16, 'size of cutout')
# flags.DEFINE_bool('cutout', False, 'whether to add cutout regularizer to data')
flags.DEFINE_string('eval', '', '')
flags.DEFINE_string('init', '', '')
flags.DEFINE_string('norm', '', '')
flags.DEFINE_string('n_steps', '', '')
flags.DEFINE_string('reinit_freq', '', '')
flags.DEFINE_string('print_every', '', '')
flags.DEFINE_string('n_sample_steps', '', '')
flags.DEFINE_integer('gpu-id', 0, '')
def cutout(mask_color=(0, 0, 0)):
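    # Cutout augmentation: returns a closure that, with probability
    # FLAGS.cutout_prob, fills a random square patch of side
    # FLAGS.cutout_mask_size with mask_color.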
mask_size_half = FLAGS.cutout_mask_size // 2
offset = 1 if FLAGS.cutout_mask_size % 2 == 0 else 0
def _cutout(image):
image = np.asarray(image).copy()
if np.random.random() > FLAGS.cutout_prob:
return image
h, w = image.shape[:2]
if FLAGS.cutout_inside:
cxmin, cxmax = mask_size_half, w + offset - mask_size_half
cymin, cymax = mask_size_half, h + offset - mask_size_half
else:
cxmin, cxmax = 0, w + offset
cymin, cymax = 0, h + offset
cx = np.random.randint(cxmin, cxmax)
cy = np.random.randint(cymin, cymax)
xmin = cx - mask_size_half
ymin = cy - mask_size_half
xmax = xmin + FLAGS.cutout_mask_size
ymax = ymin + FLAGS.cutout_mask_size
xmin = max(0, xmin)
ymin = max(0, ymin)
xmax = min(w, xmax)
ymax = min(h, ymax)
image[:, ymin:ymax, xmin:xmax] = np.array(mask_color)[:, None, None]
return image
return _cutout
class CelebA(Dataset):
def __init__(self):
self.path = "/root/data/img_align_celeba"
self.ims = os.listdir(self.path)
self.ims = [osp.join(self.path, im) for im in self.ims]
def __len__(self):
return len(self.ims)
def __getitem__(self, index):
label = 1
if FLAGS.single:
index = 0
path = self.ims[index]
im = imread(path)
im = imresize(im, (32, 32))
image_size = 32
im = im / 255.
if FLAGS.datasource == 'default':
im_corrupt = im + 0.3 * np.random.randn(image_size, image_size, 3)
elif FLAGS.datasource == 'random':
im_corrupt = np.random.uniform(
0, 1, size=(image_size, image_size, 3))
return im_corrupt, im, label
class Cifar10(Dataset):
def __init__(
self, FLAGS,
train=True,
full=False,
augment=False,
noise=True,
rescale=1.0):
if augment:
transform_list = [
torchvision.transforms.RandomCrop(32, padding=4),
torchvision.transforms.RandomHorizontalFlip(),
torchvision.transforms.ToTensor(),
]
# if FLAGS.cutout:
# transform_list.append(cutout())
transform = transforms.Compose(transform_list)
else:
transform = transforms.ToTensor()
self.FLAGS = FLAGS
self.full = full
self.data = CIFAR10(
"../data/dataset/cifar10",
transform=transform,
train=train,
download=True)
self.test_data = CIFAR10(
"../data/dataset/cifar10",
transform=transform,
train=False,
download=True)
self.one_hot_map = np.eye(10)
self.noise = noise
self.rescale = rescale
def __len__(self):
if self.full:
return len(self.data) + len(self.test_data)
else:
return len(self.data)
def __getitem__(self, index):
FLAGS = self.FLAGS
FLAGS.single = False
if not FLAGS.single:
if self.full:
if index >= len(self.data):
im, label = self.test_data[index - len(self.data)]
else:
im, label = self.data[index]
else:
im, label = self.data[index]
else:
im, label = self.data[0]
im = np.transpose(im, (1, 2, 0)).numpy()
image_size = 32
label = self.one_hot_map[label]
im = im * 255 / 256
if self.noise:
im = im * self.rescale + \
np.random.uniform(0, self.rescale * 1 / 256., im.shape)
np.random.seed((index + int(time.time() * 1e7)) % 2**32)
FLAGS.datasource = 'random'
if FLAGS.datasource == 'default':
im_corrupt = im + 0.3 * np.random.randn(image_size, image_size, 3)
elif FLAGS.datasource == 'random':
im_corrupt = np.random.uniform(
0.0, self.rescale, (image_size, image_size, 3))
return im_corrupt, im, label
class Cifar100(Dataset):
def __init__(self, train=True, augment=False):
if augment:
transform_list = [
torchvision.transforms.RandomCrop(32, padding=4),
torchvision.transforms.RandomHorizontalFlip(),
torchvision.transforms.ToTensor(),
]
if FLAGS.cutout:
transform_list.append(cutout())
transform = transforms.Compose(transform_list)
else:
transform = transforms.ToTensor()
self.data = CIFAR100(
"/root/cifar100",
transform=transform,
train=train,
download=True)
self.one_hot_map = np.eye(100)
def __len__(self):
return len(self.data)
def __getitem__(self, index):
if not FLAGS.single:
im, label = self.data[index]
else:
im, label = self.data[0]
im = np.transpose(im, (1, 2, 0)).numpy()
image_size = 32
label = self.one_hot_map[label]
im = im + np.random.uniform(-1 / 512, 1 / 512, im.shape)
np.random.seed((index + int(time.time() * 1e7)) % 2**32)
if FLAGS.datasource == 'default':
im_corrupt = im + 0.3 * np.random.randn(image_size, image_size, 3)
elif FLAGS.datasource == 'random':
im_corrupt = np.random.uniform(
0.0, 1.0, (image_size, image_size, 3))
return im_corrupt, im, label
class Svhn(Dataset):
def __init__(self, train=True, augment=False):
transform = transforms.ToTensor()
self.data = SVHN("/root/svhn", transform=transform, download=True)
self.one_hot_map = np.eye(10)
def __len__(self):
return len(self.data)
def __getitem__(self, index):
if not FLAGS.single:
im, label = self.data[index]
else:
            im, label = self.data[0]
im = np.transpose(im, (1, 2, 0)).numpy()
image_size = 32
label = self.one_hot_map[label]
im = im + np.random.uniform(-1 / 512, 1 / 512, im.shape)
np.random.seed((index + int(time.time() * 1e7)) % 2**32)
if FLAGS.datasource == 'default':
im_corrupt = im + 0.3 * np.random.randn(image_size, image_size, 3)
elif FLAGS.datasource == 'random':
im_corrupt = np.random.uniform(
0.0, 1.0, (image_size, image_size, 3))
return im_corrupt, im, label
class Mnist(Dataset):
def __init__(self, train=True, rescale=1.0):
self.data = MNIST(
"/root/mnist",
transform=transforms.ToTensor(),
download=True, train=train)
self.labels = np.eye(10)
self.rescale = rescale
def __len__(self):
return len(self.data)
def __getitem__(self, index):
im, label = self.data[index]
label = self.labels[label]
im = im.squeeze()
# im = im.numpy() / 2 + np.random.uniform(0, 0.5, (28, 28))
# im = im.numpy() / 2 + 0.2
im = im.numpy() / 256 * 255 + np.random.uniform(0, 1. / 256, (28, 28))
im = im * self.rescale
image_size = 28
if FLAGS.datasource == 'default':
im_corrupt = im + 0.3 * np.random.randn(image_size, image_size)
elif FLAGS.datasource == 'random':
im_corrupt = np.random.uniform(0, self.rescale, (28, 28))
return im_corrupt, im, label
class DSprites(Dataset):
def __init__(
self,
cond_size=False,
cond_shape=False,
cond_pos=False,
cond_rot=False):
dat = np.load(FLAGS.dsprites_path)
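        # dSprites latents_values columns: 0 color, 1 shape, 2 scale,
        # 3 orientation, 4 posX, 5 posY (indexed by the masks below).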
if FLAGS.dshape_only:
l = dat['latents_values']
mask = (l[:, 4] == 16 / 31) & (l[:, 5] == 16 /
31) & (l[:, 2] == 0.5) & (l[:, 3] == 30 * np.pi / 39)
self.data = np.tile(dat['imgs'][mask], (10000, 1, 1))
self.label = np.tile(dat['latents_values'][mask], (10000, 1))
self.label = self.label[:, 1:2]
elif FLAGS.dpos_only:
l = dat['latents_values']
# mask = (l[:, 1] == 1) & (l[:, 2] == 0.5) & (l[:, 3] == 30 * np.pi / 39)
mask = (l[:, 1] == 1) & (
l[:, 3] == 30 * np.pi / 39) & (l[:, 2] == 0.5)
self.data = np.tile(dat['imgs'][mask], (100, 1, 1))
self.label = np.tile(dat['latents_values'][mask], (100, 1))
self.label = self.label[:, 4:] + 0.5
elif FLAGS.dsize_only:
l = dat['latents_values']
# mask = (l[:, 1] == 1) & (l[:, 2] == 0.5) & (l[:, 3] == 30 * np.pi / 39)
mask = (l[:, 3] == 30 * np.pi / 39) & (l[:, 4] == 16 /
31) & (l[:, 5] == 16 / 31) & (l[:, 1] == 1)
self.data = np.tile(dat['imgs'][mask], (10000, 1, 1))
self.label = np.tile(dat['latents_values'][mask], (10000, 1))
self.label = (self.label[:, 2:3])
elif FLAGS.drot_only:
l = dat['latents_values']
mask = (l[:, 2] == 0.5) & (l[:, 4] == 16 /
31) & (l[:, 5] == 16 / 31) & (l[:, 1] == 1)
self.data = np.tile(dat['imgs'][mask], (100, 1, 1))
self.label = np.tile(dat['latents_values'][mask], (100, 1))
self.label = (self.label[:, 3:4])
self.label = np.concatenate(
[np.cos(self.label), np.sin(self.label)], axis=1)
elif FLAGS.dsprites_restrict:
l = dat['latents_values']
mask = (l[:, 1] == 1) & (l[:, 3] == 0 * np.pi / 39)
self.data = dat['imgs'][mask]
self.label = dat['latents_values'][mask]
else:
self.data = dat['imgs']
self.label = dat['latents_values']
if cond_size:
self.label = self.label[:, 2:3]
elif cond_shape:
self.label = self.label[:, 1:2]
elif cond_pos:
self.label = self.label[:, 4:]
elif cond_rot:
self.label = self.label[:, 3:4]
self.label = np.concatenate(
[np.cos(self.label), np.sin(self.label)], axis=1)
else:
self.label = self.label[:, 1:2]
self.identity = np.eye(3)
def __len__(self):
return self.data.shape[0]
def __getitem__(self, index):
im = self.data[index]
image_size = 64
if not (
FLAGS.dpos_only or FLAGS.dsize_only) and (
not FLAGS.cond_size) and (
not FLAGS.cond_pos) and (
not FLAGS.cond_rot) and (
not FLAGS.drot_only):
label = self.identity[self.label[index].astype(
np.int32) - 1].squeeze()
else:
label = self.label[index]
if FLAGS.datasource == 'default':
im_corrupt = im + 0.3 * np.random.randn(image_size, image_size)
elif FLAGS.datasource == 'random':
im_corrupt = 0.5 + 0.5 * np.random.randn(image_size, image_size)
return im_corrupt, im, label
class Imagenet(Dataset):
def __init__(self, train=True, augment=False):
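        # Downsampled (32x32) ImageNet pickles: train_data_batch_1..10 for
        # training, val_data for evaluation.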
if train:
for i in range(1, 11):
f = pickle.load(
open(
osp.join(
FLAGS.imagenet_path,
'train_data_batch_{}'.format(i)),
'rb'))
if i == 1:
labels = f['labels']
data = f['data']
else:
labels.extend(f['labels'])
data = np.vstack((data, f['data']))
else:
f = pickle.load(
open(
osp.join(
FLAGS.imagenet_path,
'val_data'),
'rb'))
labels = f['labels']
data = f['data']
self.labels = labels
self.data = data
self.one_hot_map = np.eye(1000)
def __len__(self):
return self.data.shape[0]
def __getitem__(self, index):
if not FLAGS.single:
im, label = self.data[index], self.labels[index]
else:
im, label = self.data[0], self.labels[0]
label -= 1
im = im.reshape((3, 32, 32)) / 255
im = im.transpose((1, 2, 0))
image_size = 32
label = self.one_hot_map[label]
im = im + np.random.uniform(-1 / 512, 1 / 512, im.shape)
np.random.seed((index + int(time.time() * 1e7)) % 2**32)
if FLAGS.datasource == 'default':
im_corrupt = im + 0.3 * np.random.randn(image_size, image_size, 3)
elif FLAGS.datasource == 'random':
im_corrupt = np.random.uniform(
0.0, 1.0, (image_size, image_size, 3))
return im_corrupt, im, label
class Textures(Dataset):
def __init__(self, train=True, augment=False):
self.dataset = ImageFolder("/mnt/nfs/yilundu/data/dtd/images")
def __len__(self):
return 2 * len(self.dataset)
def __getitem__(self, index):
idx = index % (len(self.dataset))
im, label = self.dataset[idx]
im = np.array(im)[:32, :32] / 255
im = im + np.random.uniform(-1 / 512, 1 / 512, im.shape)
return im, im, label
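# --- Illustrative usage sketch (added; not part of the original module). ---
# Any of the Dataset classes above plugs into a standard PyTorch DataLoader;
# the FLAGS values and data paths they rely on are assumed to be configured.
#
#   from torch.utils.data import DataLoader
#   loader = DataLoader(Textures(train=True), batch_size=16, shuffle=True)
#   for im_corrupt, im, label in loader:
#       pass  # im_corrupt: corrupted/initial image, im: target image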
| 33.441176
| 102
| 0.537379
| 12,588
| 0.790803
| 0
| 0
| 0
| 0
| 0
| 0
| 2,036
| 0.127906
|
6658e5fae0f2feb228f15d275ba5e7cdca6b1e61
| 3,751
|
py
|
Python
|
controller/Specialty.py
|
ryltar/GSB-Planning-API
|
919ad95e4e7bdcac43028fa4026bb800ec6bdb2a
|
[
"Apache-2.0"
] | null | null | null |
controller/Specialty.py
|
ryltar/GSB-Planning-API
|
919ad95e4e7bdcac43028fa4026bb800ec6bdb2a
|
[
"Apache-2.0"
] | null | null | null |
controller/Specialty.py
|
ryltar/GSB-Planning-API
|
919ad95e4e7bdcac43028fa4026bb800ec6bdb2a
|
[
"Apache-2.0"
] | null | null | null |
from flask import jsonify, g, request
from flask_restful import Resource
from Authentication import *
from Service import *
def get_service():
""" Gets an instance of 'Service' from the 'g' environment. """
if not hasattr(g, 'service'):
g.service = Service()
return g.service
def specialty_queries():
""" Gets an instance of 'SpecialtyQueries' from the 'g' environment. """
if not hasattr(g, 'spec_queries'):
g.spec_queries = SpecialtyQueries()
return g.spec_queries
class SpecialtyQueries:
""" Manager for Specialty entity related SQL queries. """
def get_spec_by_id(self):
""" Returns a 'SELECT' query for a single Specialty. """
return "SELECT * FROM specialty WHERE id = %s"
def del_spec_by_id(self):
""" Returns a 'DELETE' query for a single Specialty. """
return "DELETE FROM specialty WHERE id = %s"
def get_specs(self):
""" Returns a 'SELECT' query for a single Specialty. """
return "SELECT * FROM specialty"
def post_spec(self):
""" Returns an 'INSERT' query for a single Specialty. """
return "INSERT INTO specialty(label) VALUES(%s) RETURNING id"
def put_spec(self, key_list):
""" Returns an 'UPDATE' query for a single Specialty. """
tspec = ""
for k in range(len(key_list)):
tspec += key_list[k] + " = %s, "
return "UPDATE specialty SET " + tspec[:-2] + " WHERE id = %s"
class Specialty(Resource):
""" Flask_restful Resource for Specialty entity, for routes with a parameter. """
@requires_admin_auth
def get(self, spec_id):
""" Returns a single Specialty. """
query = specialty_queries().get_spec_by_id()
specialty = get_service().get_content(query, [spec_id])
if specialty is None:
return jsonify(status=404)
return jsonify(status=200,data=SpecialtyContainer(specialty).__dict__)
@requires_admin_auth
def delete(self, spec_id):
""" Deletes a single Specialty. """
query = specialty_queries().del_spec_by_id()
value = get_service().del_content(query,[spec_id])
if value != 1:
return jsonify(status=404)
return jsonify(status=200)
@requires_admin_auth
def put(self, spec_id):
""" Edits a single Specialty. """
key_list, value_list = [], []
for key, value in request.form.items():
key_list.append(key)
value_list.append(value)
value_list.append(spec_id)
query = specialty_queries().put_spec(key_list)
value = get_service().put_content(query, value_list)
if value != 1:
            return jsonify(status=404)
return jsonify(status=200)
class SpecialtyContainer:
""" The Specialty entity itself. """
def __init__(self, array):
""" Builds the entity from a list. """
self.id = array[0]
self.label = array[1]
class SpecialtyList(Resource):
    """ Flask_restful Resource for Specialty entity, for routes with no parameter. """
    @requires_admin_auth
    def get(self):
        """ Returns every single Specialty. """
query = specialty_queries().get_specs()
specialties = get_service().get_contents(query)
if specialties is None:
return jsonify(status=404)
json_to_return = []
for e in specialties:
spec = SpecialtyContainer(e)
json_to_return.append(spec.__dict__)
return jsonify(data=json_to_return)
@requires_admin_auth
def post(self):
""" Returns every single Specialty. """
query = specialty_queries().post_spec()
label = request.form['label']
spec_id = get_service().post_content(query, [label])
if spec_id == -1:
return jsonify(status=404)
return jsonify(status=200,data=spec_id)
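# Illustrative wiring sketch (assumed, not taken from the project): these
# resources are typically registered on a flask_restful Api along these lines.
#
#   from flask import Flask
#   from flask_restful import Api
#
#   app = Flask(__name__)
#   api = Api(app)
#   api.add_resource(SpecialtyList, '/specialty')
#   api.add_resource(Specialty, '/specialty/<int:spec_id>')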
| 31.788136
| 89
| 0.640363
| 3,226
| 0.860037
| 0
| 0
| 1,909
| 0.508931
| 0
| 0
| 1,087
| 0.289789
|
66595693a0bfed64682ff38551b196526a22e500
| 981
|
py
|
Python
|
Collection/ms/01 Arrays/07_valid_paranthesis.py
|
kmanadkat/leetcode-101
|
8a9db22d98692d634a497ba76c7e9f792bb1f1bc
|
[
"MIT"
] | null | null | null |
Collection/ms/01 Arrays/07_valid_paranthesis.py
|
kmanadkat/leetcode-101
|
8a9db22d98692d634a497ba76c7e9f792bb1f1bc
|
[
"MIT"
] | null | null | null |
Collection/ms/01 Arrays/07_valid_paranthesis.py
|
kmanadkat/leetcode-101
|
8a9db22d98692d634a497ba76c7e9f792bb1f1bc
|
[
"MIT"
] | 1
|
2021-09-15T11:17:36.000Z
|
2021-09-15T11:17:36.000Z
|
class Solution:
def isValid(self, s: str) -> bool:
# String should have even length
if len(s) % 2 != 0:
return False
# Use list as Stack DS
bracStack = []
        # Push opening brackets onto the stack; on a closing bracket, pop and check the match
for ele in s:
if ele in ['(', '{', '[']:
bracStack.append(ele)
elif ele in [')', '}', ']']:
                # Stack should have at least 1 opening bracket corresponding to the closing bracket
if len(bracStack) == 0:
return False
topBracStack = bracStack.pop()
if ele == ')' and topBracStack != '(':
return False
elif ele == '}' and topBracStack != '{':
return False
elif ele == ']' and topBracStack != '[':
return False
# Stack Length Should Be Zero at End
        return len(bracStack) == 0
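# Quick self-check added for illustration; not part of the original solution.
if __name__ == "__main__":
    s = Solution()
    assert s.isValid("()[]{}") is True
    assert s.isValid("(]") is False
    assert s.isValid("([)]") is False
    print("all checks passed")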
| 29.727273
| 86
| 0.459735
| 980
| 0.998981
| 0
| 0
| 0
| 0
| 0
| 0
| 243
| 0.247706
|
665956b19edea097158d528d01890795f973fee1
| 19,404
|
py
|
Python
|
app/apps/address/migrations/0001_initial.py
|
brsrtc/mini-erp-docker
|
f5c37c71384c76e029a26e89f4771a59ed02f925
|
[
"MIT"
] | 1
|
2021-01-18T07:11:31.000Z
|
2021-01-18T07:11:31.000Z
|
app/apps/address/migrations/0001_initial.py
|
brsrtc/mini-erp-docker
|
f5c37c71384c76e029a26e89f4771a59ed02f925
|
[
"MIT"
] | null | null | null |
app/apps/address/migrations/0001_initial.py
|
brsrtc/mini-erp-docker
|
f5c37c71384c76e029a26e89f4771a59ed02f925
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.3 on 2020-12-05 17:27
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
import core.cache
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='City',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True,
verbose_name='Created At')),
('updated_at', models.DateTimeField(auto_now=True, null=True,
verbose_name='Updated At')),
('is_active',
models.BooleanField(default=True, verbose_name='Is Active')),
('is_deleted',
models.BooleanField(default=False, verbose_name='Is Deleted')),
('deleted_at', models.DateTimeField(blank=True, null=True,
verbose_name='Deleted At')),
('data', models.JSONField(blank=True, default=dict, null=True)),
('name', models.CharField(max_length=128, verbose_name='Name')),
('slot', models.IntegerField(default=1, verbose_name='Slot')),
('use_in',
models.BooleanField(default=False, verbose_name='Use In')),
],
options={
'ordering': ['slot', 'name'],
},
bases=(models.Model, core.cache.BaseCache),
),
migrations.CreateModel(
name='Township',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True,
verbose_name='Created At')),
('updated_at', models.DateTimeField(auto_now=True, null=True,
verbose_name='Updated At')),
('is_active',
models.BooleanField(default=True, verbose_name='Is Active')),
('is_deleted',
models.BooleanField(default=False, verbose_name='Is Deleted')),
('deleted_at', models.DateTimeField(blank=True, null=True,
verbose_name='Deleted At')),
('data', models.JSONField(blank=True, default=dict, null=True)),
('name', models.CharField(max_length=128, verbose_name='Name')),
('city',
models.ForeignKey(on_delete=django.db.models.deletion.PROTECT,
to='address.city', verbose_name='City')),
('created_by', models.ForeignKey(blank=True, null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name='address_township_created_by',
to=settings.AUTH_USER_MODEL,
verbose_name='Created By')),
('deleted_by', models.ForeignKey(blank=True, null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name='address_township_deleted_by',
to=settings.AUTH_USER_MODEL,
verbose_name='Deleted By')),
('updated_by', models.ForeignKey(blank=True, null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name='address_township_updated_by',
to=settings.AUTH_USER_MODEL,
verbose_name='Updated By')),
],
options={
'ordering': ['name'],
},
bases=(models.Model, core.cache.BaseCache),
),
migrations.CreateModel(
name='State',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True,
verbose_name='Created At')),
('updated_at', models.DateTimeField(auto_now=True, null=True,
verbose_name='Updated At')),
('is_active',
models.BooleanField(default=True, verbose_name='Is Active')),
('is_deleted',
models.BooleanField(default=False, verbose_name='Is Deleted')),
('deleted_at', models.DateTimeField(blank=True, null=True,
verbose_name='Deleted At')),
('data', models.JSONField(blank=True, default=dict, null=True)),
('name', models.CharField(max_length=32, verbose_name='Name')),
('code', models.CharField(max_length=16, verbose_name='Code')),
('created_by', models.ForeignKey(blank=True, null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name='address_state_created_by',
to=settings.AUTH_USER_MODEL,
verbose_name='Created By')),
('deleted_by', models.ForeignKey(blank=True, null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name='address_state_deleted_by',
to=settings.AUTH_USER_MODEL,
verbose_name='Deleted By')),
('updated_by', models.ForeignKey(blank=True, null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name='address_state_updated_by',
to=settings.AUTH_USER_MODEL,
verbose_name='Updated By')),
],
options={
'ordering': ['name'],
},
bases=(models.Model, core.cache.BaseCache),
),
migrations.CreateModel(
name='District',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True,
verbose_name='Created At')),
('updated_at', models.DateTimeField(auto_now=True, null=True,
verbose_name='Updated At')),
('is_active',
models.BooleanField(default=True, verbose_name='Is Active')),
('is_deleted',
models.BooleanField(default=False, verbose_name='Is Deleted')),
('deleted_at', models.DateTimeField(blank=True, null=True,
verbose_name='Deleted At')),
('data', models.JSONField(blank=True, default=dict, null=True)),
('name', models.CharField(max_length=128, verbose_name='Name')),
('created_by', models.ForeignKey(blank=True, null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name='address_district_created_by',
to=settings.AUTH_USER_MODEL,
verbose_name='Created By')),
('deleted_by', models.ForeignKey(blank=True, null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name='address_district_deleted_by',
to=settings.AUTH_USER_MODEL,
verbose_name='Deleted By')),
('township',
models.ForeignKey(on_delete=django.db.models.deletion.PROTECT,
to='address.township',
verbose_name='Township')),
('updated_by', models.ForeignKey(blank=True, null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name='address_district_updated_by',
to=settings.AUTH_USER_MODEL,
verbose_name='Updated By')),
],
options={
'ordering': ['name'],
},
bases=(models.Model, core.cache.BaseCache),
),
migrations.CreateModel(
name='Country',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True,
verbose_name='Created At')),
('updated_at', models.DateTimeField(auto_now=True, null=True,
verbose_name='Updated At')),
('is_active',
models.BooleanField(default=True, verbose_name='Is Active')),
('is_deleted',
models.BooleanField(default=False, verbose_name='Is Deleted')),
('deleted_at', models.DateTimeField(blank=True, null=True,
verbose_name='Deleted At')),
('data', models.JSONField(blank=True, default=dict, null=True)),
('name', models.CharField(max_length=64, verbose_name='Name')),
('code', models.CharField(max_length=16, verbose_name='Code')),
('slot', models.IntegerField(default=1, verbose_name='Slot')),
('default_country', models.BooleanField(default=False)),
('created_by', models.ForeignKey(blank=True, null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name='address_country_created_by',
to=settings.AUTH_USER_MODEL,
verbose_name='Created By')),
('deleted_by', models.ForeignKey(blank=True, null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name='address_country_deleted_by',
to=settings.AUTH_USER_MODEL,
verbose_name='Deleted By')),
('updated_by', models.ForeignKey(blank=True, null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name='address_country_updated_by',
to=settings.AUTH_USER_MODEL,
verbose_name='Updated By')),
],
options={
'ordering': ['slot', 'name'],
},
bases=(models.Model, core.cache.BaseCache),
),
migrations.AddField(
model_name='city',
name='country',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT,
to='address.country',
verbose_name='Country'),
),
migrations.AddField(
model_name='city',
name='created_by',
field=models.ForeignKey(blank=True, null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name='address_city_created_by',
to=settings.AUTH_USER_MODEL,
verbose_name='Created By'),
),
migrations.AddField(
model_name='city',
name='deleted_by',
field=models.ForeignKey(blank=True, null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name='address_city_deleted_by',
to=settings.AUTH_USER_MODEL,
verbose_name='Deleted By'),
),
migrations.AddField(
model_name='city',
name='updated_by',
field=models.ForeignKey(blank=True, null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name='address_city_updated_by',
to=settings.AUTH_USER_MODEL,
verbose_name='Updated By'),
),
migrations.CreateModel(
name='Address',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True,
verbose_name='Created At')),
('updated_at', models.DateTimeField(auto_now=True, null=True,
verbose_name='Updated At')),
('is_active',
models.BooleanField(default=True, verbose_name='Is Active')),
('is_deleted',
models.BooleanField(default=False, verbose_name='Is Deleted')),
('deleted_at', models.DateTimeField(blank=True, null=True,
verbose_name='Deleted At')),
('data', models.JSONField(blank=True, default=dict, null=True)),
('address_title', models.CharField(max_length=512,
verbose_name='Address Title')),
('address', models.TextField()),
('postal_code',
models.CharField(blank=True, max_length=10, null=True,
verbose_name='Postal Code')),
('phone',
models.CharField(help_text='Örn: 5301234567', max_length=32,
verbose_name='Phone')),
('internal',
models.CharField(blank=True, max_length=64, null=True,
verbose_name='Dahili')),
('fax',
models.CharField(blank=True, help_text='Örn: 2122454545',
max_length=64, null=True,
verbose_name='Fax')),
('name', models.CharField(blank=True, max_length=256, null=True,
verbose_name='Name')),
('identity_number',
models.CharField(blank=True, max_length=64, null=True,
verbose_name='Identity Number')),
('tax_no',
models.CharField(blank=True, max_length=256, null=True,
verbose_name='Tax No')),
('tax_office',
models.CharField(blank=True, max_length=256, null=True,
verbose_name='Tax Office')),
('is_cancelled', models.BooleanField(default=False,
verbose_name='Is Cancelled')),
('cancelled_at', models.DateTimeField(blank=True, null=True,
verbose_name='Cancelled At')),
('city',
models.ForeignKey(on_delete=django.db.models.deletion.PROTECT,
to='address.city', verbose_name='City')),
('created_by', models.ForeignKey(blank=True, null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name='address_address_created_by',
to=settings.AUTH_USER_MODEL,
verbose_name='Created By')),
('deleted_by', models.ForeignKey(blank=True, null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name='address_address_deleted_by',
to=settings.AUTH_USER_MODEL,
verbose_name='Deleted By')),
('district', models.ForeignKey(blank=True, null=True,
on_delete=django.db.models.deletion.PROTECT,
to='address.district',
verbose_name='District')),
('state', models.ForeignKey(blank=True, null=True,
on_delete=django.db.models.deletion.PROTECT,
to='address.state',
verbose_name='State')),
('township',
models.ForeignKey(on_delete=django.db.models.deletion.PROTECT,
to='address.township',
verbose_name='Township')),
('updated_by', models.ForeignKey(blank=True, null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name='address_address_updated_by',
to=settings.AUTH_USER_MODEL,
verbose_name='Updated By')),
],
options={
'ordering': ['id'],
'abstract': False,
},
bases=(models.Model, core.cache.BaseCache),
),
]
| 59.521472
| 93
| 0.44563
| 19,228
| 0.990828
| 0
| 0
| 0
| 0
| 0
| 0
| 2,613
| 0.134649
|
665bab55df7c6bcde1b85c9c43014205b79501eb
| 2,984
|
py
|
Python
|
pybf/image_settings.py
|
Sergio5714/pybf
|
bf56b353cd715c1bdb16d6cbb79aef44e3ef49bc
|
[
"Apache-2.0"
] | 1
|
2021-11-02T09:54:41.000Z
|
2021-11-02T09:54:41.000Z
|
pybf/image_settings.py
|
Sergio5714/pybf
|
bf56b353cd715c1bdb16d6cbb79aef44e3ef49bc
|
[
"Apache-2.0"
] | null | null | null |
pybf/image_settings.py
|
Sergio5714/pybf
|
bf56b353cd715c1bdb16d6cbb79aef44e3ef49bc
|
[
"Apache-2.0"
] | 2
|
2020-04-17T10:50:06.000Z
|
2021-11-02T09:54:47.000Z
|
"""
Copyright (C) 2020 ETH Zurich. All rights reserved.
Author: Sergei Vostrikov, ETH Zurich
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
class ImageSettings:
def __init__(self,
image_size_x_0,
image_size_x_1,
image_size_z_0,
image_size_z_1,
lateral_pixel_density,
transducer_obj):
# Copy transducers params
self._transducer = transducer_obj
# Copy image params
self._image_size_x_0 = image_size_x_0
self._image_size_z_0 = image_size_z_0
self._image_size_x_1 = image_size_x_1
self._image_size_z_1 = image_size_z_1
self._image_size_x = abs(image_size_x_1 - image_size_x_0)
self._image_size_z = abs(image_size_z_1 - image_size_z_0)
# Number of pixels per distance between transducers
self._lat_pixel_density = lateral_pixel_density
self._calc_min_axial_resolution()
# Calculate high resolution for images
self._calc_high_res()
return
def _calc_min_axial_resolution(self):
self._axial_res_min = 1 / self._transducer.bandwidth_hz * self._transducer.speed_of_sound
return
def _calc_high_res(self):
# Calculate number of x pixels
n_x = np.round(self._image_size_x / self._transducer._x_pitch * self._lat_pixel_density)
        n_x = n_x.astype(int).item()
# Calculate number of z pixels
n_z = np.round(self._image_size_z / self._axial_res_min)
        n_z = n_z.astype(int).item()
self._high_resolution = (n_x, n_z)
print('The highest resolution for the system is: ', self._high_resolution)
return
def get_pixels_coords(self, x_res=None, z_res=None):
if x_res != None:
n_x = x_res
else:
n_x = self._high_resolution[0]
if z_res != None:
n_z = z_res
else:
n_z = self._high_resolution[1]
# Calculate positions
x_coords = np.linspace(self._image_size_x_0, self._image_size_x_1, n_x)
x_coords = x_coords.reshape(-1,)
# Calculate positions
z_coords = np.linspace(self._image_size_z_0, self._image_size_z_1, n_z)
z_coords = z_coords.reshape(-1,)
self._pixels_coords = np.transpose(np.dstack(np.meshgrid(x_coords, z_coords)).reshape(-1, 2))
return self._pixels_coords
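# Minimal usage sketch (illustrative; the stub transducer below is an assumption,
# real code would pass a pybf transducer object exposing these attributes).
#
#   class _StubTransducer:
#       bandwidth_hz = 5e6        # [Hz]
#       speed_of_sound = 1540.0   # [m/s]
#       _x_pitch = 0.3e-3         # [m]
#
#   settings = ImageSettings(image_size_x_0=-0.02, image_size_x_1=0.02,
#                            image_size_z_0=0.01, image_size_z_1=0.05,
#                            lateral_pixel_density=2,
#                            transducer_obj=_StubTransducer())
#   pixels_xz = settings.get_pixels_coords()  # shape: (2, n_x * n_z)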
| 31.410526
| 101
| 0.655831
| 2,307
| 0.773123
| 0
| 0
| 0
| 0
| 0
| 0
| 934
| 0.313003
|
665ca8b455ad5fa005ae44eb4ff2f68155d6d9ba
| 9,912
|
py
|
Python
|
pre-receive.d/net.twistedbytes.gitlab-protector.py
|
andgeno/GitLab-Protector
|
b05f39a23213bd832cbbf30bc63731aca1fce18d
|
[
"MIT"
] | 7
|
2020-12-14T10:05:13.000Z
|
2021-11-25T15:14:26.000Z
|
pre-receive.d/net.twistedbytes.gitlab-protector.py
|
andgeno/GitLab-Protector
|
b05f39a23213bd832cbbf30bc63731aca1fce18d
|
[
"MIT"
] | 1
|
2021-04-19T13:47:12.000Z
|
2021-04-24T12:39:47.000Z
|
pre-receive.d/net.twistedbytes.gitlab-protector.py
|
andgeno/GitLab-Protector
|
b05f39a23213bd832cbbf30bc63731aca1fce18d
|
[
"MIT"
] | 1
|
2021-04-19T14:06:54.000Z
|
2021-04-19T14:06:54.000Z
|
#!/usr/bin/env python
import sys
import os
import re
import subprocess
from enum import Enum
class GitPushAction(Enum):
BRANCH_NEW = 1
BRANCH_UPDATE = 2
BRANCH_REMOVE = 3
class GitLabProtector:
"""GitLab Protector: A git pre-receive hook"""
NULL_HASH = '0000000000000000000000000000000000000000'
EMPTY_TREE_HASH = '4b825dc642cb6eb9a060e54bf8d69288fbee4904'
groups = {}
rules = []
git_modified_files = []
old_hash = None
new_hash = None
ref_name = None
git_push_action = None
def __get_repo_hash(self):
path_symlink = sys.argv[0]
file_this = os.path.basename(path_symlink)
re_pattern = '^/.+/@hashed/([0-9a-fA-F]{2}/[0-9a-fA-F]{2}/[0-9a-fA-F]{64})\.git/custom_hooks/pre-receive.d/%s$' % file_this
re_result = re.search(re_pattern, path_symlink)
if re_result is None:
print "GL-HOOK-ERR: Could not determine GitLab repository hash."
exit(1)
repo_hash_slashes = re_result.group(1)
repo_hash = repo_hash_slashes.replace('/', '-')
return repo_hash
def __get_user_config_dir(self):
path_symlink = sys.argv[0]
path_target_of_symlink = os.path.realpath(path_symlink)
dir_target_of_symlink = os.path.dirname(path_target_of_symlink)
dir_base = os.path.join(dir_target_of_symlink, '..')
dir_user_config = os.path.join(dir_base, 'user-config')
return dir_user_config
def __remove_comments_in_buf(self, buf_in):
buf_out = []
if buf_in:
for line in buf_in:
if(line.strip() == ''): continue
if(re.match(r'^\s*#', line)): continue
buf_out.append(line)
return buf_out
def load_protector_groups_config(self):
self.groups = {}
dir_user_config = self.__get_user_config_dir()
file_groups_config = os.path.join(dir_user_config, 'groups.global.conf')
try:
with open(file_groups_config, 'r') as f:
raw_groups_config = f.read()
if raw_groups_config:
buf_with_comments = raw_groups_config.splitlines()
buf_groups_config = self.__remove_comments_in_buf(buf_with_comments)
except:
print "GL-HOOK-ERR: Could not read groups config: {0}".format(file_groups_config)
exit(1)
for line in buf_groups_config:
tmp = line.split('=', 1)
if len(tmp) != 2: continue
group_name = tmp[0].strip()
if group_name == '': continue
users = tmp[1].strip()
if users == '': continue
users = users.split(',')
            ## Remove empty 'user' items (list comprehension avoids mutating the list while iterating)
            users = [user for user in users if user.strip() != '']
self.groups[group_name] = users
#print('groups == {0}'.format(self.groups))
def load_protector_repo_config(self):
rules = []
repo_hash = self.__get_repo_hash()
dir_user_config = self.__get_user_config_dir()
file_repo_config = os.path.join(dir_user_config, 'repo.{0}.conf'.format(repo_hash))
try:
with open(file_repo_config, 'r') as f:
raw_repo_config = f.read()
if raw_repo_config:
buf_with_comments = raw_repo_config.splitlines()
buf_repo_config = self.__remove_comments_in_buf(buf_with_comments)
except:
print "GL-HOOK-ERR: Could not read user config: {0}".format(file_repo_config)
exit(1)
for line in buf_repo_config:
tmp = line.split('=', 1)
if len(tmp) != 2: continue
group_name = tmp[0].strip()
if group_name == '': continue
pattern = tmp[1].strip()
if pattern == '': continue
self.rules.append({'pattern': pattern, 'group': group_name})
#print('rules == {0}'.format(self.rules))
def load_git_modified_files(self):
self.git_modified_files = []
## Incoming format on STDIN: "old_hash new_hash ref_name"
raw_stdin = sys.stdin.read()
(old_hash, new_hash, ref_name) = raw_stdin.strip().split()
#print "old_hash<{0}> new_hash<{1}> ref_name<{2}>".format(old_hash, new_hash, ref_name)
self.old_hash = old_hash
self.new_hash = new_hash
self.ref_name = ref_name
if new_hash == self.NULL_HASH:
## Don't validate branches to be removed
self.git_push_action = GitPushAction.BRANCH_REMOVE
return
if old_hash == self.NULL_HASH:
## New branch is being pushed
self.git_push_action = GitPushAction.BRANCH_NEW
old_hash = self.EMPTY_TREE_HASH
proc = subprocess.Popen(['git', 'diff','--name-only', old_hash, new_hash], stdout=subprocess.PIPE)
else:
## Branch is being updated
self.git_push_action = GitPushAction.BRANCH_UPDATE
proc = subprocess.Popen(['git', 'diff','--name-only', old_hash, new_hash], stdout=subprocess.PIPE)
raw_stdout = proc.stdout.readlines()
if raw_stdout:
for line in raw_stdout:
filename = str(line.strip('\n'))
self.git_modified_files.append(filename)
#print('git_modified_files == {0}'.format(self.git_modified_files))
def __is_user_name_in_group(self, user_name, group_name):
if group_name:
if group_name in self.groups:
for user in self.groups[group_name]:
if user == user_name: return True
return False
def validate(self):
is_success = True
if(self.git_push_action is GitPushAction.BRANCH_NEW
or self.git_push_action is GitPushAction.BRANCH_UPDATE):
if not self.validate_file_permissions(): is_success = False
#if not self.validate_file_sizes(): is_success = False
return is_success
def validate_file_sizes(self):
print u"\033[0;37;1m\u2022 Validation Phase: File Sizes\033[0m".encode('utf8')
validation_successful = True
## TODO Finish implementation of this feature.
## TODO Getting the file sizes does already work.
## TODO Make max file size configurable per repo in user config.
DUMMY_5MB_LIMIT = 1024 * 1024 * 5
max_filesize = DUMMY_5MB_LIMIT ## TODO just for testing! read from config file
errors = []
for git_modified_file in self.git_modified_files:
proc = subprocess.Popen(['git', 'cat-file', '-s', '{0}:{1}'.format(self.new_hash, git_modified_file)], stdout=subprocess.PIPE)
raw_stdout = proc.stdout.readlines()
if raw_stdout:
for line in raw_stdout:
filesize = int(line.strip('\n'))
if filesize is None:
filesize = 0
if(filesize > max_filesize):
validation_successful = False
errors.append({ 'filename': git_modified_file, 'filesize': filesize})
if validation_successful:
return True
else:
print "GL-HOOK-ERR: [POLICY] The following files exceed the maximum filesize limit of {0} byte(s):".format(max_filesize)
for error in errors:
print u'GL-HOOK-ERR: Filesize limit check: \u274C {0} - Filesize: {1} byte(s)'.format(error['filename'], error['filesize']).encode('utf8')
return False
def validate_file_permissions(self):
print u"\033[0;37;1m\u2022 Validation Phase: File Permissions\033[0m".encode('utf8')
validation_successful = True
gitlab_user_id = os.environ.get('GL_ID')
gitlab_user_name = os.environ.get('GL_USERNAME')
gitlab_project_id = os.environ.get('GL_REPOSITORY')
#print "gitlab_user_id<{0}> gitlab_user_name<{1}> gitlab_project_id<{2}>".format(gitlab_user_id, gitlab_user_name, gitlab_project_id)
errors = []
for git_modified_file in self.git_modified_files:
rule_index = -1
for rule in self.rules:
rule_index = rule_index + 1
match = re.search(rule['pattern'], git_modified_file)
if match is None: continue
## A protected and modified file was detected which requires that the user
## who is pushing this change has to be a member of that configured group.
if self.__is_user_name_in_group(gitlab_user_name, rule['group']):
print u'Protected file permission check: \u2714 {0}'.format(git_modified_file).encode('utf8')
continue
else:
validation_successful = False
errors.append({ 'filename': git_modified_file, 'rule-index': rule_index})
if validation_successful:
return True
else:
print "GL-HOOK-ERR: [POLICY] You don't have permission to push changes for the following files:"
for error in errors:
print u'GL-HOOK-ERR: Protected file permission check: \u274C {0} - Rule Index: {1}'.format(error['filename'], error['rule-index']).encode('utf8')
return False
def __init__(self):
print "\033[0;37;4mGitLab Protector\033[0m: Started"
self.load_protector_groups_config()
self.load_protector_repo_config()
self.load_git_modified_files()
is_success = self.validate()
result_string = '\033[42;30;1m SUCCESSFUL \033[0m' if is_success else '\033[41;37;1m FAILED \033[0m'
print "\033[0;37;4mGitLab Protector\033[0m: Validation {0}".format(result_string)
exit(0) if is_success else exit(1)
GitLabProtector()
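## Example config layout inferred from the parsers above (illustrative only):
##
##   user-config/groups.global.conf
##       # group_name = comma-separated GitLab usernames
##       release-managers = alice,bob
##
##   user-config/repo.<sha256-repo-hash>.conf
##       # group_name = regex matched against modified file paths
##       release-managers = ^deploy/.*\.yml$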
| 38.123077
| 161
| 0.602502
| 9,795
| 0.988196
| 0
| 0
| 0
| 0
| 0
| 0
| 2,379
| 0.240012
|
665d3713837abc4149228da527c02f71d0d908ef
| 1,151
|
py
|
Python
|
tests/test_cli.py
|
joshbduncan/word-search-generator
|
3c527f0371cbe4550a24403c660d1c6511b4cf79
|
[
"MIT"
] | 4
|
2021-09-18T21:21:54.000Z
|
2022-03-02T03:53:54.000Z
|
tests/test_cli.py
|
joshbduncan/word-search-generator
|
3c527f0371cbe4550a24403c660d1c6511b4cf79
|
[
"MIT"
] | 4
|
2021-09-18T21:50:33.000Z
|
2022-03-22T04:29:33.000Z
|
tests/test_cli.py
|
joshbduncan/word-search-generator
|
3c527f0371cbe4550a24403c660d1c6511b4cf79
|
[
"MIT"
] | 1
|
2021-11-17T14:53:50.000Z
|
2021-11-17T14:53:50.000Z
|
import os
import pathlib
import tempfile
TEMP_DIR = tempfile.TemporaryDirectory()
def test_entrypoint():
exit_status = os.system("word-search --help")
assert exit_status == 0
def test_no_words_provided():
exit_status = os.system("word-search")
assert os.WEXITSTATUS(exit_status) == 1
def test_just_words():
exit_status = os.system("word-search some test words")
assert exit_status == 0
def test_stdin():
exit_status = os.system("echo computer robot soda | word-search")
assert os.WEXITSTATUS(exit_status) == 0
def test_export_pdf():
temp_path = TEMP_DIR.name + "/test.pdf"
exit_status = os.system(f"word-search some test words -e pdf -o {temp_path}")
assert exit_status == 0 and pathlib.Path(temp_path).exists()
def test_export_csv():
temp_path = TEMP_DIR.name + "/test.csv"
exit_status = os.system(f"word-search some test words -e csv -o {temp_path}")
assert exit_status == 0 and pathlib.Path(temp_path).exists()
def test_invalid_export_location():
exit_status = os.system("word-search some test words -e csv -o ~/RANDOMTESTLOC")
assert os.WEXITSTATUS(exit_status) == 1
| 26.159091
| 84
| 0.709818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 283
| 0.245873
|
665d77836b64427e5626b7f66bfbf1c6d819e02b
| 1,167
|
py
|
Python
|
karas/__init__.py
|
TuXiaokang/karas
|
2549502424b2d4c67047b867b0315f33b2e997c5
|
[
"MIT"
] | 3
|
2019-02-28T13:53:48.000Z
|
2022-01-18T12:53:37.000Z
|
karas/__init__.py
|
TuXiaokang/karas
|
2549502424b2d4c67047b867b0315f33b2e997c5
|
[
"MIT"
] | null | null | null |
karas/__init__.py
|
TuXiaokang/karas
|
2549502424b2d4c67047b867b0315f33b2e997c5
|
[
"MIT"
] | 1
|
2022-01-18T12:53:42.000Z
|
2022-01-18T12:53:42.000Z
|
import pickle
from karas.version import __version__
def serialize(obj, filename):
with open(filename, 'wb') as f:
f.write(pickle.dumps(obj))
def deserialize(filename):
with open(filename, 'rb') as f:
obj = pickle.load(f)
return obj
def replace_type(key):
return key.replace('scalar/', '') \
.replace('images/', '')
class KeyEntry(object):
mode = 'train'
type = None
key = ''
def __init__(self, key):
items = key.split('/')
if items[0] not in ('train', 'test'):
items.insert(0, 'train')
self.mode = items[0]
if items[1] in ('scalar', 'images', 'others'):
self.type = items[1]
self.key = '/'.join(items[2:])
else:
self.key = '/'.join(items[1:])
def __repr__(self):
        return '{} {} {}'.format(self.mode, self.type, self.key)
def __eq__(self, other):
if self.mode != other.mode or self.key != other.key:
return False
if self.type is not None:
return self.type == other.type
return True
def compare_key(key, tag):
return KeyEntry(key) == KeyEntry(tag)
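# Usage sketch (illustrative): keys compare by mode and key, and by type only
# when the left-hand key specifies one.
#
#   compare_key('loss', 'train/scalar/loss')  # True  (no type given -> type ignored)
#   compare_key('test/loss', 'train/loss')    # False (modes differ)
#   serialize({'acc': 0.9}, 'metrics.pkl'); deserialize('metrics.pkl')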
| 22.442308
| 60
| 0.548415
| 731
| 0.626392
| 0
| 0
| 0
| 0
| 0
| 0
| 98
| 0.083976
|
665e40e33fdd973b30b29de0d4999dd092a29402
| 681
|
py
|
Python
|
calc.py
|
fja05680/calc
|
6959bdd740722c7e3024f4e5a9a21607ad5ffccf
|
[
"MIT"
] | null | null | null |
calc.py
|
fja05680/calc
|
6959bdd740722c7e3024f4e5a9a21607ad5ffccf
|
[
"MIT"
] | null | null | null |
calc.py
|
fja05680/calc
|
6959bdd740722c7e3024f4e5a9a21607ad5ffccf
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import calc
def main():
try:
while True:
try:
expression = input('calc> ')
# Parse the expression.
lexer = calc.Lexer(expression)
tokens = lexer.parse()
print(tokens)
parser = calc.Parser(tokens)
tree = parser.parse()
# Evaluate the expression.
if tree:
value = tree.evaluate()
print(f'{tree} = {value}')
except Exception as e:
print(e)
except KeyboardInterrupt:
print()
if __name__ == '__main__':
main()
| 23.482759
| 46
| 0.444934
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 104
| 0.152717
|
666446282fcb45a4a20b926c54fc47be65a01ac8
| 8,534
|
py
|
Python
|
aiida_environ/workflows/pw/grandcanonical.py
|
environ-developers/aiida-environ
|
c39ac70227a41e084b74df630c3cb4b4caa27094
|
[
"MIT"
] | null | null | null |
aiida_environ/workflows/pw/grandcanonical.py
|
environ-developers/aiida-environ
|
c39ac70227a41e084b74df630c3cb4b4caa27094
|
[
"MIT"
] | 1
|
2021-12-07T17:03:44.000Z
|
2021-12-07T17:03:44.000Z
|
aiida_environ/workflows/pw/grandcanonical.py
|
environ-developers/aiida-environ
|
c39ac70227a41e084b74df630c3cb4b4caa27094
|
[
"MIT"
] | null | null | null |
import numpy as np
from aiida.common import AttributeDict
from aiida.engine import WorkChain, append_
from aiida.orm import Dict, List, StructureData
from aiida.orm.nodes.data.upf import get_pseudos_from_structure
from aiida.orm.utils import load_node
from aiida.plugins import WorkflowFactory
from aiida_quantumespresso.utils.mapping import prepare_process_inputs
from aiida_environ.calculations.adsorbate.gen_supercell import (
adsorbate_gen_supercell,
gen_hydrogen,
)
from aiida_environ.calculations.adsorbate.post_supercell import adsorbate_post_supercell
from aiida_environ.data.charge import EnvironChargeData
from aiida_environ.utils.charge import get_charge_range
from aiida_environ.utils.vector import get_struct_bounds
EnvPwBaseWorkChain = WorkflowFactory("environ.pw.base")
PwBaseWorkChain = WorkflowFactory("quantumespresso.pw.base")
class AdsorbateGrandCanonical(WorkChain):
@classmethod
def define(cls, spec):
super().define(spec)
spec.expose_inputs(
EnvPwBaseWorkChain,
namespace="base",
namespace_options={"help": "Inputs for the `EnvPwBaseWorkChain`."},
exclude=("pw.structure", "pw.external_charges"),
)
spec.input("vacancies", valid_type=List)
spec.input("bulk_structure", valid_type=StructureData)
spec.input("mono_structure", valid_type=StructureData)
spec.input("calculation_parameters", valid_type=Dict)
spec.outline(
cls.setup,
cls.selection,
cls.simulate,
# cls.postprocessing
)
def setup(self):
self.ctx.environ_parameters = self.inputs.base.pw.environ_parameters
self.ctx.calculation_details = {}
calculation_parameters = self.inputs.calculation_parameters.get_dict()
calculation_parameters.setdefault("charge_distance", 5.0)
calculation_parameters.setdefault("charge_max", 1.0)
calculation_parameters.setdefault("charge_min", -1.0)
calculation_parameters.setdefault("charge_increment", 0.2)
calculation_parameters.setdefault("charge_spread", 0.5)
calculation_parameters.setdefault("system_axis", 3)
calculation_parameters.setdefault("cell_shape_x", 2)
calculation_parameters.setdefault("cell_shape_y", 2)
calculation_parameters.setdefault("reflect_vacancies", True)
self.ctx.calculation_parameters = Dict(dict=calculation_parameters)
# TODO: check sanity of inputs
def selection(self):
d = adsorbate_gen_supercell(
self.ctx.calculation_parameters,
self.inputs.mono_structure,
self.inputs.vacancies,
)
self.ctx.struct_list = d["output_structs"]
self.ctx.num_adsorbate = d["num_adsorbate"]
self.report(f"struct_list written: {self.ctx.struct_list}")
self.report(f"num_adsorbate written: {self.ctx.num_adsorbate}")
def simulate(self):
distance = self.ctx.calculation_parameters["charge_distance"]
axis = self.ctx.calculation_parameters["system_axis"]
charge_max = self.ctx.calculation_parameters["charge_max"]
charge_inc = self.ctx.calculation_parameters["charge_increment"]
charge_spread = self.ctx.calculation_parameters["charge_spread"]
charge_range = get_charge_range(charge_max, charge_inc)
# TODO: maybe do this at setup and change the cell if it's too big?
cpos1, cpos2 = get_struct_bounds(self.inputs.mono_structure, axis)
# change by 5 angstrom
cpos1 -= distance
cpos2 += distance
npcpos1 = np.zeros(3)
npcpos2 = np.zeros(3)
npcpos1[axis - 1] = cpos1
npcpos2[axis - 1] = cpos2
nsims = (len(charge_range) * (len(self.ctx.struct_list) + 1)) + 1
self.report(f"number of simulations to run = {nsims}")
for i, charge_amt in enumerate(charge_range):
self.ctx.calculation_details[charge_amt] = {}
# loop over charges
charges = EnvironChargeData()
# get position of charge
charges.append_charge(
-charge_amt / 2, tuple(npcpos1), charge_spread, 2, axis
)
charges.append_charge(
-charge_amt / 2, tuple(npcpos2), charge_spread, 2, axis
)
for j, structure_pk in enumerate(self.ctx.struct_list):
# regular monolayer simulation with adsorbate/charge
inputs = AttributeDict(
self.exposed_inputs(EnvPwBaseWorkChain, namespace="base")
)
inputs.pw.parameters = inputs.pw.parameters.get_dict()
structure = load_node(structure_pk)
self.report(f"{structure}")
inputs.pw.structure = structure
inputs.pw.parameters["SYSTEM"]["tot_charge"] = charge_amt
inputs.pw.parameters["ELECTRONS"]["mixing_mode"] = "local-TF"
inputs.pw.external_charges = charges
inputs.pw.pseudos = get_pseudos_from_structure(structure, "SSSPe")
inputs.metadata.call_link_label = f"s{j}_c{i}"
inputs = prepare_process_inputs(EnvPwBaseWorkChain, inputs)
running = self.submit(EnvPwBaseWorkChain, **inputs)
self.report(f"<s{j}_c{i}> launching EnvPwBaseWorkChain<{running.pk}>")
self.ctx.calculation_details[charge_amt][structure_pk] = running.pk
self.to_context(workchains=append_(running))
# base monolayer simulation
inputs = AttributeDict(
self.exposed_inputs(EnvPwBaseWorkChain, namespace="base")
)
structure = self.inputs.mono_structure
self.report(f"{structure}")
inputs.pw.structure = structure
inputs.pw.external_charges = charges
inputs.pw.pseudos = get_pseudos_from_structure(structure, "SSSPe")
inputs.metadata.call_link_label = f"smono_c{i}"
inputs = prepare_process_inputs(EnvPwBaseWorkChain, inputs)
running = self.submit(EnvPwBaseWorkChain, **inputs)
self.report(f"<smono_c{i}> launching EnvPwBaseWorkChain<{running.pk}>")
self.ctx.calculation_details[charge_amt]["mono"] = running.pk
self.to_context(workchains=append_(running))
# bulk simulation
inputs = AttributeDict(
self.exposed_inputs(EnvPwBaseWorkChain, namespace="base")
)
structure = self.inputs.bulk_structure
self.report(f"{structure}")
inputs.pw.structure = structure
inputs.pw.pseudos = get_pseudos_from_structure(structure, "SSSPe")
inputs.metadata.call_link_label = "sbulk"
inputs.pw.metadata.options.parser_name = "quantumespresso.pw"
delattr(inputs.pw.metadata.options, "debug_filename")
delattr(inputs.pw, "environ_parameters")
inputs = prepare_process_inputs(PwBaseWorkChain, inputs)
running = self.submit(PwBaseWorkChain, **inputs)
self.report(f"<sbulk> launching PwBaseWorkChain<{running.pk}>")
self.ctx.calculation_details["bulk"] = running.pk
self.to_context(workchains=append_(running))
# hydrogen simulation
inputs = AttributeDict(
self.exposed_inputs(EnvPwBaseWorkChain, namespace="base")
)
structure = gen_hydrogen()
self.report(f"{structure}")
inputs.pw.pseudos = get_pseudos_from_structure(structure, "SSSPe")
inputs.pw.structure = structure
inputs.metadata.call_link_label = "sads_neutral"
inputs.pw.metadata.options.parser_name = "quantumespresso.pw"
inputs = prepare_process_inputs(PwBaseWorkChain, inputs)
delattr(inputs.pw.metadata.options, "debug_filename")
delattr(inputs.pw, "environ_parameters")
running = self.submit(PwBaseWorkChain, **inputs)
self.report(f"<sads_neutral> launching PwBaseWorkChain<{running.pk}>")
self.ctx.calculation_details["adsorbate"] = running.pk
self.to_context(workchains=append_(running))
self.report(f"calc_details written: {self.ctx.calculation_details}")
def postprocessing(self):
adsorbate_post_supercell(
            self.inputs.mono_structure,
            self.inputs.bulk_structure,
self.ctx.calculation_parameters,
self.ctx.calculation_details,
self.ctx.struct_list,
self.ctx.num_adsorbate,
)
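# Rough submission sketch (illustrative; structures, code labels and parameter
# values below are assumptions, not taken from the original project):
#
#   from aiida.engine import submit
#   from aiida.orm import Dict, List, load_code
#
#   builder = AdsorbateGrandCanonical.get_builder()
#   builder.bulk_structure = bulk_structure          # StructureData
#   builder.mono_structure = mono_structure          # StructureData
#   builder.vacancies = List(list=[...])
#   builder.calculation_parameters = Dict(dict={'charge_max': 1.0})
#   builder.base.pw.code = load_code('pw-environ@localhost')
#   node = submit(builder)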
| 44.915789
| 88
| 0.664284
| 7,675
| 0.899344
| 0
| 0
| 687
| 0.080502
| 0
| 0
| 1,490
| 0.174596
|
6664aaeb4a16b83003b59cd285e9bdc4f631fdb5
| 6,481
|
py
|
Python
|
tabnet/utils.py
|
huangyz0918/tabnet
|
a93d52c6f33e9ea8ad0f152cdaf5a0cabec8e6d4
|
[
"MIT"
] | 1
|
2021-06-17T04:47:41.000Z
|
2021-06-17T04:47:41.000Z
|
tabnet/utils.py
|
huangyz0918/tabnet
|
a93d52c6f33e9ea8ad0f152cdaf5a0cabec8e6d4
|
[
"MIT"
] | null | null | null |
tabnet/utils.py
|
huangyz0918/tabnet
|
a93d52c6f33e9ea8ad0f152cdaf5a0cabec8e6d4
|
[
"MIT"
] | null | null | null |
import torch
import numpy as np
import pandas as pd
from collections import OrderedDict
class TrainingDataset(torch.utils.data.Dataset):
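    """Creates a PyTorch Dataset object for a set of training points and targets."""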
is_categorical = False
def __init__(
self,
X,
y,
output_mapping=None,
categorical_mapping=None,
columns=None,
device=None,
):
self.columns = columns
self.device = device
# Preprocess categoricals
if categorical_mapping:
X_slices = OrderedDict()
for key, val in sorted(
categorical_mapping.items(), key=lambda k: k[1]["idx"]
):
X_slices[val["idx"]] = map_categoricals_to_ordinals(
X[:, val["idx"]], val["map"]
).to(self.device)
idx_slice = sorted([val["idx"] for key, val in categorical_mapping.items()])
X_continuous = (
torch.from_numpy(np.delete(X, idx_slice, -1).astype(float))
.float()
.to(self.device)
)
self.X = (X_continuous, X_slices)
else:
self.X = (torch.from_numpy(X).float().to(self.device), OrderedDict())
# Preprocess targets
if output_mapping:
self.y = map_categoricals_to_ordinals(y, output_mapping).to(self.device)
self.n_output_dims = len(output_mapping.keys())
else:
self.y = torch.from_numpy(y.astype(float)).float()
if len(self.y.size()) == 1:
self.y = self.y.unsqueeze(-1).to(self.device)
self.n_output_dims = list(self.y.size())[-1]
def __len__(self):
return len(self.X[0])
def __getitem__(self, index):
return (
self.X[0][index, ...],
OrderedDict({key: self.X[1][key][index, ...] for key in self.X[1]}),
self.y[index, ...],
)
def random_batch(self, n_samples):
"""Generates a random batch of `n_samples` with replacement."""
        random_idx = np.random.randint(0, self.__len__(), size=n_samples)
return self.__getitem__(random_idx)
class InferenceDataset(torch.utils.data.Dataset):
"""Creates a PyTorch Dataset object for a set of points for inference."""
def __init__(self, X, categorical_mapping=None, columns=None, device=None):
self.columns = columns
self.device = device
# Preprocess categoricals
if categorical_mapping:
X_slices = OrderedDict()
for key, val in sorted(
categorical_mapping.items(), key=lambda k: k[1]["idx"]
):
X_slices[val["idx"]] = map_categoricals_to_ordinals(
X[:, val["idx"]], val["map"]
).to(self.device)
idx_slice = sorted([val["idx"] for key, val in categorical_mapping.items()])
X_continuous = torch.from_numpy(
np.delete(X, idx_slice, -1).astype(float)
).float()
self.X = (X_continuous.to(self.device), X_slices)
else:
self.X = (torch.from_numpy(X).float().to(self.device), OrderedDict())
def __len__(self):
return len(self.X[0])
def __getitem__(self, index):
return (
self.X[0][index, ...],
OrderedDict({key: self.X[1][key][index, ...] for key in self.X[1]}),
)
class EarlyStopping(object):
"""
Implemented from: https://gist.github.com/stefanonardo/693d96ceb2f531fa05db530f3e21517d
"""
def __init__(self, mode="min", min_delta=0, patience=10, percentage=False):
self.mode = mode
self.min_delta = min_delta
self.patience = patience
self.best = None
self.num_bad_epochs = 0
self.is_better = None
self._init_is_better(mode, min_delta, percentage)
if patience == 0:
self.is_better = lambda a, b: True
self.step = lambda a: False
def step(self, metrics):
if self.best is None:
self.best = metrics
return False
if torch.isnan(metrics):
return True
if self.is_better(metrics, self.best):
self.num_bad_epochs = 0
self.best = metrics
else:
self.num_bad_epochs += 1
if self.num_bad_epochs >= self.patience:
return True
return False
def _init_is_better(self, mode, min_delta, percentage):
if mode not in {"min", "max"}:
raise ValueError("mode " + mode + " is unknown!")
if not percentage:
if mode == "min":
self.is_better = lambda a, best: a < best - min_delta
if mode == "max":
self.is_better = lambda a, best: a > best + min_delta
else:
if mode == "min":
self.is_better = lambda a, best: a < best - (best * min_delta / 100)
if mode == "max":
self.is_better = lambda a, best: a > best + (best * min_delta / 100)
def generate_categorical_to_ordinal_map(inputs):
if isinstance(inputs, pd.Series):
inputs = inputs.values
uq_inputs = np.unique(inputs)
return dict(zip(list(uq_inputs), list(range(len(uq_inputs)))))
def map_categoricals_to_ordinals(categoricals, mapping):
unmapped_targets = set(np.unique(categoricals).flatten()) - set(mapping.keys())
if len(unmapped_targets) > 0:
raise ValueError(
"Mapping missing the following keys: {}".format(unmapped_targets)
)
return torch.from_numpy(
np.vectorize(mapping.get)(categoricals).astype(float)
).long()
def map_categoricals_to_one_hot(categoricals, mapping):
unmapped_elements = set(np.unique(categoricals).flatten()) - set(mapping.keys())
if len(unmapped_elements) > 0:
raise ValueError(
"Mapping missing the following keys: {}".format(unmapped_elements)
)
return torch.from_numpy(
np.squeeze(
np.eye(len(mapping.keys()))[
np.vectorize(mapping.get)(categoricals).reshape(-1)
]
).astype(float)
).long()
def map_ordinals_to_categoricals(ordinals, mapping):
if isinstance(ordinals, torch.Tensor):
ordinals = ordinals.detach().cpu().numpy()
elif isinstance(ordinals, list):
ordinals = np.array(ordinals)
inv_target_mapping = {v: k for k, v in mapping.items()}
return np.vectorize(inv_target_mapping.get)(ordinals).squeeze()
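# Small self-contained demo added for illustration (not part of the original
# module); it exercises the mapping helpers and EarlyStopping on toy data.
if __name__ == "__main__":
    y = np.array(["cat", "dog", "cat", "bird"])
    mapping = generate_categorical_to_ordinal_map(y)   # {'bird': 0, 'cat': 1, 'dog': 2}
    ordinals = map_categoricals_to_ordinals(y, mapping)
    assert ordinals.tolist() == [1, 2, 1, 0]

    stopper = EarlyStopping(mode="min", patience=2)
    for loss in [0.9, 0.8, 0.85, 0.86, 0.87]:
        if stopper.step(torch.tensor(loss)):
            print("early stopping triggered")
            break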
| 33.755208
| 91
| 0.577843
| 4,914
| 0.758216
| 0
| 0
| 0
| 0
| 0
| 0
| 495
| 0.076377
|
6664d9c361d76731e630fab7db18a3314ba27f7a
| 699
|
py
|
Python
|
ex022.py
|
nascimentobrenda24/PythonExercises
|
2055f42a0454ae25cba6a6457c85822eaad2df01
|
[
"MIT"
] | 1
|
2021-11-23T21:41:25.000Z
|
2021-11-23T21:41:25.000Z
|
ex022.py
|
nascimentobrenda24/PythonExercises
|
2055f42a0454ae25cba6a6457c85822eaad2df01
|
[
"MIT"
] | null | null | null |
ex022.py
|
nascimentobrenda24/PythonExercises
|
2055f42a0454ae25cba6a6457c85822eaad2df01
|
[
"MIT"
] | null | null | null |
# Text analyzer
# Write a program that reads a person's full name and shows:
# - The name in all UPPERCASE and all lowercase letters.
# - How many letters it has in total (not counting spaces).
# - How many letters the first name has.
print('=*'*20, 'CADASTRO', '=*'*20)
nome = str(input('Nome Completo:')).strip()  # strip() trims surrounding spaces
print('Analisando seu nome...')
print('Seu nome em minúsculo é {}'.format(nome.lower()))
print('Seu nome em MAIÚSCULO é {}'.format(nome.upper()))
print('Seu nome tem ao todo {} letras'.format(len(nome)-nome.count(' ')))  # minus the count of spaces
primeiro_nome = nome.split()  # splits the full name into words
print('Seu primeiro nome tem {} letras'.format(len(primeiro_nome[0])))
| 34.95
| 102
| 0.703863
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 478
| 0.675141
|
66654d5cfc565e697020cd64524f69662efe7ca5
| 312
|
py
|
Python
|
urls.py
|
stephenmcd/gamblor
|
a12f43339e2a6d34e4ed5ea3d02a3629ed5b8616
|
[
"BSD-2-Clause"
] | 12
|
2015-06-09T02:31:43.000Z
|
2021-12-11T21:35:38.000Z
|
urls.py
|
binarygrrl/gamblor
|
a12f43339e2a6d34e4ed5ea3d02a3629ed5b8616
|
[
"BSD-2-Clause"
] | null | null | null |
urls.py
|
binarygrrl/gamblor
|
a12f43339e2a6d34e4ed5ea3d02a3629ed5b8616
|
[
"BSD-2-Clause"
] | 9
|
2016-11-14T23:56:51.000Z
|
2021-04-14T07:47:44.000Z
|
from django.conf.urls.defaults import patterns, include, url
from django.contrib import admin
from core import game
admin.autodiscover()
game.autodiscover()
urlpatterns = patterns("",
("^admin/", include(admin.site.urls)),
url("", include("social_auth.urls")),
url("", include("core.urls")),
)
| 18.352941
| 60
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 44
| 0.141026
|
666552755de681921ce121bf7878b38237804c08
| 3,258
|
py
|
Python
|
DCGAN/train.py
|
drone911/Mnist-GANs
|
6b5ffc6ecf5070522ebcb6a41374cfffd674b684
|
[
"MIT"
] | null | null | null |
DCGAN/train.py
|
drone911/Mnist-GANs
|
6b5ffc6ecf5070522ebcb6a41374cfffd674b684
|
[
"MIT"
] | null | null | null |
DCGAN/train.py
|
drone911/Mnist-GANs
|
6b5ffc6ecf5070522ebcb6a41374cfffd674b684
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 13 20:11:22 2019
@author: drone911
"""
from helper import *
from models import *
import numpy as np
from keras.datasets import mnist
from tqdm import tqdm
import warnings
def train(train_images, generator, discriminator, gan, num_classes=120, random_dim=128, epochs=100, batch_size=128):
num_train_images=train_images.shape[0]
num_batches=int(num_train_images/batch_size)
hist_disc_avg, hist_gen_avg=[], []
for e in range(epochs):
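        # Per-batch label conventions: fake images -> 0, real images -> 0.9
        # (one-sided label smoothing), generator (GAN) targets -> 1.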
fake_img_y=np.zeros((batch_size, 1))
fake_img_y[:]=0
real_img_y=np.zeros((batch_size, 1))
real_img_y[:]=0.9
gan_y=np.ones((batch_size, 1))
hist_disc, hist_gen=[], []
iterator=tqdm(range(num_batches))
try:
for i in iterator:
sampled_noise = generate_inputs(random_dim, batch_size)
real_img_x=train_images[np.random.randint(0,train_images.shape[0],size=batch_size)]
fake_img_x=generator.predict(sampled_noise)
train_disc_x=np.concatenate((real_img_x, fake_img_x), axis=0)
train_disc_y=np.concatenate((real_img_y, fake_img_y), axis=0)
discriminator.trainable=True
hist_disc.append(discriminator.train_on_batch(train_disc_x, train_disc_y))
noise=generate_inputs(random_dim, batch_size)
discriminator.trainable=False
hist_gen.append(gan.train_on_batch(noise, gan_y))
hist_disc_avg.append(np.mean(hist_disc[0:num_batches]))
hist_gen_avg.append(np.mean(hist_gen[0:num_batches]))
print("-------------------------------------------------------")
print("discriminator loss at epoch {}:{}".format(e, hist_disc_avg[-1]))
print("generator loss at epoch {}:{}".format(e, hist_gen_avg[-1]))
print("-------------------------------------------------------")
plot_generated_images(e, generator, random_dim=random_dim)
plot_loss(hist_disc, hist_gen)
if e % 10 == 0:
discriminator.save_weights("models\\disc_v1_epoch_{}.h5".format(e))
generator.save_weights("models\\gen_v1_epoch_{}.h5".format(e))
except KeyboardInterrupt:
iterator.close()
print("Interrupted")
break
if __name__=="__main__":
warnings.filterwarnings("ignore")
(train_images, train_labels), (test_images, test_labels)=mnist.load_data()
random_dim=100
batch_size=128
lr=0.0002
beta_1=0.5
train_images=np.concatenate((train_images, test_images), axis=0)
train_images=train_images.reshape(-1,28,28,1)
train_images=(train_images.astype(np.float32) - 127.5) / 127.5
generator=get_gen_nn(random_dim=random_dim, lr=lr, beta_1=beta_1,verbose=False)
discriminator=get_disc_nn(lr=lr, beta_1=beta_1,verbose=False)
gan=create_gan(discriminator, generator, random_dim=random_dim, lr=lr, beta_1=beta_1,verbose=False)
train(train_images, generator, discriminator, gan, random_dim=random_dim, epochs=50, batch_size=128)
| 41.240506
| 116
| 0.612339
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 353
| 0.108349
|
6666b27d9a32939d312fcb0f1e04eb3582ec3f56
| 275
|
py
|
Python
|
03 - Types/3.2 - InbuiltTypes-ListsTuples/07-method-errors-index.py
|
python-demo-codes/basics
|
2a151bbff4b528cefd52978829c632fd087c8f20
|
[
"DOC"
] | 2
|
2019-08-23T06:05:55.000Z
|
2019-08-26T03:56:07.000Z
|
03 - Types/3.2 - InbuiltTypes-ListsTuples/07-method-errors-index.py
|
python-lang-codes/basics
|
2a151bbff4b528cefd52978829c632fd087c8f20
|
[
"DOC"
] | null | null | null |
03 - Types/3.2 - InbuiltTypes-ListsTuples/07-method-errors-index.py
|
python-lang-codes/basics
|
2a151bbff4b528cefd52978829c632fd087c8f20
|
[
"DOC"
] | 4
|
2020-10-01T07:16:07.000Z
|
2021-07-17T07:55:08.000Z
|
# HEAD
# DataType - List method -index() Usage Error
# DESCRIPTION
# Describes index method of lists
# and its error incase item is not there
# RESOURCES
#
lists = ['hello', 'hi', 'howdy', 'heyas']
# returns an error - ValueError
print(lists.index('hello hello'))
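# To avoid the ValueError (shown for illustration), check membership first:
#   if 'hello hello' in lists:
#       print(lists.index('hello hello'))
#   else:
#       print('item not found')
# or wrap the call in try/except ValueError.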
| 21.153846
| 46
| 0.676364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 226
| 0.821818
|
6667684709a7e3192cfea4fd79e3ee7e997e694d
| 2,418
|
py
|
Python
|
Model/predictor-dl-model/tests/experiments/7day_variance_uckey_weight_in_slotid.py
|
rangaswamymr/blue-marlin
|
2ab39a6af01e14f40386f640fe087aeb284b5524
|
[
"Apache-2.0"
] | null | null | null |
Model/predictor-dl-model/tests/experiments/7day_variance_uckey_weight_in_slotid.py
|
rangaswamymr/blue-marlin
|
2ab39a6af01e14f40386f640fe087aeb284b5524
|
[
"Apache-2.0"
] | null | null | null |
Model/predictor-dl-model/tests/experiments/7day_variance_uckey_weight_in_slotid.py
|
rangaswamymr/blue-marlin
|
2ab39a6af01e14f40386f640fe087aeb284b5524
|
[
"Apache-2.0"
] | null | null | null |
from pyspark import SparkContext, SparkConf, SQLContext
from pyspark.sql.functions import count, lit, col, udf, expr, collect_list, explode
from pyspark.sql.types import IntegerType, StringType, MapType, ArrayType, BooleanType, FloatType
from pyspark.sql import HiveContext
from datetime import datetime, timedelta
from pyspark.sql.functions import broadcast
def _list_to_map(count_array):
count_map = {}
for item in count_array:
key_value = item.split(':')
count_map[key_value[0]] = key_value[1]
return count_map
def add_count_map(df):
# Convert count_array to count_map
list_to_map_udf = udf(_list_to_map, MapType(
StringType(), StringType(), False))
df = df.withColumn('count_map', list_to_map_udf(df.count_array))
return df
def variance(plist):
l = len(plist)
ex = sum(plist)/l
ex2 = sum([i*i for i in plist])/l
return ex2-ex*ex
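# Quick check (illustration): variance([1.0, 2.0, 3.0]) == 14/3 - 4 ~= 0.667,
# i.e. the population variance E[x^2] - E[x]^2.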
query = "select count_array,day,uckey from factdata where day in ('2020-05-15','2020-05-14','2020-05-13','2020-05-12','2020-05-11','2020-05-10','2020-05-09')"
sc = SparkContext()
hive_context = HiveContext(sc)
df = hive_context.sql(query)
df = add_count_map(df)
df = df.select('uckey', 'day', explode(df.count_map)).withColumnRenamed("value", "impr_count")
df = df.withColumn('impr_count', udf(lambda x: int(x), IntegerType())(df.impr_count))
df = df.groupBy('uckey', 'day').sum('impr_count').withColumnRenamed("sum(impr_count)", 'impr_count')
split_uckey_udf = udf(lambda x: x.split(","), ArrayType(StringType()))
df = df.withColumn('col', split_uckey_udf(df.uckey))
df = df.select('uckey', 'impr_count', 'day', df.col[1]).withColumnRenamed("col[1]", 'slot_id')
df_slot = df.select('slot_id', 'impr_count', 'day')
df_slot = df_slot.groupBy('slot_id', 'day').sum('impr_count').withColumnRenamed("sum(impr_count)", "impr_total")
bc_df_slot = broadcast(df_slot)
df_new = df.join(bc_df_slot, on=["slot_id", 'day'], how="inner")
df_new = df_new.withColumn('percent', udf(lambda x, y: (x*100)/y, FloatType())(df_new.impr_count, df_new.impr_total))
df2 = df_new.groupBy("uckey").agg(collect_list('percent').alias('percent'))
df2 = df2.withColumn('var', udf(lambda x: variance(x), FloatType())(df2.percent))
df2.select("uckey", "var").orderBy(["var"], ascending=False).show(300, truncate=False)
df2.cache()
print("% uckeys having varience > 0.01 ", df2.filter((df2.var <= 0.01)).count()*100/df2.count())
| 37.78125
| 158
| 0.706369
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 536
| 0.221671
|
66698e346f68c9e447122b0d937db33190f58a61
| 4,443
|
py
|
Python
|
tests/test_metrohash.py
|
thihara/pyfasthash
|
20a53f9bb7bf15f98e3e549f523b49e1e0f62e15
|
[
"Apache-2.0"
] | 234
|
2015-02-05T13:41:58.000Z
|
2022-03-30T08:55:23.000Z
|
tests/test_metrohash.py
|
thihara/pyfasthash
|
20a53f9bb7bf15f98e3e549f523b49e1e0f62e15
|
[
"Apache-2.0"
] | 50
|
2015-03-19T05:53:34.000Z
|
2022-03-30T16:20:17.000Z
|
tests/test_metrohash.py
|
thihara/pyfasthash
|
20a53f9bb7bf15f98e3e549f523b49e1e0f62e15
|
[
"Apache-2.0"
] | 44
|
2015-04-23T18:51:43.000Z
|
2022-03-30T21:07:57.000Z
|
import pytest
import pyhash
def test_metro_64_1(hash_tester):
hash_tester(hasher_type=pyhash.metro_64_1,
bytes_hash=7555593383206836236,
seed_hash=9613011798576657330,
unicode_hash=5634638029758084150)
@pytest.mark.skipif(not pyhash.build_with_int128, reason="requires int128 support")
def test_metro_128_1(hash_tester):
hash_tester(hasher_type=pyhash.metro_128_1,
bytes_hash=310240039238111093048322555259813357218,
seed_hash=330324289553816260191102680044286377986,
unicode_hash=160639312567243412360084738183177128736)
def test_metro_64_2(hash_tester):
hash_tester(hasher_type=pyhash.metro_64_2,
bytes_hash=13328239478646503906,
seed_hash=16521803336796657060,
unicode_hash=5992985172783395072)
@pytest.mark.skipif(not pyhash.build_with_int128, reason="requires int128 support")
def test_metro_128_2(hash_tester):
hash_tester(hasher_type=pyhash.metro_128_2,
bytes_hash=308979041176504703647272401075625691044,
seed_hash=156408679042779357342816971045969684594,
unicode_hash=169904568621124891123383613748925830588)
def test_metro_Crc64_1(hash_tester):
hash_tester(hasher_type=pyhash.metro_crc_64_1,
bytes_hash=6872506084457499713,
seed_hash=14064239385324957326,
unicode_hash=5634638029758084150)
@pytest.mark.skipif(not pyhash.build_with_int128, reason="requires int128 support")
def test_metro_Crc128_1(hash_tester):
hash_tester(hasher_type=pyhash.metro_crc_128_1,
bytes_hash=44856800307026421677415827141042094245,
seed_hash=199990471895323666720887863107514038076,
unicode_hash=53052528140813423722778028047086277728)
def test_metro_Crc64_2(hash_tester):
hash_tester(hasher_type=pyhash.metro_crc_64_2,
bytes_hash=9168163846307153532,
seed_hash=11235719994915751828,
unicode_hash=15697829093445668111)
@pytest.mark.skipif(not pyhash.build_with_int128, reason="requires int128 support")
def test_metro_Crc128_2(hash_tester):
hash_tester(hasher_type=pyhash.metro_crc_128_2,
bytes_hash=29039398407115405218669555123781288008,
seed_hash=26197404070933777589488526163359489061,
unicode_hash=136212167639765185451107230087801381416)
@pytest.mark.benchmark(group='hash64', disable_gc=True)
def test_metro_hash64_1_perf(benchmark, hash_bencher):
hash_bencher(benchmark, pyhash.metro_64_1, 6897098198286496634)
@pytest.mark.skipif(not pyhash.build_with_int128, reason="requires int128 support")
@pytest.mark.benchmark(group='hash128', disable_gc=True)
def test_metro_hash128_1_perf(benchmark, hash_bencher):
hash_bencher(benchmark, pyhash.metro_128_1,
284089860902754045805586152203438670446)
@pytest.mark.benchmark(group='hash64', disable_gc=True)
def test_metro_hash64_2_perf(benchmark, hash_bencher):
hash_bencher(benchmark, pyhash.metro_64_2, 9928248983045338067)
@pytest.mark.skipif(not pyhash.build_with_int128, reason="requires int128 support")
@pytest.mark.benchmark(group='hash128', disable_gc=True)
def test_metro_hash128_2_perf(benchmark, hash_bencher):
hash_bencher(benchmark, pyhash.metro_128_2,
298961466275459716490100873977629041349)
@pytest.mark.benchmark(group='hash64', disable_gc=True)
def test_metro_hash_crc64_1_perf(benchmark, hash_bencher):
hash_bencher(benchmark, pyhash.metro_crc_64_1, 15625740387403976237)
@pytest.mark.skipif(not pyhash.build_with_int128, reason="requires int128 support")
@pytest.mark.benchmark(group='hash128', disable_gc=True)
def test_metro_hash_crc128_1_perf(benchmark, hash_bencher):
hash_bencher(benchmark, pyhash.metro_crc_128_1,
221795002586229010982769362009963170208)
@pytest.mark.benchmark(group='hash64', disable_gc=True)
def test_metro_hash_crc64_2_perf(benchmark, hash_bencher):
hash_bencher(benchmark, pyhash.metro_crc_64_2, 9313388757605283934)
@pytest.mark.skipif(not pyhash.build_with_int128, reason="requires int128 support")
@pytest.mark.benchmark(group='hash128', disable_gc=True)
def test_metro_hash_crc128_2_perf(benchmark, hash_bencher):
hash_bencher(benchmark, pyhash.metro_crc_128_2,
319940271611864595969873671463832146628)
| 39.669643
| 83
| 0.765699
| 0
| 0
| 0
| 0
| 3,447
| 0.775827
| 0
| 0
| 268
| 0.06032
|
66698ee5453f94b084a237ee9ea9e607d1b0395c
| 9,922
|
py
|
Python
|
main_fed.py
|
berserkersss/FL_CNN_Diff_Acc
|
f78651b426ff700108b62f2afbd99134b30af1e6
|
[
"MIT"
] | null | null | null |
main_fed.py
|
berserkersss/FL_CNN_Diff_Acc
|
f78651b426ff700108b62f2afbd99134b30af1e6
|
[
"MIT"
] | null | null | null |
main_fed.py
|
berserkersss/FL_CNN_Diff_Acc
|
f78651b426ff700108b62f2afbd99134b30af1e6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Python version: 3.6
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import copy
import numpy as np
from torchvision import datasets, transforms
import torch
import math
from utils.sampling import mnist_iid, mnist_noniid, cifar_iid
from utils.options import args_parser
from models.Update import LocalUpdate
from models.Update import CLUpdate
from models.Nets import MLP, CNNMnist, CNNCifar
from models.Fed import FedAvg
from models.test import test_img
if __name__ == '__main__':
# parse args
args = args_parser()
args.device = torch.device('cuda:{}'.format(args.gpu) if torch.cuda.is_available() and args.gpu != -1 else 'cpu')
# load dataset and split users
if args.dataset == 'mnist':
trans_mnist = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
dataset_train = datasets.MNIST('../data/mnist/', train=True, download=True, transform=trans_mnist)
dataset_test = datasets.MNIST('../data/mnist/', train=False, download=True, transform=trans_mnist)
# sample users
#if args.iid:
dict_users_iid_temp = mnist_iid(dataset_train, args.num_users)
#else:
dict_users = mnist_noniid(dataset_train, args.num_users)
#dict_users_iid_temp = dict_users
elif args.dataset == 'cifar':
trans_cifar = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
dataset_train = datasets.CIFAR10('../data/cifar', train=True, download=True, transform=trans_cifar)
dataset_test = datasets.CIFAR10('../data/cifar', train=False, download=True, transform=trans_cifar)
if args.iid:
dict_users = cifar_iid(dataset_train, args.num_users)
else:
exit('Error: only consider IID setting in CIFAR10')
else:
exit('Error: unrecognized dataset')
img_size = dataset_train[0][0].shape
#print('img_size=',img_size)
# build model
if args.model == 'cnn' and args.dataset == 'cifar':
net_glob = CNNCifar(args=args).to(args.device)
elif args.model == 'cnn' and args.dataset == 'mnist':
net_glob = CNNMnist(args=args).to(args.device)
elif args.model == 'mlp':
len_in = 1
for x in img_size:
len_in *= x
net_glob_fl = MLP(dim_in=len_in, dim_hidden=64, dim_out=args.num_classes).to(args.device)
net_glob_cl = MLP(dim_in=len_in, dim_hidden=64, dim_out=args.num_classes).to(args.device)
else:
exit('Error: unrecognized model')
net_glob_fl.train()
net_glob_cl.train()
# copy weights
w_glob_fl = net_glob_fl.state_dict()
w_glob_cl = net_glob_cl.state_dict()
# training
eta = 0.01
Nepoch = 5 # num of epoch
loss_train_fl, loss_train_cl = [], []
cv_loss, cv_acc = [], []
val_loss_pre, counter = 0, 0
net_best = None
best_loss = None
val_acc_list, net_list = [], []
para_g = []
loss_grad = []
delta_batch_loss_list = []
beta_list = []
count_list = np.zeros(256).tolist()
line1_iter_list = []
line2_iter_list = []
wgfed_list = []
wgcl_list = []
w_locals, loss_locals = [], []
w0_locals,loss0_locals =[], []
weight_div_list = []
para_cl = []
para_fl = []
beta_locals, mu_locals, sigma_locals = [],[],[]
x_stat_loacals, pxm_locals =[],[]
data_locals = [[] for i in range(args.epochs)]
w_fl_iter,w_cl_iter = [], []
beta_max_his, mu_max_his, sigma_max_his = [], [], []
acc_train_cl_his, acc_train_fl_his = [], []
net_glob_fl.eval()
acc_train_cl, loss_train_clxx = test_img(net_glob_cl, dataset_train, args)
acc_test_cl, loss_test_clxx = test_img(net_glob_cl, dataset_test, args)
acc_train_cl_his.append(acc_test_cl)
acc_train_fl_his.append(acc_test_cl)
print("Training accuracy: {:.2f}".format(acc_train_cl))
print("Testing accuracy: {:.2f}".format(acc_test_cl))
dict_users_iid = []
for iter in range(args.num_users):
dict_users_iid.extend(dict_users_iid_temp[iter])
# Centralized learning
for iter in range(args.epochs):
w_locals, loss_locals = [], []
glob_cl = CLUpdate(args=args, dataset=dataset_train, idxs=dict_users_iid)
w_cl, loss_cl = glob_cl.cltrain(net=copy.deepcopy(net_glob_cl).to(args.device))
w_cl_iter.append(copy.deepcopy(w_cl))
net_glob_cl.load_state_dict(w_cl)
loss_train_cl.append(loss_cl) # loss of CL
print('cl,iter = ', iter, 'loss=', loss_cl)
# testing
acc_train_cl, loss_train_clxx = test_img(net_glob_cl, dataset_train, args)
acc_test_cl, loss_test_clxx = test_img(net_glob_cl, dataset_test, args)
print("Training accuracy: {:.2f}".format(acc_train_cl))
print("Testing accuracy: {:.2f}".format(acc_test_cl))
acc_train_cl_his.append(acc_test_cl.item())
# FL setting
for iter in range(args.epochs): # num of iterations
w_locals, loss_locals, d_locals = [], [], []
beta_locals, mu_locals, sigma_locals = [], [], []
x_stat_loacals, pxm_locals =[],[]
# M clients local update
m = max(int(args.frac * args.num_users), 1) # num of selected users
idxs_users = np.random.choice(range(args.num_users), m, replace=False) # select randomly m clients
for idx in idxs_users:
local = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx]) # data select
w, loss, delta_bloss, beta, x_stat, d_local = local.train(net=copy.deepcopy(net_glob_fl).to(args.device))
x_value, count = np.unique(x_stat,return_counts=True) # compute the P(Xm)
w_locals.append(copy.deepcopy(w))  # collect local model
loss_locals.append(copy.deepcopy(loss))  # collect local loss function
d_locals.extend(d_local)  # collect the idx of local training data in FL
beta_locals.append(np.max(beta))  # beta value
mu_locals.append(np.max(delta_bloss))  # mu value
sigma_locals.append(np.std(delta_bloss))  # sigma value
x_stat_loacals.append(x_stat)  # Xm
pxm_locals.append(np.array(count/(np.sum(count))))  # P(Xm)
data_locals[iter] = d_locals  # collect data
w_glob_fl = FedAvg(w_locals)  # update the global model
net_glob_fl.load_state_dict(w_glob_fl)  # copy weights to net_glob
w_fl_iter.append(copy.deepcopy(w_glob_fl))
loss_fl = sum(loss_locals) / len(loss_locals)
loss_train_fl.append(loss_fl) # loss of FL
# compute P(Xg)
xg_value, xg_count = np.unique(x_stat_loacals,return_counts=True)
xg_count = np.array(xg_count)/(np.sum(xg_count))
print('fl,iter = ',iter,'loss=',loss_fl)
# compute beta, mu, sigma
beta_max = (np.max(beta_locals))
mu_max = (np.max(mu_locals))
sigma_max = (np.max(sigma_locals))
beta_max_his.append(np.max(beta_locals))
mu_max_his.append(np.max(mu_locals))
sigma_max_his.append(np.max(sigma_locals))
# print('beta=', beta_max)
# print('mu=', mu_max)
# print('sigma=', sigma_max)
# testing
net_glob_fl.eval()
acc_train_fl, loss_train_flxx = test_img(net_glob_fl, dataset_train, args)
acc_test_fl, loss_test_flxx = test_img(net_glob_fl, dataset_test, args)
print("Training accuracy: {:.2f}".format(acc_train_fl))
print("Testing accuracy: {:.2f}".format(acc_test_fl))
line1_list=[]
# the weight divergence of numerical line
for j in range(len(pxm_locals)):
lditem1 = sigma_max*(np.sqrt(2/(np.pi*50*(iter+1)))+np.sqrt(2/(np.pi*50*m*(iter+1))))
lditem2 = mu_max*(np.abs(pxm_locals[j]-xg_count))
lditem3 = 50*(iter+1)*(((1+eta*beta_max)**((iter+1)*Nepoch))-1)/(50*m*(iter+1)*beta_max)  # 50 = batch size (10) * num of local epochs (5)
line1 = lditem3*(lditem1+lditem2)
line1_list.append(line1) # m clients
line1_iter_list.append(np.sum(line1_list)) # iter elements
acc_train_fl_his.append(acc_test_fl.item())
#weight divergence of simulation
for i in range(len(w_cl_iter)):
para_cl = w_cl_iter[i]['layer_input.weight']
para_fl = w_fl_iter[i]['layer_input.weight']
line2 = torch.norm(para_cl-para_fl)
print(torch.norm(para_cl-para_fl)/torch.norm(para_cl))
line2_iter_list.append(line2.item())
print('y_line1=',line1_iter_list)# numerical
print('y_line2=',line2_iter_list) # simulation
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(line2_iter_list, c="red")
plt.xlabel('Iterations')
plt.ylabel('Difference')
plt.savefig('Figure/different.png')
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(beta_max_his, c="red")
plt.xlabel('Iterations')
plt.ylabel('Beta_max')
plt.savefig('Figure/beta_max.png')
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(sigma_max_his, c="red")
plt.xlabel('Iterations')
plt.ylabel('Sigma_max')
plt.savefig('Figure/sigma_max.png')
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(mu_max_his, c="red")
plt.xlabel('Iterations')
plt.ylabel('Mu_max')
plt.savefig('Figure/mu_max.png')
colors = ["blue", "red"]
labels = ["non-iid", "iid"]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(acc_train_fl_his, c=colors[0], label=labels[0])
ax.plot(acc_train_cl_his, c=colors[1], label=labels[1])
ax.legend()
plt.xlabel('Iterations')
plt.ylabel('Accuracy')
plt.savefig('Figure/Accuracy_non_iid2_temp.png')
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(line1_iter_list, c=colors[0])
plt.xlabel('Local_Iterations')
plt.ylabel('Grad')
plt.savefig('Figure/numerical _temp.png')
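# --- Editor's sketch (hypothetical helper, not part of the original script) --
# The per-client term of the numerical weight-divergence bound assembled in
# the loop above, written as one standalone function for readability.
# b is the local batch size (10) times the local epochs (5), m is the number
# of selected clients, t the global round index (iter + 1); names are
# descriptive only and do not appear in the original code.
def divergence_term_sketch(sigma_max, mu_max, beta_max, p_local, p_global,
                           eta=0.01, n_epoch=5, m=1, t=1, b=50):
    item1 = sigma_max * (np.sqrt(2 / (np.pi * b * t)) + np.sqrt(2 / (np.pi * b * m * t)))
    item2 = mu_max * np.abs(p_local - p_global)
    item3 = b * t * (((1 + eta * beta_max) ** (t * n_epoch)) - 1) / (b * m * t * beta_max)
    return item3 * (item1 + item2)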
| 36.884758
| 141
| 0.651078
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,756
| 0.17698
|
666a08a2699afb54d288c230c2b9f22bf4716df5
| 1,375
|
py
|
Python
|
scaner/controllers/communities.py
|
dearbornlavern/scaner
|
401de0ec7caef5c5a23aedec106db136bd4e4658
|
[
"Apache-2.0"
] | 12
|
2016-09-30T12:43:44.000Z
|
2022-02-17T17:17:02.000Z
|
scaner/controllers/communities.py
|
dearbornlavern/scaner
|
401de0ec7caef5c5a23aedec106db136bd4e4658
|
[
"Apache-2.0"
] | null | null | null |
scaner/controllers/communities.py
|
dearbornlavern/scaner
|
401de0ec7caef5c5a23aedec106db136bd4e4658
|
[
"Apache-2.0"
] | 7
|
2016-09-28T09:48:48.000Z
|
2020-05-15T04:56:11.000Z
|
from flask import current_app
from scaner.utils import add_metadata
import json
# USER EXTRACTION TEST
# @add_metadata()
# def get(userId, fields=None, *args, **kwargs):
# #get_task = current_app.tasks.get_users_from_twitter.delay()
# get_task = current_app.tasks.execute_metrics.delay()
# return {'result': "In progress"}, 200
@add_metadata('communities')
def get(communityId, *args, **kwargs):
get_task = current_app.tasks.get_community.delay(communityId)
return {'communities': get_task.get(timeout = 100)}, 200
@add_metadata('users')
def get_network(communityId, *args, **kwargs):
community_network_task = current_app.tasks.get_community_network.delay(communityId)
return {'users': community_network_task.get(timeout = 100)}, 200
@add_metadata('communities')
def search(*args, **kwargs):
search_task = current_app.tasks.get_communities_list.delay()
return {'communities': search_task.get(timeout = 100)}, 200
@add_metadata()
def get_emotion(communityId, *args, **kwargs):
emotion_task = current_app.tasks.get_community_emotion.delay(communityId)
return {'result': emotion_task.get(timeout = 100)}, 200
@add_metadata()
def get_sentiment(communityId, *args, **kwargs):
sentiment_task = current_app.tasks.get_community_sentiment.delay(communityId)
return {'communities': sentiment_task.get(timeout = 100)}, 200
| 39.285714
| 87
| 0.749818
| 0
| 0
| 0
| 0
| 1,019
| 0.741091
| 0
| 0
| 347
| 0.252364
|
666ce6df66f28481199af4b25376a59418b9191f
| 395
|
py
|
Python
|
cct/cases/create_snapshot.py
|
LmangoLemon/mind
|
1b269acca41f840c5c71cb6c92ec92ecfb977ad4
|
[
"Apache-2.0"
] | null | null | null |
cct/cases/create_snapshot.py
|
LmangoLemon/mind
|
1b269acca41f840c5c71cb6c92ec92ecfb977ad4
|
[
"Apache-2.0"
] | null | null | null |
cct/cases/create_snapshot.py
|
LmangoLemon/mind
|
1b269acca41f840c5c71cb6c92ec92ecfb977ad4
|
[
"Apache-2.0"
] | null | null | null |
import logging
from time import sleep
from cct.case import Case
logger = logging.getLogger(__file__)
class create_snapshot(Case):
def pre_test(self):
logger.info('doing something before creating the snapshot')
sleep(3)
def process(self):
logger.info('create snapshot')
sleep(5)
def post_test(self):
logger.info('create snapshot finished')
| 17.173913
| 62
| 0.668354
| 287
| 0.726582
| 0
| 0
| 0
| 0
| 0
| 0
| 83
| 0.210127
|
666d3c5b51416d64a4d8d00ca1cc2533f85b4bf8
| 296
|
py
|
Python
|
venv/Lib/site-packages/IPython/terminal/ptshell.py
|
ajayiagbebaku/NFL-Model
|
afcc67a85ca7138c58c3334d45988ada2da158ed
|
[
"MIT"
] | 6,989
|
2017-07-18T06:23:18.000Z
|
2022-03-31T15:58:36.000Z
|
venv/Lib/site-packages/IPython/terminal/ptshell.py
|
ajayiagbebaku/NFL-Model
|
afcc67a85ca7138c58c3334d45988ada2da158ed
|
[
"MIT"
] | 1,978
|
2017-07-18T09:17:58.000Z
|
2022-03-31T14:28:43.000Z
|
venv/Lib/site-packages/IPython/terminal/ptshell.py
|
ajayiagbebaku/NFL-Model
|
afcc67a85ca7138c58c3334d45988ada2da158ed
|
[
"MIT"
] | 1,228
|
2017-07-18T09:03:13.000Z
|
2022-03-29T05:57:40.000Z
|
raise DeprecationWarning("""DEPRECATED:
After Popular request and decision from the BDFL:
`IPython.terminal.ptshell` has been moved back to `IPython.terminal.interactiveshell`
during the beta cycle (after IPython 5.0.beta3) Sorry about that.
This file will be removed in 5.0 rc or final.
""")
| 32.888889
| 85
| 0.777027
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 269
| 0.908784
|
6670c507913d776c7f3759690ef2c0ab2aa02880
| 591
|
py
|
Python
|
ex078.py
|
raquelEllem/exerciciosPython
|
489c2360de84c69dbe9da7710660fb064cd605fa
|
[
"MIT"
] | null | null | null |
ex078.py
|
raquelEllem/exerciciosPython
|
489c2360de84c69dbe9da7710660fb064cd605fa
|
[
"MIT"
] | null | null | null |
ex078.py
|
raquelEllem/exerciciosPython
|
489c2360de84c69dbe9da7710660fb064cd605fa
|
[
"MIT"
] | null | null | null |
lista = []
for n in range(0, 5):
lista.append(int(input(f'Enter a value for position {n}: ')))
print('=-=' * 10)
print(f'You entered the values {lista}')
maior = lista[0]
menor = lista[0]
for n in lista:
if maior < n:
maior = n
if menor > n:
menor = n
print(f'The highest value entered was {maior} at positions ', end='')
for i, v in enumerate(lista):
if v == maior:
print(f'{i}...', end='')
print()
print(f'The lowest value entered was {menor} at positions ', end='')
for i, v in enumerate(lista):
if v == menor:
print(f'{i}...', end='')
| 26.863636
| 69
| 0.575296
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 213
| 0.356187
|
6674228e20201842275a8416c646d65895ba336f
| 6,461
|
py
|
Python
|
chb/x86/opcodes/X86RotateLeftCF.py
|
kestreltechnology/CodeHawk-Binary
|
aa0b2534e0318e5fb3770ec7b4d78feb0feb2394
|
[
"MIT"
] | null | null | null |
chb/x86/opcodes/X86RotateLeftCF.py
|
kestreltechnology/CodeHawk-Binary
|
aa0b2534e0318e5fb3770ec7b4d78feb0feb2394
|
[
"MIT"
] | null | null | null |
chb/x86/opcodes/X86RotateLeftCF.py
|
kestreltechnology/CodeHawk-Binary
|
aa0b2534e0318e5fb3770ec7b4d78feb0feb2394
|
[
"MIT"
] | null | null | null |
# ------------------------------------------------------------------------------
# CodeHawk Binary Analyzer
# Author: Henny Sipma
# ------------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2016-2020 Kestrel Technology LLC
# Copyright (c) 2020 Henny Sipma
# Copyright (c) 2021 Aarno Labs LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
from typing import cast, List, Sequence, TYPE_CHECKING
from chb.app.InstrXData import InstrXData
from chb.invariants.XVariable import XVariable
from chb.invariants.XXpr import XXpr
import chb.simulation.SimUtil as SU
import chb.simulation.SimValue as SV
import chb.util.fileutil as UF
from chb.util.IndexedTable import IndexedTableValue
from chb.x86.X86DictionaryRecord import x86registry
from chb.x86.X86Opcode import X86Opcode
from chb.x86.X86Operand import X86Operand
if TYPE_CHECKING:
from chb.x86.X86Dictionary import X86Dictionary
from chb.x86.simulation.X86SimulationState import X86SimulationState
@x86registry.register_tag("rcl", X86Opcode)
class X86RotateLeftCF(X86Opcode):
"""RCL dst, src
args[0]: index of dst in x86dictionary
args[1]: index of src in x86dictionary
"""
def __init__(
self,
x86d: "X86Dictionary",
ixval: IndexedTableValue) -> None:
X86Opcode.__init__(self, x86d, ixval)
@property
def dst_operand(self) -> X86Operand:
return self.x86d.operand(self.args[0])
@property
def src_operand(self) -> X86Operand:
return self.x86d.operand(self.args[1])
@property
def operands(self) -> Sequence[X86Operand]:
return [self.dst_operand, self.src_operand]
def annotation(self, xdata: InstrXData) -> str:
"""data format: a:vxx
vars[0]: dst
xprs[0]: src (number of bits to rotate)
xprs[1]: dst-rhs (value to rotate)
"""
lhs = str(xdata.vars[0])
rhs1 = str(xdata.xprs[0])
rhs2 = str(xdata.xprs[2])
return lhs + ' = ' + rhs2 + ' rotate-left-by ' + rhs1 + ' CF'
def lhs(self, xdata: InstrXData) -> List[XVariable]:
return xdata.vars
def rhs(self, xdata: InstrXData) -> List[XXpr]:
return xdata.xprs
# --------------------------------------------------------------------------
# Rotates the bits of the first operand (destination operand) the number of
# bit positions specified in the second operand (count operand) and stores
# the result in the destination operand. The count operand is an unsigned
# integer that can be an immediate or a value in the CL register. In legacy
# and compatibility mode, the processor restricts the count to a number
# between 0 and 31 by masking all the bits in the count operand except
# the 5 least-significant bits.
#
# The rotate through carry left (RCL) instruction shifts all the bits
# toward more-significant bit positions, except for the most-significant
# bit, which is rotated to the least-significant bit location.
#
# The RCL instruction includes the CF flag in the rotation. The RCL
# instruction shifts the CF flag into the least-significant bit and shifts
# the most-significant bit into the CF flag.
#
# The OF flag is defined only for the 1-bit rotates; it is undefined in all
# other cases (except that a zero-bit rotate does nothing, that is, it affects
# no flags). For left rotates, the OF flag is set to the exclusive OR of
# the CF bit (after the rotate) and the most-significant bit of the result.
# CASE size:
# 8: tempcount = (count & 31) % 9
# 16: tempcount = (count & 31) % 17
# 32: tempcount = (count & 31)
# WHILE tempcount != 0 DO:
# tempCF = msb(dest)
# dest = (dest * 2) + CF
# CF = tempCF
# tempcount = tempcount - 1
# IF count == 1:
# OF = msb(dest) xor CF
# ELSE:
# OF is undefined
#
# Flags affected:
# The CF flag contains the value of the bit shifted into it. The OF flag
# is affected only for single-bit rotates; it is undefined for multi-bit
# rotates. The SF, ZF, AF, and PF flags are not affected.
# --------------------------------------------------------------------------
def simulate(self, iaddr: str, simstate: "X86SimulationState") -> None:
srcop = self.src_operand
dstop = self.dst_operand
srcval = simstate.get_rhs(iaddr, srcop)
dstval = simstate.get_rhs(iaddr, dstop)
cflag = simstate.get_flag_value(iaddr, 'CF')
if cflag is None:
simstate.set(iaddr, dstop, SV.mk_undefined_simvalue(dstop.size))
elif (dstval.is_literal
and dstval.is_defined
and srcval.is_literal
and srcval.is_defined):
dstval = cast(SV.SimLiteralValue, dstval)
srcval = cast(SV.SimLiteralValue, srcval)
(cflag, result) = dstval.bitwise_rcl(srcval, cflag)
simstate.set(iaddr, dstop, result)
if srcval.value > 0:
simstate.update_flag(iaddr, 'CF', cflag == 1)
if srcval.value == 1:
oflag = result.msb ^ cflag
simstate.update_flag(iaddr, 'OF', oflag == 1)
else:
simstate.update_flag(iaddr, "OF", None)
| 40.130435
| 80
| 0.629005
| 4,289
| 0.663829
| 0
| 0
| 4,333
| 0.670639
| 0
| 0
| 3,643
| 0.563845
|
6674ff922f4c82dfa03dc7390843f76b68565580
| 283
|
py
|
Python
|
error_handlers/access_token.py
|
Egor2005l/cho
|
c7cb165394089b277be5c306edde0b8fb42e466d
|
[
"MIT"
] | null | null | null |
error_handlers/access_token.py
|
Egor2005l/cho
|
c7cb165394089b277be5c306edde0b8fb42e466d
|
[
"MIT"
] | null | null | null |
error_handlers/access_token.py
|
Egor2005l/cho
|
c7cb165394089b277be5c306edde0b8fb42e466d
|
[
"MIT"
] | null | null | null |
from asyncio import sleep
from vkbottle.exceptions import VKError
from vkbottle.framework.blueprint.user import Blueprint
user = Blueprint(
name='access_token_error_blueprint'
)
@user.error_handler.error_handler(5)
async def rps_handler(e: VKError):
exit(1)
| 20.214286
| 56
| 0.756184
| 0
| 0
| 0
| 0
| 85
| 0.300353
| 47
| 0.166078
| 30
| 0.106007
|
667689203557923536a76893ffda9eef2e58e85a
| 2,135
|
py
|
Python
|
test_challenges.py
|
UPstartDeveloper/Graph-Applications
|
45a3fa83f9e3fff243be35dd169edfcfd020f1a1
|
[
"MIT"
] | null | null | null |
test_challenges.py
|
UPstartDeveloper/Graph-Applications
|
45a3fa83f9e3fff243be35dd169edfcfd020f1a1
|
[
"MIT"
] | null | null | null |
test_challenges.py
|
UPstartDeveloper/Graph-Applications
|
45a3fa83f9e3fff243be35dd169edfcfd020f1a1
|
[
"MIT"
] | null | null | null |
import challenges
import unittest
class RottingOrangesTests(unittest.TestCase):
def test_time_to_rot(self):
"""
Graph BFS problem. Tells the time taken for oranges to all rot.
Test cases from LeetCode.
"""
# Test Cases
oranges1 = [
[2,1,1],
[1,1,0],
[0,1,1]
]
assert challenges.time_to_rot(oranges1) == 4
oranges2 = [
[2,1,1],
[0,1,1],
[1,0,1]
]
assert challenges.time_to_rot(oranges2) == -1
oranges3 = [
[0,2]
]
assert challenges.time_to_rot(oranges3) == 0
class NumIslandsTests(unittest.TestCase):
def test_num_islands(self):
'''Returns the number of distinct land masses from a 2D grid.'''
# Test Cases
map1 = [
[1, 1, 1, 1, 0],
[1, 1, 0, 1, 0],
[1, 1, 0, 0, 0],
[0, 0, 0, 0, 0]
]
assert challenges.num_islands(map1) == 1
map2 = [
[1, 1, 0, 0, 0],
[1, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 1]
]
assert challenges.num_islands(map2) == 3
class ClassSchedulingTests(unittest.TestCase):
def test_course_order(self):
"""Returns the order in which courses must be taken,
in order to meet prerequisites.
"""
courses1 = [ [1,0] ]
assert challenges.course_order(2, courses1) == [0, 1]
courses2 = [ [1,0], [2,0], [3,1], [3,2] ]
possibleSchedules = [ [0, 1, 2, 3], [0, 2, 1, 3] ]
assert challenges.course_order(4, courses2) in possibleSchedules
class WordLadderTests(unittest.TestCase):
def test_word_ladder_length(self):
"""Returns the minimum amount of 1-letter transformations to change
one word to another.
"""
beginWord = "hit"
endWord = "cog"
wordList = ["hot","dot","dog","lot","log","cog"]
assert challenges.word_ladder_length(beginWord, endWord, wordList) == 5
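# --- Editor's sketch (not the repo's challenges.time_to_rot, which is defined
# elsewhere) -------------------------------------------------------------------
# One minimal BFS implementation of the idea the first test exercises: start
# from every rotten orange at minute 0 and flood outward; if fresh oranges
# remain unreachable, return -1. Note that it mutates the grid in place.
from collections import deque

def time_to_rot_sketch(grid):
    rows, cols = len(grid), len(grid[0])
    queue, fresh = deque(), 0
    for r in range(rows):
        for c in range(cols):
            if grid[r][c] == 2:
                queue.append((r, c, 0))
            elif grid[r][c] == 1:
                fresh += 1
    minutes = 0
    while queue:
        r, c, minutes = queue.popleft()
        for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1)):
            nr, nc = r + dr, c + dc
            if 0 <= nr < rows and 0 <= nc < cols and grid[nr][nc] == 1:
                grid[nr][nc] = 2
                fresh -= 1
                queue.append((nr, nc, minutes + 1))
    return minutes if fresh == 0 else -1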
if __name__ == '__main__':
unittest.main()
| 26.6875
| 79
| 0.516628
| 2,042
| 0.95644
| 0
| 0
| 0
| 0
| 0
| 0
| 487
| 0.228103
|
66769c379769d62d8db4f6ca3c7ed84d674f3460
| 1,293
|
py
|
Python
|
2020-08-month-long-challenge/day06.py
|
jkbockstael/leetcode
|
8ef5c907fb153c37dc97f6524493ceca2044ea38
|
[
"Unlicense"
] | null | null | null |
2020-08-month-long-challenge/day06.py
|
jkbockstael/leetcode
|
8ef5c907fb153c37dc97f6524493ceca2044ea38
|
[
"Unlicense"
] | null | null | null |
2020-08-month-long-challenge/day06.py
|
jkbockstael/leetcode
|
8ef5c907fb153c37dc97f6524493ceca2044ea38
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python3
# Day 6: Find All Duplicates in an Array
#
# Given an array of integers, 1 ≤ a[i] ≤ n (n = size of array), some elements
# appear twice and others appear once.
# Find all the elements that appear twice in this array.
# Could you do it without extra space and in O(n) runtime?
class Solution:
def findDuplicates(self, nums: [int]) -> [int]:
# We have an array of length N that contains values from 1 to n, n ≤ N
# We need to keep track of the numbers we've already seen; for this we
# would need a list of m elements, m ≤ n ≤ N
# This means we can actually use the input array as it is large enough,
# given that all values are positive we can flip them to negative to
# encode the seen values
duplicates = []
for number in nums:
value = abs(number) # Maybe this position has been used as a marker
seen = abs(number) - 1 # indices start at 0, values at 1
if nums[seen] < 0:
# We already found this number before
duplicates.append(value)
else:
# Mark the array for this number
nums[seen] *= -1
return duplicates
# Test
assert Solution().findDuplicates([4,3,2,7,8,2,3,1]) == [2,3]
| 40.40625
| 79
| 0.608662
| 928
| 0.712203
| 0
| 0
| 0
| 0
| 0
| 0
| 805
| 0.617805
|
66776ed63d7e38eb38a9559cc44798e48137c63c
| 10,519
|
py
|
Python
|
napari/_vispy/experimental/tiled_image_visual.py
|
harripj/napari
|
7a284b1efeb14b1f812f0d98c608f70f0dd66ad2
|
[
"BSD-3-Clause"
] | null | null | null |
napari/_vispy/experimental/tiled_image_visual.py
|
harripj/napari
|
7a284b1efeb14b1f812f0d98c608f70f0dd66ad2
|
[
"BSD-3-Clause"
] | null | null | null |
napari/_vispy/experimental/tiled_image_visual.py
|
harripj/napari
|
7a284b1efeb14b1f812f0d98c608f70f0dd66ad2
|
[
"BSD-3-Clause"
] | null | null | null |
"""TiledImageVisual class
A visual that draws tiles using a texture atlas.
"""
from typing import List, Set
import numpy as np
from ...layers.image.experimental.octree_util import OctreeChunk
from ..vendored import ImageVisual
from ..vendored.image import _build_color_transform
from .texture_atlas import TextureAtlas2D
from .tile_set import TileSet
# Shape of the whole texture in tiles. Hardcoded for now.
SHAPE_IN_TILES = (16, 16)
class TiledImageVisual(ImageVisual):
"""An image that is drawn using smaller tiles.
TiledImageVisual draws a single large image using a set of square image
tiles. The size of the tiles is configurable, but 256x256 or 512x512
might be good choices. All the tiles in one TiledImageVisual are the
same size.
The tiles are stored in larger textures as an "atlas". An atlas is
basically just a texture which looks like a grid of smaller images. The
grid has no borders between the tiles. The size of the larger textures
is also configurable. For example a single 4096x4096 texture could
store 256 different 256x256 tiles.
Adding or removing tiles from a TiledImageVisual is efficient. Only the
bytes in the tile(s) being updated are sent to the card. The Vispy
method BaseTexture.set_data() has an "offset" argument. When setting
texture data with an offset under the hood Vispy calls
glTexSubImage2D(). It will only update the rectangular region within
the texture that's being updated.
In addition, uploading new tiles does not cause the shader to be
rebuilt. This is another reason TiledImageVisual is faster than
creating a stand-alone ImageVisuals to draw each tile.
Finally, rendering the tiles is efficient. TiledImageVisual renders by
drawing one single list of quads. The texture coordinates of the quads
point to the various tiles in the texture atlas. If all the tiles are
stored in the same large texture, there will be zero texture swaps,
which are expensive.
Parameters
----------
tile_shape : np.ndarray
The shape of one tile like (256, 256, 3).
"""
def __init__(self, tile_shape: np.ndarray, *args, **kwargs):
self.tile_shape = tile_shape
self._tiles = TileSet() # The tiles we are drawing.
self._clim = np.array([0, 1])  # TODO_OCTREE: need to support clim
# Initialize our parent ImageVisual.
super().__init__(*args, **kwargs)
# Must create the texture atlas after calling __init__ so
# the attribute self._interpolation exists.
self.unfreeze()
self._texture_atlas = self._create_texture_atlas(tile_shape)
self.freeze()
def _create_texture_atlas(self, tile_shape: np.ndarray) -> TextureAtlas2D:
"""Create texture atlas up front or if we change texture shape.
Parameters
----------
tile_shape : np.ndarray
The shape of our tiles such as (256, 256, 4).
Return
------
TextureAtlas2D
The newly created texture atlas.
"""
interp = 'linear' if self._interpolation == 'bilinear' else 'nearest'
return TextureAtlas2D(tile_shape, SHAPE_IN_TILES, interpolation=interp)
def set_data(self, image) -> None:
"""Set data of the ImageVisual.
VispyImageLayer._on_display_change calls this with an empty image, but
we can just ignore it. When created we are "empty" by virtue of not
drawing any tiles yet.
"""
def set_tile_shape(self, tile_shape: np.ndarray) -> None:
"""Set the shape of our tiles.
All tiles are the same shape in terms of texels. However they might
be drawn at different physical sizes. For example, drawing a single
view into a quadtree might end up drawing some tiles 2X or 4X
bigger than others. Typically you want to draw the "best available"
data which might be on a different level.
Parameters
----------
tile_shape : np.ndarray
Our tiles shape like (256, 256, 4)
"""
# Clear all our previous tile information and set the new shape.
self._tiles.clear()
self.tile_shape = tile_shape
# Create the new atlas and tell the shader about it.
self._texture_atlas = self._create_texture_atlas(tile_shape)
self._data_lookup_fn['texture'] = self._texture_atlas
@property
def size(self):
# TODO_OCTREE: need to compute the size...
#
# ImageVisual.size() does
# return self._data.shape[:2][::-1]
#
# We don't have a self._data so what do we put here? Maybe need
# a bounds for all the currently visible tiles?
# return self._texture_atlas.texture_shape[:2]
return (1024, 1024)
@property
def num_tiles(self) -> int:
"""Return the number tiles currently being drawn.
Return
------
int
The number of tiles currently being drawn.
"""
return self._texture_atlas.num_slots_used
@property
def octree_chunk(self) -> List[OctreeChunk]:
"""Return data for the chunks we are drawing.
List[OctreeChunk]
The data for the chunks we are drawing.
"""
return self._tiles.chunks
def add_chunks(self, chunks: List[OctreeChunk]):
"""Any any chunks that we are not already drawing.
Parameters
----------
chunks : List[OctreeChunk]
Add any of these we are not already drawing.
"""
for octree_chunk in chunks:
if not self._tiles.contains_octree_chunk(octree_chunk):
self.add_one_tile(octree_chunk)
def add_one_tile(self, octree_chunk: OctreeChunk) -> None:
"""Add one tile to the tiled image.
Parameters
----------
octree_chunk : OctreeChunk
The data for the tile we are adding.
Return
------
int
The tile's index.
"""
atlas_tile = self._texture_atlas.add_tile(octree_chunk)
if atlas_tile is None:
return # No slot available in the atlas.
self._tiles.add(octree_chunk, atlas_tile)
self._need_vertex_update = True
def remove_tile(self, tile_index: int) -> None:
"""Remove one tile from the image.
Parameters
----------
tile_index : int
The tile to remove.
"""
try:
self._tiles.remove(tile_index)
self._texture_atlas.remove_tile(tile_index)
self._need_vertex_update = True
except IndexError:
raise RuntimeError(f"Tile index {tile_index} not found.")
def prune_tiles(self, visible_set: Set[OctreeChunk]) -> None:
"""Remove tiles that are not part of the given visible set.
visible_set : Set[OctreeChunk]
The set of currently visible chunks.
"""
for tile_data in list(self._tiles.tile_data):
if tile_data.octree_chunk.key not in visible_set:
tile_index = tile_data.atlas_tile.index
self.remove_tile(tile_index)
def _build_vertex_data(self) -> None:
"""Build vertex and texture coordinate buffers.
This overrides ImageVisual._build_vertex_data(), it is called from
our _prepare_draw().
This is the heart of tiled rendering. Instead of drawing one quad
with one texture, we draw one quad per tile. And for each quad its
texture coordinates will pull from the right slot in the atlas.
So as the card draws the tiles, where it's sampling from the
texture will hop around in the atlas texture.
"""
if len(self._tiles) == 0:
return # Nothing to draw.
verts = np.zeros((0, 2), dtype=np.float32)
tex_coords = np.zeros((0, 2), dtype=np.float32)
# TODO_OCTREE: We can probably avoid vstack here if clever,
# maybe use one vertex buffer sized according to the max
# number of tiles we expect. But grow if needed.
for tile_data in self._tiles.tile_data:
tile = tile_data.atlas_tile
verts = np.vstack((verts, tile.verts))
tex_coords = np.vstack((tex_coords, tile.tex_coords))
# Set the base ImageVisual _subdiv_ buffers
self._subdiv_position.set_data(verts)
self._subdiv_texcoord.set_data(tex_coords)
self._need_vertex_update = False
def _build_texture(self) -> None:
"""Override of ImageVisual._build_texture().
TODO_OCTREE: This needs work. Need to do the clim stuff in in the
base ImageVisual._build_texture but do it for each tile?
"""
self._clim = np.array([0, 1])
self._texture_limits = np.array([0, 1]) # hardcode
self._need_colortransform_update = True
self._need_texture_upload = False
def _prepare_draw(self, view) -> None:
"""Override of ImageVisual._prepare_draw()
TODO_OCTREE: See how much this changes from base class, if we can
avoid too much duplication. Or factor out some common methods.
"""
if self._need_interpolation_update:
# Call the base ImageVisual._build_interpolation()
self._build_interpolation()
# But override to use our texture atlas.
self._data_lookup_fn['texture'] = self._texture_atlas
# We call our own _build_texture
if self._need_texture_upload:
self._build_texture()
# TODO_OCTREE: how does colortransform change for tiled?
if self._need_colortransform_update:
prg = view.view_program
grayscale = len(self.tile_shape) == 2 or self.tile_shape[2] == 1
self.shared_program.frag[
'color_transform'
] = _build_color_transform(
grayscale, self.clim_normalized, self.gamma, self.cmap
)
self._need_colortransform_update = False
prg['texture2D_LUT'] = (
self.cmap.texture_lut()
if (hasattr(self.cmap, 'texture_lut'))
else None
)
# We call our own _build_vertex_data()
if self._need_vertex_update:
self._build_vertex_data()
# Call the normal ImageVisual._update_method() unchanged.
if view._need_method_update:
self._update_method(view)
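# --- Editor's sketch (hypothetical helper, not part of TiledImageVisual) -----
# Illustrates the atlas layout the class docstring describes: with 256x256
# tiles packed into a 16x16 grid, a slot index maps to a (row, col) cell and
# then to the texel offset a sub-image upload (glTexSubImage2D) would target.
# The real TextureAtlas2D may lay tiles out differently; this is only a model.
def atlas_offset_sketch(slot_index, tile_shape=(256, 256),
                        shape_in_tiles=SHAPE_IN_TILES):
    row, col = divmod(slot_index, shape_in_tiles[1])
    return row * tile_shape[0], col * tile_shape[1]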
| 35.537162
| 79
| 0.64027
| 10,078
| 0.958076
| 0
| 0
| 889
| 0.084514
| 0
| 0
| 6,001
| 0.570491
|
6683c0d1956dae22490efd4a21cbb16c9e118a7c
| 339
|
py
|
Python
|
tf_prac.py
|
akapoorx00/machinelearning-stuff
|
53184019b77d3387fd15b13d3bfa75529b8ed003
|
[
"Apache-2.0"
] | null | null | null |
tf_prac.py
|
akapoorx00/machinelearning-stuff
|
53184019b77d3387fd15b13d3bfa75529b8ed003
|
[
"Apache-2.0"
] | null | null | null |
tf_prac.py
|
akapoorx00/machinelearning-stuff
|
53184019b77d3387fd15b13d3bfa75529b8ed003
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf
x = tf.constant(35, name='x')
print(x)
y = tf.Variable(x+5, name='y')
with tf.Session() as session:
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter("output", session.graph)
model = tf.global_variables_initializer()
session.run(model)
print (session.run(y))
writer.close()
| 21.1875
| 59
| 0.672566
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 14
| 0.041298
|