| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
mysolution/day23_crab_cups/solve.py | abhabongse/aoc2020 | 0 | 12784351 |
from __future__ import annotations
import itertools
from collections.abc import Iterator, Sequence
import more_itertools
from tqdm import trange
Arrows = dict[int, int]
def main():
# arrangement = [3, 8, 9, 1, 2, 5, 4, 6, 7]
arrangement = [4, 5, 9, 6, 7, 2, 8, 1, 3]
# Part 1
arrows = build_circular_arrows(arrangement)
crab_modify_arrows(arrows, arrangement[0], plucks=3, repeat=100)
final_arrangement = list(nodes_in_circle(arrows, start=1))
p1_answer = ''.join(str(n) for n in final_arrangement[1:])
print(p1_answer)
# Part 2
arrangement = arrangement + list(range(10, 1_000_001))
arrows = build_circular_arrows(arrangement)
crab_modify_arrows(arrows, arrangement[0], plucks=3, repeat=10_000_000)
fst, snd, trd = more_itertools.take(3, nodes_in_circle(arrows, start=1))
p2_answer = snd * trd
print(p2_answer)
def build_circular_arrows(arrangement: Sequence[int]) -> Arrows:
"""
Builds a circular graph as a dictionary mapping from one node label to the next.
"""
looped_arrangement = itertools.chain(arrangement, arrangement[:1])
arrows = {u: v for u, v in more_itertools.windowed(looped_arrangement, n=2)}
return arrows
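# Example (illustrative): build_circular_arrows([3, 8, 9, 1]) returns
# {3: 8, 8: 9, 9: 1, 1: 3} -- each cup maps to the cup immediately clockwise
# of it, with the last cup wrapping back to the first.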
def nodes_in_circle(arrows: Arrows, start: int) -> Iterator[int]:
"""
Yields the node labels around the arrows graph, starting from the given
`start` label, until the walk wraps back around to `start`.
"""
current = start
while True:
yield current
current = arrows[current]
if current == start:
break
def crab_modify_arrows(arrows: Arrows, current: int, plucks: int, repeat: int = 1) -> int:
"""
Modifies the arrows graph in-place according to the crab's challenge,
starting at the given current node label.
Returns the next *current* node label so the following step can resume from it.
"""
for _ in trange(repeat):
plucked = more_itertools.take(plucks, nodes_in_circle(arrows, arrows[current]))
candidates = count_in_modulus(current - 1, -1, modulo=len(arrows))
dest = more_itertools.first_true(candidates, pred=lambda v: v not in plucked)
rear = plucked[-1]
arrows[current], arrows[rear], arrows[dest] = arrows[rear], arrows[dest], arrows[current]
current = arrows[current]
return current
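# One round of the crab's move, as implemented above: pluck the `plucks` cups
# immediately clockwise of `current`, pick as destination the highest label
# below `current` (wrapping from 1 back up to the maximum) that was not plucked,
# splice the plucked cups right after the destination by rewiring three arrows,
# then advance `current` to the next cup.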
def count_in_modulus(start: int, step: int = 1, *, modulo: int) -> Iterator[int]:
"""
Produces an arithmetic sequence of numbers under the given modulus
with 1-indexing (so remainder 0 would actually yield the modulus itself).
"""
for value in itertools.count(start, step):
yield value % modulo or modulo
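# Example (illustrative): count_in_modulus(3, -1, modulo=9) yields
# 3, 2, 1, 9, 8, 7, ... -- a descending count that wraps with 1-based labels.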
if __name__ == '__main__':
main()
| 3.34375 | 3 |
Python/openjudge/12560.py | bic-potato/codeforces_learning | 0 | 12784352 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 25 15:05:59 2020
@author: zuoxichen
"""
def game_rule(i,j,arrayin,state_transfer):
if state_transfer.count(1)<2 or state_transfer.count(1)>3:
a=0
elif arrayin[i][j]==0 and state_transfer.count(1)==3:
a=1
else:
a=arrayin[i][j]
return a
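# game_rule applies Conway's Game of Life transition for a single cell:
# fewer than 2 or more than 3 live neighbours -> the cell is dead; a dead cell
# with exactly 3 live neighbours comes alive; otherwise the cell keeps its state.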
mn=list(int(i) for i in input().split())
n=mn[0]# n lines
m=mn[1]
i=0
arrayin=[]
while i<n:
arrayin.append(list(int(i) for i in input().split()))
i+=1
arrayout = [[0] * m for _ in range(n)]  # placeholder grid; every cell is overwritten below
for i in range(1,n-1):
for j in range(1,m-1):
state_transfer=[arrayin[i][j-1],arrayin[i][j+1],arrayin[i-1][j],arrayin[i+1][j],arrayin[i-1][j-1],arrayin[i-1][j+1],arrayin[i+1][j-1],arrayin[i+1][j+1]]
arrayout[i][j]=game_rule(i,j,arrayin, state_transfer)
for j in range(1,m-1):
state_transfer_up=[arrayin[0][j-1],arrayin[1][j-1],arrayin[1][j+1],arrayin[0][j+1],arrayin[1][j]]
state_transfer_down=[arrayin[n-1][j-1],arrayin[n-2][j-1],arrayin[n-2][j+1],arrayin[n-1][j+1],arrayin[n-2][j]]
arrayout[0][j]=game_rule(0,j,arrayin, state_transfer_up)
arrayout[n-1][j]=game_rule(n-1, j, arrayin, state_transfer_down)
for i in range(1,n-1):
state_transfer_left=[arrayin[i-1][0],arrayin[i+1][0],arrayin[i][1],arrayin[i-1][1],arrayin[i+1][1]]
state_transfer_right=[arrayin[i-1][m-1],arrayin[i+1][m-1],arrayin[i][m-2],arrayin[i-1][m-2],arrayin[i+1][m-2]]
arrayout[i][0]=game_rule(i,0,arrayin, state_transfer_left)
arrayout[i][m-1]=game_rule(i, m-1, arrayin, state_transfer_right)
arrayout[0][0]=game_rule(0,0,arrayin,[arrayin[0][1],arrayin[1][1],arrayin[1][0]])
arrayout[0][m-1]=game_rule(0,m-1,arrayin,[arrayin[0][m-2],arrayin[1][m-1],arrayin[1][m-2]])
arrayout[n-1][0]=game_rule(n-1,0,arrayin,[arrayin[n-1][1],arrayin[n-2][1],arrayin[n-2][0]])
arrayout[n-1][m-1]=game_rule(n-1,m-1,arrayin,[arrayin[n-2][m-1],arrayin[n-1][m-2],arrayin[n-2][m-2]])
for i in range(n):
for j in range(m):
if j==m-1:
print(arrayout[i][j])
else:
print(arrayout[i][j],end=' ')
| 3.15625 | 3 |
markov_generator.py | bmd/markov-at-the-movies | 0 | 12784353 |
import random
from collections import defaultdict
class MarkovGenerator(object):
def __init__(self, corpus, tuple_size=3):
""" Initialize the MarkovGenerator object.
Digests the corpus of text provided as a list of tokens and creates a cache of
predicted next-word values
Code for markov generator based on: http://agiliq.com/blog/2009/06/
generating-pseudo-random-text-with-markov-chains-u/
:param corpus: (list) a source text of word tokens to generate random text from
:param tuple_size: (int: default 3) the size of the tuple to use to generate
text. A larger tuple will increase memory usage, but produce
more realistic results
"""
self.corpus = corpus
self.corpus_size = len(corpus)
self.tuple_size = tuple_size
self.cache = defaultdict(list)
self._initialize_cache()
def _generate_ngrams(self):
""" Generate ngrams from the corpus
For each token in the corpus, generate a list of likely next words for the
Markov text generator to return.
:yield: (tuple) a tuple of length n
"""
n = self.tuple_size
if len(self.corpus) < n:
return
for i in range(len(self.corpus) - (n - 1)):
yield tuple([self.corpus[i + x] for x in range(n)])
def _initialize_cache(self):
""" Initialize the cache
Set up the cache object to generate predicted strings.
"""
for word_tuple in self._generate_ngrams():
self.cache[word_tuple[0:-1]].append(word_tuple[-1])
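# Example (illustrative): with corpus "the cat sat the cat ran".split() and
# tuple_size=3, the cache maps ('the', 'cat') -> ['sat', 'ran'] and
# ('cat', 'sat') -> ['the'], i.e. each (n-1)-gram lists its observed successors.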
def generate_markov_text(self, size, override_seed=None):
""" Generate a pseudo-random block of text
:param size: (int) Length of text to generate. Should be << than the
size of the total corpus for good results
:param override_seed: (str: default None) Word to seed the generator
with if set
:return: (str) a string of randomly-generated text
"""
if not override_seed:
seed = random.randint(0, self.corpus_size - self.tuple_size)
else:
indices = [i for i, x in enumerate(self.corpus) if x == override_seed]
try:
seed = random.choice(indices)
except IndexError:
seed = random.randint(0, self.corpus_size - self.tuple_size)
seed_words = self.corpus[seed: seed + self.tuple_size]
gen_words = []
for i in range(size):
gen_words.append(seed_words[0])
seed_words.pop(0)
try:
seed_words.append(random.choice(self.cache[tuple(seed_words)]))
# catch cases where there isn't a word to pick
except IndexError:
seed_words.append(random.choice(self.corpus))
gen_words.append(seed_words[0])
return ' '.join(gen_words)
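# Usage sketch (illustrative, assumes a whitespace-tokenized corpus):
# gen = MarkovGenerator("to be or not to be that is the question".split(), tuple_size=2)
# print(gen.generate_markov_text(20))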
| 3.4375 | 3 |
model/seedsortnet.py | Huanyu2019/Seedsortnet | 4 | 12784354 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 12 15:44:11 2021
@author: lihuanyu
"""
import torch.nn as nn
import torch
import math
import torchsummary as summary
from torchstat import stat
import torch.nn.functional as F
from blurpool import BlurPool
__all__ = ["seedsortnet","seedsortnet75"]
class SubSpace_SFSAM(nn.Module):
def __init__(self, nin):
super(SubSpace_SFSAM, self).__init__()
self.conv_7x7 = nn.Conv2d(2, 1, kernel_size=7, stride=1, padding=3, groups=1)
self.bn_point = nn.BatchNorm2d(1, momentum=0.9)
self.relu_point = nn.ReLU(inplace=False)
self.softmax = nn.Softmax(dim=2)
def forward(self, x):
out_mean = torch.mean(x, dim=1, keepdim=True)
out_max, _ = torch.max(x, dim=1, keepdim=True)
out = [out_max, out_mean]
out = torch.cat(out,dim=1)
out = self.conv_7x7(out)
out = self.bn_point(out)
out = self.relu_point(out)
m, n, p, q = out.shape
out = self.softmax(out.view(m, n, -1))
out = out.view(m, n, p, q)
out = out.expand(x.shape[0], x.shape[1], x.shape[2], x.shape[3])
out = torch.mul(out, x)
out = out + x
return out
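# SubSpace_SFSAM is a spatial-attention block over one channel group: it stacks
# the channel-wise max and mean maps (2 x H x W), runs a 7x7 conv + BN + ReLU to
# get a single map, normalizes that map spatially with a softmax, and applies it
# to the input with a residual connection (out = x + attention * x).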
class SFSAM(nn.Module):
def __init__(self, nin, nout, h, w, num_splits):
super(SFSAM, self).__init__()
assert nin % num_splits == 0
self.nin = nin
self.nout = nout
self.h = h
self.w = w
self.num_splits = num_splits
self.subspaces = nn.ModuleList(
[SubSpace_SFSAM(int(self.nin / self.num_splits)) for i in range(self.num_splits)]
)
def forward(self, x):
group_size = int(self.nin / self.num_splits)
sub_feat = torch.chunk(x, self.num_splits, dim=1)
out = []
for idx, l in enumerate(self.subspaces):
out.append(self.subspaces[idx](sub_feat[idx]))
out = torch.cat(out, dim=1)
return out
class BasicConv2d(nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super(BasicConv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
self.bn = nn.BatchNorm2d(out_channels)
self.relu6 = nn.ReLU6(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return self.relu6(x)
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
if new_v < 0.9 * v:
new_v += divisor
return new_v
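# _make_divisible rounds a channel count to a multiple of `divisor` without
# dropping more than ~10%: e.g. _make_divisible(100, 16) -> 96, while
# _make_divisible(21, 16) -> 32 (16 would lose more than 10% of 21, so it rounds up).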
class subsampled(nn.Module):
def __init__(self,in_channels,out_channels,filter_size=2,**kwargs):
super(subsampled, self).__init__()
self.maxpool = nn.MaxPool2d(kernel_size=2, stride=1)
self.blurpool=BlurPool(in_channels, filt_size=filter_size, stride=2)
def forward(self, x):
x = self.maxpool(x)
x = self.blurpool(x)
return x
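# subsampled performs anti-aliased downsampling: a stride-1 max pool evaluates
# the max densely, then BlurPool low-pass filters and subsamples with stride 2,
# which reduces aliasing compared with a plain stride-2 max pool.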
class Root_module(nn.Module):
def __init__(self, in_channels,ch3x3_conv,ch1x1_first,ch3x3,pool_proj):
super(Root_module, self).__init__()
self.conv1 = BasicConv2d(in_channels, ch3x3_conv, kernel_size=3, stride=1, padding=1)
self.branch1 = nn.Sequential(
BasicConv2d(ch3x3_conv, ch1x1_first, kernel_size=3,padding=1,stride=2),
BasicConv2d(ch1x1_first, ch3x3, kernel_size=1)
)
self.branch2 = nn.Sequential(
subsampled(16,16)
)
def forward(self, x):
x = self.conv1(x)
branch1 = self.branch1(x)
branch2 = self.branch2(x)
outputs = [branch1, branch2]
return torch.cat(outputs, 1)
class shield_block(nn.Module):
def __init__(self, inp, oup, expand_ratio,expand_channel):
self.identity_map = False
super(shield_block, self).__init__()
hidden_dim = inp // expand_ratio
if hidden_dim < oup / 6.:
hidden_dim = math.ceil(oup / 6.)
hidden_dim = _make_divisible(hidden_dim, 16)
oup1 = math.ceil((oup/6.) * expand_channel)
oup2 = oup - oup1
if inp != oup:
self.conv = nn.Sequential(
nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True),
)
if inp == oup:
self.identity_map = True
self.conv = nn.Sequential(
nn.Conv2d(inp, inp, 3, 1, 1, groups=inp, bias=False),
nn.BatchNorm2d(inp),
nn.ReLU6(inplace=True),
nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.Conv2d(hidden_dim, oup1, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup1),
nn.ReLU6(inplace=True),
nn.Conv2d(oup1, oup1, 3, 1, 1, groups=oup1, bias=False),
nn.BatchNorm2d(oup1),
)
self.branch1 = nn.Sequential(
nn.Conv2d(inp, inp, 3, 1, 1, groups=inp, bias=False),
nn.BatchNorm2d(inp),
nn.ReLU6(inplace=True),
nn.Conv2d(inp, oup2, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup2),
nn.ReLU6(inplace=True),
)
def forward(self, x):
out = self.conv(x)
if self.identity_map == True:
identity = x
branch1 = self.branch1(x)
out = [out, branch1]
out = torch.cat(out, 1)
out += identity
return out
class Seedsortnet(nn.Module):
def __init__(self, num_classes=2, width=1,groups=4,expand_channel=4,init_weights=True):
super(Seedsortnet, self).__init__()
self.root_module = Root_module(3,16,32,16,16) # [-1, 32, 112, 112]
out1 = int(64*width)
out2 = int(128 *width)
out3 = int(192*width)
out4 = int(256 *width)
self.stage1_up = shield_block(32,out1,2,1)
self.stage1_1 = shield_block(out1,out1,6,expand_channel)
self.sfsam1 = SFSAM(out1,out1,112,112, groups)
self.translayer1 = subsampled(out1,out1)
self.stage2_up = shield_block(out1,out2,2,1)
self.stage2_1 = shield_block(out2,out2,6,expand_channel)
self.stage2_2 = shield_block(out2,out2,6,expand_channel)
self.stage2_3 = shield_block(out2,out2,6,expand_channel)
self.sfsam2 = SFSAM(out2,out2,56,56, groups)
self.translayer2 = subsampled(out2,out2)
self.stage3_up = shield_block(out2,out3,2,1)
self.stage3_1 = shield_block(out3,out3,6,expand_channel)
self.stage3_2 = shield_block(out3,out3,6,expand_channel)
self.stage3_3 = shield_block(out3,out3,6,expand_channel)
self.stage3_4 = shield_block(out3,out3,6,expand_channel)
self.sfsam3 = SFSAM(out3,out3,28,28,groups)
self.translayer3 = subsampled(out3,out3)
self.stage4_up = shield_block(out3,out4,2,1)
self.stage4_1 = shield_block(out4,out4,6,expand_channel)
self.stage4_2 = shield_block(out4,out4,6,expand_channel)
self.stage4_3 = shield_block(out4,out4,6,expand_channel)
self.sfsam4 = SFSAM(out4,out4,14,14,groups)
self.translayer4 = subsampled(out4,out4)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.classifier = nn.Sequential(
nn.Dropout(0.2),
nn.Linear(out4, num_classes))
if init_weights==True:
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
import scipy.stats as stats
X = stats.truncnorm(-2, 2, scale=0.01)
values = torch.as_tensor(X.rvs(m.weight.numel()), dtype=m.weight.dtype)
values = values.view(m.weight.size())
with torch.no_grad():
m.weight.copy_(values)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.root_module(x)
x = self.stage1_up(x)
x = self.stage1_1(x)
x = self.sfsam1(x)
x = self.translayer1(x)
x = self.stage2_up(x)
x = self.stage2_1(x)
x = self.stage2_2(x)
x = self.stage2_3(x)
x = self.sfsam2(x)
x = self.translayer2(x)
x = self.stage3_up(x)
x = self.stage3_1(x)
x = self.stage3_2(x)
x = self.stage3_3(x)
x = self.stage3_4(x)
x = self.sfsam3(x)
x = self.translayer3(x)
x = self.stage4_up(x)
x = self.stage4_1(x)
x = self.stage4_2(x)
x = self.stage4_3(x)
x = self.sfsam4(x)
x = self.translayer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def seedsortnet(**kwargs):
"""
Constructs a Seedsortnet model
"""
return Seedsortnet(**kwargs)
def seedsortnet75(**kwargs):
"""
Constructs a Seedsortnet model
"""
return Seedsortnet(width=0.75,**kwargs)
if __name__=='__main__':
model = seedsortnet(groups=4)
model.eval()
print(model)
stat(model,(3, 224, 224))
| 2.4375 | 2 |
tracker/migrations/0002_processedblock.py | shapeshift-legacy/watchtower | 0 | 12784355 |
# Generated by Django 2.0.7 on 2018-08-11 00:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('tracker', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ProcessedBlock',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('block_height', models.IntegerField()),
('block_hash', models.CharField(max_length=500)),
('block_time', models.DateTimeField()),
('processed_at', models.DateTimeField(auto_now_add=True)),
('previous_hash', models.CharField(max_length=500)),
('previous_block', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='tracker.ProcessedBlock')),
],
),
]
| 1.632813 | 2 |
blog/models.py | mohammadanarul/Django-Blog-YT | 0 | 12784356 |
from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import gettext_lazy as _
from django.db.models.signals import post_save
class Profile(models.Model):
GENDER_CHOICES = (
('Male', 'Male'),
('Female', 'Female'),
('Other', 'Other'),
)
user = models.OneToOneField(User, verbose_name=_("user"), on_delete=models.CASCADE)
# avatar = models.ImageField(_("avatar"), upload_to='profile')
# banner = models.ImageField(_('banner'), upload_to='banner')
bio = models.TextField(_("bio"))
gender = models.CharField(_("gender"), max_length=20, choices=GENDER_CHOICES)
address = models.CharField(_("address"), max_length=150)
class Meta:
ordering = ['-pk']
def __str__(self):
return self.user.username
class Post(models.Model):
STATUS_CHOICES = (
('Draft', 'Draft'),
('Publish', 'Publish'),
)
user = models.ForeignKey(Profile, verbose_name=_("user"), on_delete=models.CASCADE)
title = models.CharField(_("title"), max_length=255)
slug = models.SlugField(_("slug"), unique=True, blank=True)
# thumnail = models.ImageField(_("thumnail"), upload_to='post')
description = models.TextField(_("description"))
status = models.CharField(_("status"), max_length=20, choices=STATUS_CHOICES, default='Draft')
view_count = models.IntegerField(_("view count"), default=0)
created_at = models.DateTimeField(_("created at"), auto_now_add=True)
updated_at = models.DateTimeField(_("updated at"), auto_now=True)
class Meta:
ordering = ['-pk']
@property
def comment_counter(self):
return self.comments.all().count()
def __str__(self):
return self.title
class Comment(models.Model):
user = models.ForeignKey(Profile, verbose_name=_("user"), on_delete=models.CASCADE)
post = models.ForeignKey(Post, on_delete=models.CASCADE, related_name='comments')
comment_text = models.TextField(_("comment text"))
created_at = models.DateTimeField(_("created at"), auto_now=False, auto_now_add=True)
updated_at = models.DateTimeField(_("updated at"), auto_now=True, auto_now_add=False)
class Meta:
ordering = ['-pk']
class Contact(models.Model):
first_name = models.CharField(_("first name"), max_length=30)
last_name = models.CharField(_("last name"), max_length=30)
subject = models.CharField(_("subject"), max_length=50)
email = models.EmailField(_("email"), max_length=254)
detail = models.TextField(_("detail"))
def __str__(self):
return f'{self.first_name}-{self.last_name}'
def create_profile_signal(sender, instance, created, *args, **kwargs):
if created:
Profile.objects.create(user=instance)
post_save.connect(create_profile_signal, sender=User)
| 2.234375 | 2 |
spec/data/logic/_expr_transformer_spec.py | PhilHarnish/forge | 2 | 12784357 |
import ast
import Numberjack
from data.logic import _dimension_factory, _expr_transformer, _model, \
_predicates, _reference, dsl
from spec.mamba import *
with description('_expr_transformer.ExprTransformer'):
with it('instantiates'):
expect(calling(_expr_transformer.ExprTransformer, None)).not_to(raise_error)
with description('compile'):
with before.each:
self.factory = _dimension_factory._DimensionFactory()
self.model = _model._Model(self.factory)
self.transformer = _expr_transformer.ExprTransformer(self.model)
self.andy, self.bob = self.name = self.factory(name=['andy', 'bob'])
self.cherries, self.dates = self.fruit = self.factory(
fruit=['cherries', 'dates'])
self._10, self._11 = self.age = self.factory(age=[10, 11])
with it('resolves names'):
node = ast.Name(id='name["andy"].fruit["cherries"]', ctx=ast.Load())
transformed = self.transformer.visit(node)
expect(transformed).to(be_a(_reference.Reference))
expect(transformed._constraints).to(equal({
'name': 'andy',
'fruit': 'cherries'
}))
with it('resolves numbers'):
node = ast.Num(n=10)
transformed = self.transformer.visit(node)
expect(transformed).to(be_a(_reference.Reference))
expect(transformed._constraints).to(equal({'age': 10}))
with it('resolves strings'):
node = ast.Str(s='cherries')
transformed = self.transformer.visit(node)
expect(transformed).to(be_a(_reference.Reference))
expect(transformed._constraints).to(equal({'fruit': 'cherries'}))
with it('fails to visit unsupported nodes'):
expect(calling(self.transformer.compile, ast.Await())).to(
raise_error(NotImplementedError))
expect(calling(self.transformer.visit, ast.Await())).to(
raise_error(NotImplementedError))
expect(calling(self.transformer.generic_visit, ast.Await())).to(
raise_error(NotImplementedError))
with it('supports precise (2d) assignment'):
expr = self.name['andy'].fruit == self.fruit['cherries']
compiled = self.transformer.compile(expr)
expect(compiled).to(be_a(_predicates.Predicates))
expect(str(compiled)).to(equal(
'(name["andy"].fruit["cherries"] == True)'))
with it('supports OR operation'):
expr = (self.name['andy'].fruit['cherries'] |
self.fruit['cherries'].name['bob'])
compiled = self.transformer.compile(expr)
expect(compiled).to(be_a(_predicates.Predicates))
expect(str(compiled)).to(equal(
'(name["andy"].fruit["cherries"] or name["bob"].fruit["cherries"])'))
with it('supports XOR operation'):
expr = (self.name['andy'].fruit['cherries'] ^
self.fruit['cherries'].name['bob'])
compiled = self.transformer.compile(expr)
expect(compiled).to(be_a(_predicates.Predicates))
expect(str(compiled)).to(equal(
'((name["andy"].fruit["cherries"] +'
' name["bob"].fruit["cherries"]) == 1)'))
with it('supports + operation, int on right'):
expr = self.name['andy'].age + 2
compiled = self.transformer.compile(expr)
expect(compiled).to(be_a(_predicates.Predicates))
expect(str(compiled)).to(equal('(name["andy"].age in {10,11} + 2)'))
with it('supports + operation, int on left'):
expr = 2 + self.name['andy'].age
compiled = self.transformer.compile(expr)
expect(compiled).to(be_a(_predicates.Predicates))
expect(str(compiled)).to(equal('(2 + name["andy"].age in {10,11})'))
with it('supports - operation, int on right'):
expr = self.name['andy'].age - 2
compiled = self.transformer.compile(expr)
expect(compiled).to(be_a(_predicates.Predicates))
expect(str(compiled)).to(equal('(name["andy"].age in {10,11} - 2)'))
with it('supports - operation, int on left'):
expr = 2 - self.name['andy'].age
compiled = self.transformer.compile(expr)
expect(compiled).to(be_a(_predicates.Predicates))
expect(str(compiled)).to(equal('(2 - name["andy"].age in {10,11})'))
with it('supports * operation, int on right'):
expr = self.name['andy'].age[10] * 10
compiled = self.transformer.compile(expr)
expect(compiled).to(be_a(_predicates.Predicates))
expect(str(compiled)).to(equal('((name["andy"].age == 10) * 10)'))
with it('supports * operation, int on left'):
expr = 10 * self.name['andy'].age[10]
compiled = self.transformer.compile(expr)
expect(compiled).to(be_a(_predicates.Predicates))
# For some reason(?) the operations are switched here.
expect(str(compiled)).to(equal('((name["andy"].age == 10) * 10)'))
with it('supports & operation'):
expr = self.andy[10] & self.bob[11]
compiled = self.transformer.compile(expr)
expect(compiled).to(be_a(_predicates.Predicates))
expect(str(compiled)).to(equal(
'((name["andy"].age == 10) & (name["bob"].age == 11))'))
with it('supports ~ operation'):
expr = ~self.andy[10]
compiled = self.transformer.compile(expr)
expect(compiled).to(be_a(_predicates.Predicates))
expect(str(compiled)).to(equal(
'((1 - (name["andy"].age == 10)) == True)'))
with it('supports call expressions'):
expr = dsl.abs(self.andy.age - self.bob.age)
compiled = self.transformer.compile(expr)
expect(compiled).to(be_a(_predicates.Predicates))
# For some reason(?) the operations are switched here.
s = str(compiled).replace(' in {0,1}', '')
expect(s).to(equal(
'Abs((name["andy"].age in {10,11} -'
' name["bob"].age in {10,11}))'
))
with it('supports naked _DimensionSlice expressions'):
expr = self.name['andy'].age[10]
compiled = self.transformer.compile(expr)
expect(compiled).to(be_a(_predicates.Predicates))
expect(str(compiled)).to(equal('((name["andy"].age == 10) == True)'))
with it('supports Call with builtin functions'):
expr = ast.parse('max(1, 3)').body[0]
compiled = self.transformer.compile(expr)
expect(str(compiled)).to(equal('3'))
with it('supports Call with function pointers'):
fn = mock.Mock(return_value=3)
expr = ast.Call(
func=fn,
args=[],
keywords=[],
)
compiled = self.transformer.compile(expr)
expect(fn).to(have_been_called)
expect(str(compiled)).to(equal('3'))
with description('regression tests'):
with before.each:
self.model = Numberjack.Model()
with it('resolves values before Numberjack uses them'):
a = dsl.variable('a')
b = dsl.variable('b')
c = a * b
expr = c == 1462
compiled = self.transformer.compile(expr)
expect(str(compiled)).to(equal('((a * b) == 1462)'))
expect(calling(self.model.add, compiled)).not_to(raise_error)
| 2.4375 | 2 |
games/views.py | sarahboyce/play-connect-four | 0 | 12784358 |
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.db.models import Q
from django.http import JsonResponse
from django.shortcuts import get_object_or_404
from django.views import generic
from .forms import GameForm
from .models import Game
class GameListView(LoginRequiredMixin, generic.ListView):
model = Game
def get_queryset(self):
return Game.objects.filter(
Q(player_1=self.request.user) | Q(player_2=self.request.user)
).order_by("-status", "-created_date")
class GameCreateView(LoginRequiredMixin, generic.CreateView):
model = Game
form_class = GameForm
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs["user"] = self.request.user
return kwargs
class GamePlayerMixin(UserPassesTestMixin):
def test_func(self):
game = get_object_or_404(Game, pk=self.kwargs["pk"])
return self.request.user in (game.player_1, game.player_2)
class GameDetailView(LoginRequiredMixin, GamePlayerMixin, generic.DetailView):
model = Game
class GameCoinRedirectView(LoginRequiredMixin, GamePlayerMixin, generic.RedirectView):
permanent = False
pattern_name = "game_detail"
def get_redirect_url(self, *args, **kwargs):
game = get_object_or_404(Game, pk=kwargs["pk"])
column = kwargs.pop("column")
game.create_coin(user=self.request.user, column=column)
return super().get_redirect_url(*args, **kwargs)
class GameCheckRedirectView(LoginRequiredMixin, GamePlayerMixin, generic.View):
def get(self, request, *args, **kwargs):
game = get_object_or_404(Game, pk=kwargs["pk"])
return JsonResponse(
{
"is_users_turn": game.is_users_turn(request.user.id),
"is_game_over": not game.is_pending,
}
)
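# Illustrative wiring only -- no urls.py is shown in this file, and the route
# names below are assumptions inferred from the views above (pattern_name
# "game_detail" and the "pk"/"column" kwargs they reference):
#
# from django.urls import path
# from . import views
#
# urlpatterns = [
#     path("", views.GameListView.as_view(), name="game_list"),
#     path("new/", views.GameCreateView.as_view(), name="game_create"),
#     path("<int:pk>/", views.GameDetailView.as_view(), name="game_detail"),
#     path("<int:pk>/coin/<int:column>/", views.GameCoinRedirectView.as_view(), name="game_coin"),
#     path("<int:pk>/check/", views.GameCheckRedirectView.as_view(), name="game_check"),
# ]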
| 2.25 | 2 |
crowdsourcing/interfaces/simulator.py | sbranson/online_crowdsourcing | 4 | 12784359 |
import numpy as np
import os
import sys
import copy
import json  # used below to save and load plot data
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
sys.path.append(os.path.join(os.path.dirname(__file__),"../"))
from crowdsourcing.annotation_types.classification import *
def combine_dicts(ds):
v = {}
for d in ds:
for k in d:
v[k] = d[k]
return v
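# Example (illustrative): combine_dicts([{'a': 1, 'b': 2}, {'b': 3}]) == {'a': 1, 'b': 3};
# later dictionaries in the list override earlier ones key by key.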
# Defining a useful suite of lesion study experiments and standardized plot styles across different annotation types
#-------------------------------------------------------
DEFAULT_PLOT_PARAMS = {'line-width':3, 'bar-color':'g', 'bar-width':0.8, 'axis-font-size':20, 'title-font-size':30, 'tick-font-size':16, 'legend-font-size':14}
PROB_WORKER_IMAGE_CV_ONLINE = {'name':'prob-worker-cv-online', 'line_style':'-', 'color':'r', 'use_computer_vision':True, 'online':True, 'simple_crowdsourcing':False, 'learn_worker_params':True, 'learn_image_params':True, 'naive_computer_vision':False, 'batch_size':1000, 'sort_method':'num_annos'}
PROB_WORKER_IMAGE_CV_NAIVE_ONLINE = {'name':'prob-worker-cv-naive-online', 'line_style':'-', 'color':'c', 'use_computer_vision':True, 'online':True, 'simple_crowdsourcing':False, 'learn_worker_params':True, 'learn_image_params':True, 'naive_computer_vision':True, 'batch_size':1000, 'sort_method':'num_annos'}
PROB_WORKER_IMAGE_ONLINE = {'name':'prob-worker-online', 'line_style':'-', 'color':'g', 'use_computer_vision':False, 'online':True, 'simple_crowdsourcing':False, 'learn_worker_params':True, 'learn_image_params':True, 'batch_size':1000, 'sort_method':'num_annos'}
PROB_ONLINE = {'name':'prob-online', 'line_style':'-', 'color':'b', 'use_computer_vision':False, 'online':True, 'simple_crowdsourcing':False, 'learn_worker_params':False, 'learn_image_params':False, 'batch_size':1000, 'sort_method':'num_annos'}
PROB_WORKER_IMAGE_CV_ONLINE_0005 = combine_dicts([PROB_WORKER_IMAGE_CV_ONLINE, {'name':'prob-worker-cv-online-.005', 'min_risk':0.005, 'color':'#FF0000'}])
PROB_WORKER_IMAGE_CV_ONLINE_001 = combine_dicts([PROB_WORKER_IMAGE_CV_ONLINE, {'name':'prob-worker-cv-online-.01', 'min_risk':0.01, 'color':'#BB0000'}])
PROB_WORKER_IMAGE_CV_ONLINE_002 = combine_dicts([PROB_WORKER_IMAGE_CV_ONLINE, {'name':'prob-worker-cv-online-.02', 'min_risk':0.02, 'color':'#770000'}])
PROB_WORKER_IMAGE_CV = {'name':'prob-worker-cv', 'line_style':'-.s', 'color':'r', 'use_computer_vision':True, 'online':False, 'simple_crowdsourcing':False, 'learn_worker_params':True, 'learn_image_params':True, 'naive_computer_vision':False, 'batch_size':1000, 'sort_method':'num_annos'}
PROB_WORKER_IMAGE = {'name':'prob-worker', 'line_style':'-.o', 'color':'g', 'use_computer_vision':False, 'online':False, 'simple_crowdsourcing':False, 'learn_worker_params':True, 'learn_image_params':True, 'batch_size':1000, 'sort_method':'num_annos'}
PROB_WORKER = {'name':'prob-worker-noim', 'line_style':'-.o', 'color':'k', 'use_computer_vision':False, 'online':False, 'simple_crowdsourcing':False, 'learn_worker_params':True, 'learn_image_params':False, 'batch_size':1000, 'sort_method':'num_annos'}
PROB = {'name':'prob', 'line_style':'-.*', 'color':'b', 'use_computer_vision':False, 'online':False, 'simple_crowdsourcing':False, 'learn_worker_params':False, 'learn_image_params':False, 'batch_size':1000, 'sort_method':'num_annos'}
SIMPLE_CROWDSOURCING = {'name':'majority-vote', 'line_style':'-.v', 'color':'m', 'use_computer_vision':False, 'online':False, 'simple_crowdsourcing':True, 'learn_worker_params':False, 'learn_image_params':False, 'batch_size':1000, 'sort_method':'num_annos'}
ALL_METHODS_NO_CV = [PROB_WORKER_IMAGE_ONLINE, PROB_ONLINE, PROB_WORKER_IMAGE, PROB_WORKER, PROB, SIMPLE_CROWDSOURCING]
ALL_PLOTS_NO_CV = [combine_dicts([{'title':'Method Comparison', 'name':'method_comparison_semilog', 'type':'semilog', 'legend':True, 'xlabel':'Avg Number of Human Workers Per Image', 'ylabel':'Error', 'methods':[{'name':PROB_WORKER_IMAGE_ONLINE['name'],'x':'num', 'y':'err'}, {'name':PROB_ONLINE['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER_IMAGE['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER['name'],'x':'num', 'y':'err'}, {'name':PROB['name'],'x':'num', 'y':'err'}, {'name':SIMPLE_CROWDSOURCING['name'],'x':'num', 'y':'err'}]}, DEFAULT_PLOT_PARAMS]),
combine_dicts([{'title':'Method Comparison', 'name':'method_comparison_loglog', 'type':'loglog', 'legend':True, 'xlabel':'Avg Number of Human Workers Per Image #', 'ylabel':'Error', 'methods':[{'name':PROB_WORKER_IMAGE_ONLINE['name'],'x':'num', 'y':'err'}, {'name':PROB_ONLINE['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER_IMAGE['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER['name'],'x':'num', 'y':'err'}, {'name':PROB['name'],'x':'num', 'y':'err'}, {'name':SIMPLE_CROWDSOURCING['name'],'x':'num', 'y':'err'}]}, DEFAULT_PLOT_PARAMS]),
combine_dicts([{'title':'Method Comparison', 'name':'method_comparison', 'type':'plot', 'legend':True, 'xlabel':'Avg Number of Human Workers Per Image', 'ylabel':'Error', 'methods':[{'name':PROB_WORKER_IMAGE_ONLINE['name'],'x':'num', 'y':'err'}, {'name':PROB_ONLINE['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER_IMAGE['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER['name'],'x':'num', 'y':'err'}, {'name':PROB['name'],'x':'num', 'y':'err'}, {'name':SIMPLE_CROWDSOURCING['name'],'x':'num', 'y':'err'}]}, DEFAULT_PLOT_PARAMS]),
combine_dicts([{'title':'Risk Estimation', 'name':'risk_estimation_semilog', 'type':'semilog', 'legend':True, 'xlabel':'Avg Number of Human Workers Per Image', 'ylabel':'Error', 'methods':[{'name':PROB_WORKER_IMAGE['name'], 'x':'num', 'y':'err', 'title':'Actual Error', 'line_style':'b-'}, {'name':PROB_WORKER_IMAGE['name'], 'x':'num', 'y':'risk', 'title':'Estimated Error', 'line_style':'g-'}]}, DEFAULT_PLOT_PARAMS]),
combine_dicts([{'title':'Risk Estimation', 'name':'risk_estimation_loglog', 'type':'loglog', 'legend':True, 'xlabel':'Avg Number of Human Workers Per Image', 'ylabel':'Error', 'methods':[{'name':PROB_WORKER_IMAGE['name'], 'x':'num', 'y':'err', 'title':'Actual Error', 'line_style':'b-'}, {'name':PROB_WORKER_IMAGE['name'], 'x':'num', 'y':'risk', 'title':'Estimated Error', 'line_style':'g-'}]}, DEFAULT_PLOT_PARAMS]),
combine_dicts([{'title':'Risk Estimation', 'name':'risk_estimation', 'type':'plot', 'legend':True, 'xlabel':'Avg Number of Human Workers Per Image', 'ylabel':'Error', 'methods':[{'name':PROB_WORKER_IMAGE['name'], 'x':'num', 'y':'err', 'title':'Actual Error', 'line_style':'b-'}, {'name':PROB_WORKER_IMAGE['name'], 'x':'num', 'y':'risk', 'title':'Estimated Error', 'line_style':'g-'}]}, DEFAULT_PLOT_PARAMS]),
combine_dicts([{'title':'Worker Skill', 'name':'worker_skill', 'type':'skill', 'methods':[{'name':PROB_WORKER_IMAGE['name']}]}, DEFAULT_PLOT_PARAMS]),
combine_dicts([{'title':'Num Annotations', 'name':'num_annotations', 'type':'hist', 'xlabel':'Annotations Per Image', 'ylabel':'Image Count', 'methods':[{'name':PROB_WORKER_IMAGE_ONLINE['name'], 'x':'num_annos', 'y':'num_annos_bins'}]}, DEFAULT_PLOT_PARAMS])
]
ALL_METHODS = [PROB_WORKER_IMAGE_CV_ONLINE_002, PROB_WORKER_IMAGE_CV_ONLINE_001, PROB_WORKER_IMAGE_CV_ONLINE_0005, PROB_WORKER_IMAGE_CV_NAIVE_ONLINE, PROB_WORKER_IMAGE_ONLINE, PROB_ONLINE, PROB_WORKER_IMAGE_CV, PROB_WORKER_IMAGE, PROB, SIMPLE_CROWDSOURCING]
ALL_PLOTS = [combine_dicts([{'title':'Method Comparison', 'name':'method_comparison_semilog', 'type':'semilog', 'legend':True, 'xlabel':'Avg Number of Human Workers Per Image', 'ylabel':'Error', 'methods':[{'name':PROB_WORKER_IMAGE_CV_ONLINE_002['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER_IMAGE_CV_ONLINE_0005['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER_IMAGE_CV_NAIVE_ONLINE['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER_IMAGE_ONLINE['name'],'x':'num', 'y':'err'}, {'name':PROB_ONLINE['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER_IMAGE_CV['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER_IMAGE['name'],'x':'num', 'y':'err'}, {'name':PROB['name'],'x':'num', 'y':'err'}, {'name':SIMPLE_CROWDSOURCING['name'],'x':'num', 'y':'err'}]}, DEFAULT_PLOT_PARAMS]),
combine_dicts([{'title':'Method Comparison', 'name':'method_comparison_loglog', 'type':'loglog', 'legend':True, 'xlabel':'Avg Number of Human Workers Per Image', 'ylabel':'Error', 'methods':[{'name':PROB_WORKER_IMAGE_CV_ONLINE_002['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER_IMAGE_CV_ONLINE_0005['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER_IMAGE_CV_NAIVE_ONLINE['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER_IMAGE_ONLINE['name'],'x':'num', 'y':'err'}, {'name':PROB_ONLINE['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER_IMAGE_CV['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER_IMAGE['name'],'x':'num', 'y':'err'}, {'name':PROB['name'],'x':'num', 'y':'err'}, {'name':SIMPLE_CROWDSOURCING['name'],'x':'num', 'y':'err'}]}, DEFAULT_PLOT_PARAMS]),
combine_dicts([{'title':'Method Comparison', 'name':'method_comparison', 'type':'plot', 'legend':True, 'xlabel':'Avg Number of Human Workers Per Image', 'ylabel':'Error', 'methods':[{'name':PROB_WORKER_IMAGE_CV_ONLINE_002['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER_IMAGE_CV_ONLINE_0005['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER_IMAGE_ONLINE['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER_IMAGE_CV_NAIVE_ONLINE['name'],'x':'num', 'y':'err'}, {'name':PROB_ONLINE['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER_IMAGE_CV['name'],'x':'num', 'y':'err'}, {'name':PROB_WORKER_IMAGE['name'],'x':'num', 'y':'err'}, {'name':PROB['name'],'x':'num', 'y':'err'}, {'name':SIMPLE_CROWDSOURCING['name'],'x':'num', 'y':'err'}]}, DEFAULT_PLOT_PARAMS]),
combine_dicts([{'title':'Risk Estimation', 'name':'risk_estimation_semilog', 'type':'semilog', 'legend':True, 'xlabel':'Avg Number of Human Workers Per Image', 'ylabel':'Error', 'methods':[{'name':PROB_WORKER_IMAGE_CV['name'], 'x':'num', 'y':'err', 'title':'Actual Error', 'line_style':'b-'}, {'name':PROB_WORKER_IMAGE_CV['name'], 'x':'num', 'y':'risk', 'title':'Estimated Error', 'line_style':'g-'}]}, DEFAULT_PLOT_PARAMS]),
combine_dicts([{'title':'Risk Estimation', 'name':'risk_estimation_loglog', 'type':'loglog', 'legend':True, 'xlabel':'Avg Number of Human Workers Per Image', 'ylabel':'Error', 'methods':[{'name':PROB_WORKER_IMAGE_CV['name'], 'x':'num', 'y':'err', 'title':'Actual Error', 'line_style':'b-'}, {'name':PROB_WORKER_IMAGE_CV['name'], 'x':'num', 'y':'risk', 'title':'Estimated Error', 'line_style':'g-'}]}, DEFAULT_PLOT_PARAMS]),
combine_dicts([{'title':'Risk Estimation', 'name':'risk_estimation', 'type':'plot', 'legend':True, 'xlabel':'Avg Number of Human Workers Per Image', 'ylabel':'Error', 'methods':[{'name':PROB_WORKER_IMAGE_CV['name'], 'x':'num', 'y':'err', 'title':'Actual Error', 'line_style':'b-'}, {'name':PROB_WORKER_IMAGE_CV['name'], 'x':'num', 'y':'risk', 'title':'Estimated Error', 'line_style':'g-'}]}, DEFAULT_PLOT_PARAMS]),
combine_dicts([{'title':'Worker Skill', 'name':'worker_skill', 'type':'skill', 'methods':[{'name':PROB_WORKER_IMAGE['name']}]}, DEFAULT_PLOT_PARAMS]),
combine_dicts([{'title':'Num Annotations', 'name':'num_annotations', 'type':'hist', 'xlabel':'Annotations Per Image', 'ylabel':'Image Count', 'methods':[{'name':PROB_WORKER_IMAGE_CV_ONLINE['name'], 'x':'num_annos', 'y':'num_annos_bins'}]}, DEFAULT_PLOT_PARAMS])
]
#-------------------------------------------------------
class SimulatedCrowdsourcer(object):
def __init__(self, full_dataset, expert_dataset=None, save_prefix=None, output_dir='output', online=True, simple_crowdsourcing=False, learn_worker_params=True, learn_image_params=True, use_computer_vision=False, naive_computer_vision=False, batch_size=1000, num_rand_perms=1, sort_method='num_annos', name=None, line_style='-', color='r', save_all_perms=False, min_risk=0.005):
self.full_dataset, self.expert_dataset = full_dataset, expert_dataset
self.online, self.simple_crowdsourcing = online, simple_crowdsourcing
self.learn_worker_params, self.learn_image_params = learn_worker_params, learn_image_params
self.use_computer_vision, self.batch_size = use_computer_vision, (batch_size if online else len(full_dataset.images))
self.naive_computer_vision = naive_computer_vision
self.num_rand_perms, self.sort_method = num_rand_perms, sort_method
self.save_prefix, self.output_dir = save_prefix, output_dir
self.max_workers_per_image, self.save_all_perms = 1, save_all_perms
self.min_risk = min_risk
def run(self):
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
sum_plot_data, num_plot_data, fnames = [], [], []
all_plot_data = {}
for rand_perm in range(self.num_rand_perms):
plot_data = {'num':[], 'err':[], 'risk':[]}
iter = 0
self.rand_perms = {}
#if self.full_dataset.computer_vision_predictor and hasattr(self.full_dataset.computer_vision_predictor, 'iteration'):
# self.full_dataset.computer_vision_predictor.iteration = 0
self.dataset = self.full_dataset.__class__(debug=0, learn_worker_params=self.learn_worker_params, learn_image_params=self.learn_image_params, computer_vision_predictor=(self.full_dataset.computer_vision_predictor if self.use_computer_vision else None), naive_computer_vision=self.naive_computer_vision, min_risk=self.min_risk)
self.dataset.copy_parameters_from(self.full_dataset, full=False)
for i in self.full_dataset.images:
self.dataset.images[i] = self.full_dataset._CrowdImageClass_(i, self.dataset)
self.dataset.images[i].copy_parameters_from(self.full_dataset.images[i], full=False)
for w in self.full_dataset.workers:
self.dataset.workers[w] = self.full_dataset._CrowdWorkerClass_(w, self.dataset)
self.dataset.workers[w].copy_parameters_from(self.full_dataset.workers[w], full=False)
while self.dataset.num_unfinished(full_dataset=self.full_dataset) > 0:
if self.simple_crowdsourcing:
self.dataset.crowdsource_simple()
else:
self.dataset.estimate_parameters(avoid_if_finished=True)
self.dataset.check_finished_annotations(set_finished=self.online)
if self.expert_dataset:
err,num = self.dataset.compute_error(self.expert_dataset), self.dataset.num_annotations()
if not self.simple_crowdsourcing:
plot_data["risk"].append(self.dataset.risk())
plot_data["num"].append(float(num)/len(self.dataset.images))
plot_data["err"].append(err)
if self.save_prefix and rand_perm==0 or self.save_all_perms:
fname = self.output_dir+'/'+self.save_prefix+str(rand_perm)+'_'+str(iter)+'.json'
self.dataset.save(fname)
fnames.append({str(rand_perm)+'_'+str(iter):fname})
self.augment_annotations_if_necessary()
iter += 1
if hasattr(self.dataset, 'parts'):
plot_data["num_annos"] = []
for p in range(len(self.dataset.parts)):
plot_data["num_annos"] += [self.dataset.parts[p].images[i].num_annotations() for i in self.dataset.parts[p].images]
else:
plot_data["num_annos"] = [self.dataset.images[i].num_annotations() for i in self.dataset.images]
plot_data["num_annos_bins"] = np.arange(-.5, np.asarray(plot_data["num_annos"]).max()+.5, 1).tolist()
if hasattr(self.dataset.workers[w],'skill') and self.dataset.workers[w].skill:
for s in range(len(self.dataset.workers[next(iter(self.dataset.workers))].skill)):
plot_data["skill"+str(s)] = [self.dataset.workers[w].skill[s] for w in self.dataset.workers]
if self.dataset.cv_worker: plot_data["skill_cv"+str(s)] = [self.dataset.cv_worker.skill[s]]
plot_data["worker_num_annos"] = [len(self.dataset.workers[w].images) for w in self.dataset.workers]
for k in plot_data:
if not k in all_plot_data:
all_plot_data[k] = []
all_plot_data[k].append(plot_data[k])
plot_data = {}
for k in all_plot_data:
ml = int(np.asarray([len(c) for c in all_plot_data[k]]).max())
a = np.zeros((self.num_rand_perms, ml))
valid = np.zeros((self.num_rand_perms, ml))
for i in range(self.num_rand_perms):
a[i,:len(all_plot_data[k][i])] = all_plot_data[k][i]
valid[i,:len(all_plot_data[k][i])] = 1
if k == 'num_annos':
plot_data[k] = a.flatten().tolist()
else:
plot_data[k] = (a.sum(axis=0) / valid.sum(axis=0)).tolist()
plot_data[k+'_var'] = ((((a-plot_data[k])**2)*valid).sum(axis=0) / valid.sum(axis=0)).tolist()
return plot_data, fnames, all_plot_data
def augment_annotations_if_necessary(self):
processed = []
num = 0
image_ids = self.dataset.choose_images_to_annotate_next(max_workers_per_image=self.max_workers_per_image, sort_method=self.sort_method, full_dataset=self.full_dataset)
for i in image_ids:
processed.append(i)
workers = self.full_dataset.images[i].workers
if not i in self.rand_perms:
self.rand_perms[i] = np.random.permutation(len(self.full_dataset.images[i].z))
#print str(i) + " " + str(len(workers)) + " " + str(workers)
has_cv = (1 if (self.dataset.cv_worker and self.dataset.cv_worker.id in self.dataset.images[i].z) else 0)
fd_has_cv = (1 if (self.full_dataset.cv_worker and self.full_dataset.cv_worker.id in self.full_dataset.images[i].z) else 0)
for j in range(len(self.dataset.images[i].z)-has_cv, min(len(self.dataset.images[i].z)-has_cv+self.max_workers_per_image, len(self.full_dataset.images[i].z)-fd_has_cv)):
w = workers[self.rand_perms[i][j]]
if not self.dataset.images[i].finished:
assert not w in self.dataset.images[i].z, "Duplicate worker " + str(w) + " for image " + str(i) + " calling augment_annotations()"
z = self.dataset._CrowdLabelClass_(self.dataset.images[i], self.dataset.workers[w])
z.parse(self.full_dataset.images[i].z[w].raw_data)
self.dataset.images[i].z[w] = z
self.dataset.images[i].workers.append(w)
self.dataset.workers[w].images[i] = self.dataset.images[i]
num += 1
if num >= self.batch_size:
break
'''
if self.use_computer_vision:
for i in processed:
self.dataset.images[i].predict_true_labels(avoid_if_finished=True) # Initialize training label for computer vision
self.dataset.synch_computer_vision_labels()
'''
def RunSimulatedExperiments(full_dataset, methods, output_dir, plots, expert_dataset=None, title=None, num_rand_perms=5, force_compute=False, show_intermediate_results=True):
results, fnames, methods_d, all_plot_data = {}, {}, {}, {}
fnames_i = []
for a in methods:
m = a['name']
if not os.path.exists(os.path.join('output', output_dir, 'plot_data_'+m+'.json')) or force_compute:
sc = SimulatedCrowdsourcer(full_dataset, expert_dataset=(expert_dataset if expert_dataset else full_dataset), num_rand_perms=num_rand_perms, output_dir='output/'+output_dir, save_prefix=m, **a)
results[m], fnames[m], all_plot_data[m] = sc.run()
with open(os.path.join('output', output_dir, 'plot_data_'+m+'.json'), 'w') as f:
json.dump({'results':results[m], 'fnames':fnames[m], 'all_plot_data':all_plot_data[m]}, f)
else:
with open(os.path.join('output', output_dir, 'plot_data_'+m+'.json')) as f:
data = json.load(f)
results[m], fnames[m], all_plot_data[m] = data['results'], data['fnames'], data['all_plot_data']
methods_d[m] = a
fnames_c = []
for f in fnames[a['name']]:
key = list(f.keys())[0]
fnames_c.append({key: f[key][len('output/'):]})
fnames_i.append({'name':a['name'], 'files':fnames_c})
if show_intermediate_results:
GeneratePlotResults(full_dataset, methods, output_dir, plots, results, methods_d, fnames_i, title)
GeneratePlotResults(full_dataset, methods, output_dir, plots, results, methods_d, fnames_i, title)
def GeneratePlotResults(full_dataset, methods, output_dir, plots, results, methods_d, fnames_i, title):
plot_files = []
for i in range(len(plots)):
handles, labels = [], []
fig = plt.figure(i+1)
plt.clf()
plot = plots[i]
if 'xlim' in plot: plt.xlim(plot['xlim'][0], plot['xlim'][1])
if 'ylim' in plot: plt.ylim(plot['ylim'][0], plot['ylim'][1])
for a in plots[i]['methods']:
m = a['name']
if not m in methods_d:
continue
print(str(i) + ' ' + str(m))
line_style = a['line_style'] if 'line_style' in a else methods_d[m]['line_style']
color = a['color'] if 'color' in a else methods_d[m]['color']
if plot['type'] == 'skill':
plot = copy.deepcopy(plots[i])
plot['type'] = 'scatter' if len(full_dataset.skill_names) <= 2 else 'scatter3d'
if not 'xlabel' in plot: plot['xlabel'] = full_dataset.skill_names[0]
if not 'ylabel' in plot: plot['ylabel'] = full_dataset.skill_names[1]
if not 'zlabel' in plot and len(full_dataset.skill_names) > 2: plot['zlabel'] = full_dataset.skill_names[2]
if not 'x' in a: a['x'] = 'skill0'
if not 'y' in a: a['y'] = 'skill1'
if not 'z' in a and len(full_dataset.skill_names) > 2: a['z'] = 'skill2'
if plot['type'] == 'semilog':
print(str(len(results[m][a['x']])) + ' ' + str(len(results[m][a['y']])))
h = plt.semilogy(results[m][a['x']], results[m][a['y']], line_style, lw=plot["line-width"], color=color)
handles.append(h[0])
elif plot['type'] == 'loglog':
h = plt.loglog(results[m][a['x']], results[m][a['y']], line_style, lw=plot["line-width"], color=color)
handles.append(h[0])
elif plot['type'] == 'plot':
h = plt.plot(results[m][a['x']], results[m][a['y']], line_style, lw=plot["line-width"])
handles.append(h[0])
elif plot['type'] == 'hist':
if 'y' in a:
h = plt.hist(results[m][a['x']], results[m][a['y']], histtype='bar', rwidth=plot["bar-width"], color=plot["bar-color"])
else:
h = plt.hist(results[m][a['x']], histtype='bar', rwidth=plot["bar-width"], color=plot["bar-color"])
handles.append(h[0])
elif plot['type'] == 'scatter':
h = plt.scatter(results[m][a['x']], results[m][a['y']], c='r', marker='o')
handles.append(h)
elif plot['type'] == 'scatter3d':
ax = fig.add_subplot(111, projection='3d')
h = ax.scatter(results[m][a['x']], results[m][a['y']], results[m][a['z']], c='r', marker='o')
handles.append(h)
labels.append(a['title'] if 'title' in a else m)
if plot['type'] == 'scatter3d':
if 'xlabel' in plot: ax.set_xlabel(plot['xlabel'], fontsize=plot['axis-font-size'])
if 'ylabel' in plot: ax.set_ylabel(plot['ylabel'], fontsize=plot['axis-font-size'])
if 'zlabel' in plot: ax.set_zlabel(plot['zlabel'], fontsize=plot['axis-font-size'])
else:
if 'xlabel' in plot: plt.xlabel(plot['xlabel'], fontsize=plot['axis-font-size'])
if 'ylabel' in plot: plt.ylabel(plot['ylabel'], fontsize=plot['axis-font-size'])
# note: 2-D pyplot axes have no zlabel; z-axis labels are handled in the scatter3d branch above
if 'title' in plot: plt.title(plot['title'], fontsize=plot['title-font-size'])
plt.tick_params(axis='both', which='major', labelsize=plot['tick-font-size'])
plt.tick_params(axis='both', which='minor', labelsize=plot['tick-font-size'])
if 'legend' in plot: plt.legend(handles, labels, prop={'size':plot['legend-font-size']})
plt.savefig(os.path.join('output', output_dir, plot['name']+'.pdf'))
plt.savefig(os.path.join('output', output_dir, plot['name']+'.png'))
plot_files.append(output_dir + '/' + plot['name']+'.png')
with open(os.path.join('output', output_dir, 'galleries.json'), 'w') as f:
json.dump({'plots':plot_files, 'methods':fnames_i, 'title':title}, f)
| 2.046875 | 2 |
examples/time_price_bot.py | yuepaang/tiny_bot | 11 | 12784360 |
from tiny_bot import *
store = RedisStore("test", "redis://localhost:6379/0")
class QueryAction(Action):
def run(self, bot, tracker, msg):
result = "从<%s>邮寄到<%s>,重量:<%s>,时间:<%s> 不要钱,哈哈!" % (
tracker._from, tracker.to, tracker.weight, tracker.time)
return Response(result)
class MyTracker(store.Tracker):
__domain__ = "price_time_bot"
_from = StringField()
to = StringField()
weight = FloatField()
time = StringField()
class MyActionHub(ActionHub):
query = QueryAction()
# utter template actions: a reply is chosen at random from the list
utter_bye = ['再见', 'bye', '滚蛋']
utter_greeting = ['你好', '小垃圾,咋了?']
utter_ask__from = "从哪邮寄?"
utter_ask_to = ['要邮寄到哪?']
utter_ask_weight = "多重?"
utter_ask_time = "什么时候邮寄?"
class MyPolicy(Policy):
def predict(self, bot, tracker, msg):
if msg.intent == "bye":
return ['utter_bye'], 0
elif msg.intent == "greeting":
return ['utter_greeting'], None
elif msg.intent in ("query", "inform"):
for slot in ['_from', 'to', 'weight', 'time']:
if tracker[slot] is None:
return ['utter_ask_%s' % slot], 100
return ['query', 'utter_bye'], 1
else:
raise Exception("should never happen!")
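# MyPolicy implements a simple slot-filling dialogue: for a query/inform intent
# it asks for the first missing slot (_from, to, weight, time); once all slots
# are filled it runs the `query` action and then says goodbye.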
class MyNLU(NLU):
def parse(self, bot, tracker, msg):
return msg
class MyBot(Bot):
__domain__ = "price_time_bot"
TRACKER = MyTracker
ACTIONS = MyActionHub
INTENTS = ["query", "inform", "greeting", "bye"]
NLU = MyNLU
POLICIES = [MyPolicy]
if __name__ == '__main__':
bot = MyBot()
class Error1(Exception):
pass
class Error2(Exception):
pass
class Error3(Exception):
pass
@bot.catch(Error1)
def catch1(tracker, req):
return Response("catch1")
@bot.catch((Error1, Error2))
def catch2(tracker, req):
return Response("catch2")
@bot.before_request
def before_request(tracker, req):
print("before_request", req)
return req
@bot.after_request
def after_request(tracker, res):
print("after_request", res)
return res
@bot.before_action
def before_acton(act, tracker, req):
print("before_acton", act)
@bot.after_action
def after_action(act, tracker, req):
print("after_action", act)
_from = [
{'entity': '_from', 'value': '北京'},
]
to = [
{'entity': 'to', 'value': '上海'},
]
weight = [
{'entity': 'weight', 'value': 12.0},
]
time = [
{'entity': 'time', 'value': "今天"},
]
res = bot.handle_msg(
Request(body="", intent="query", entities=_from+to), "1111")
assert len(res) == 1 and res[0].body == "多重?"
res = bot.handle_msg(
Request(body="", intent="inform", entities=weight), "1111")
assert len(res) == 1 and res[0].body == "什么时候邮寄?"
res = bot.handle_msg(
Request(body="", intent="inform", entities=time), "1111")
assert len(res) == 2 and res[1].body in ['再见', 'bye', '滚蛋']
assert res[0].body == "从<北京>邮寄到<上海>,重量:<12.0>,时间:<今天> 不要钱,哈哈!"
| 2.5 | 2 |
camper/tests/test_waitinglist.py | mrtopf/camper | 13 | 12784361 |
from conftest import create_user
from camper import *
import pytest
@pytest.mark.skip()
def test_subscribe_user(barcamp, app):
user = create_user(app, "user1")
barcamp.subscribe(user)
bc = app.get_barcamp("test")
assert unicode(user._id) in bc.subscribers
def test_register_user_duplicate(barcamp, app):
# we do this via the main event
user = create_user(app, "user1")
barcamp = app.get_barcamp("test") # retrieve it again to have an event initialized
event = barcamp.event
event.add_participant(user)
event.add_participant(user)
barcamp.save()
bc = app.get_barcamp("test")
assert unicode(user._id) in bc.event.participants
assert len(bc.event.participants) == 1
def test_register_users_until_waitinglist(barcamp, app):
users = []
barcamp = app.get_barcamp("test") # retrieve it again to have an event initialized
event = barcamp.event
for i in range(1,6):
user = create_user(app, "user1")
users.append(unicode(user._id))
event.add_participant(user)
user = create_user(app, "user1")
pytest.raises(ParticipantListFull, event.add_participant, user)
barcamp.save()
bc = app.get_barcamp("test")
assert unicode(user._id) not in bc.event.participants
assert unicode(user._id) in bc.event.waiting_list
assert len(bc.event.participants) == 5
assert len(bc.event.waiting_list) == 1
| 2.21875 | 2 |
graphlearning.py | shekkizh/GraphLearning | 1 | 12784362 |
'''
GraphPy: Python module for graph-based learning algorithms. Efficient implementations of modern methods for graph-based semi-supervised learning and graph clustering.
See README.md file for usage.
Author: <NAME>, 2020
'''
import numpy as np
import datetime
import matplotlib.pyplot as plt
import matplotlib
import scipy.spatial as spatial
import scipy.optimize as opt
import numpy.random as random
import scipy.sparse as sparse
import scipy.sparse.linalg as splinalg
import scipy.sparse.csgraph as csgraph
import sklearn.cluster as cluster
from sklearn.decomposition import PCA
import sys, getopt, time, csv, torch, os, multiprocessing
from joblib import Parallel, delayed
from utils.non_neg_qpsolver import non_negative_qpsolver
clustering_algorithms = ['incres', 'spectral', 'spectralshimalik', 'spectralngjordanweiss']
# Print iterations progress
def printProgressBar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█'):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end='\r')
# Print New Line on Complete
if iteration == total:
print()
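# Example usage (illustrative):
# for i in range(101):
#     printProgressBar(i, 100, prefix='Progress:', suffix='Complete', length=40)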
def load_mbo_eig(dataset, metric, k):
# Load eigenvector data if MBO selected
try:
location = os.path.dirname(os.path.realpath(__file__))
dataFile = dataset + "_" + metric + "_k%d" % k + "_spectrum.npz"
dataFile_path = os.path.join(location, 'MBOdata', dataFile)
M = np.load(dataFile_path, allow_pickle=True)
eigvals = M['eigenvalues']
eigvecs = M['eigenvectors']
except:
print("Could not find MBOdata/" + dataset + "_" + metric + "_k%d" % k + "_spectrum.npz")
print('You need to run ComputeEigenvectorsMBO.py first.')
sys.exit(2)
return eigvals, eigvecs
def load_label_permutation(dataset, label_perm='', t='-1'):
location = os.path.dirname(os.path.realpath(__file__))
dataFile = dataset + label_perm + "_permutations.npz"
dataFile_path = os.path.join(location, 'LabelPermutations', dataFile)
# Load label permutation
try:
M = np.load(dataFile_path, allow_pickle=True)
perm = M['perm']
except:
print('Cannot find ' + dataFile)
print('You need to run CreateLabelPermutation.py first.')
sys.exit(2)
# Restrict trials
t = [int(e) for e in t.split(',')]
if t[0] > -1:
if len(t) == 1:
perm = perm[0:t[0]]
else:
perm = perm[(t[0] - 1):t[1]]
return perm
def load_dataset(dataset, metric='L2'):
# For variational autoencoder the vae data, e.g., Data/MNIST_vae.npz must exist.
if metric[0:3] == 'vae' or metric[0:3] == 'aet':
dataFile = dataset + "_" + metric + ".npz"
else:
dataFile = dataset + "_raw.npz"
location = os.path.dirname(os.path.realpath(__file__))
dataFile_path = os.path.join(location, 'Data', dataFile)
# Try to Load data
try:
M = np.load(dataFile_path, allow_pickle=True)
data = M['data']
except:
print('Cannot find ' + dataFile + '.')
sys.exit(2)
return data
def load_labels(dataset):
location = os.path.dirname(os.path.realpath(__file__))
dataFile = dataset + "_labels.npz"
dataFile_path = os.path.join(location, 'Data', dataFile)
# Load labels
try:
M = np.load(dataFile_path, allow_pickle=True)
labels = M['labels']
except:
print('Cannot find dataset Data/' + dataFile)
sys.exit(2)
return labels
def load_kNN_data(dataset, metric='L2'):
location = os.path.dirname(os.path.realpath(__file__))
dataFile = dataset + "_" + metric + ".npz"
dataFile_path = os.path.join(location, 'kNNData', dataFile)
# Load kNN data
try:
M = np.load(dataFile_path, allow_pickle=True)
I = M['I']
J = M['J']
D = M['D']
except:
print('Cannot find ' + dataFile)
print('You need to run ComputeKNN.py.')
sys.exit(2)
return I, J, D
# Compute sizes of each class
def label_proportions(labels):
L = np.unique(labels)
L = L[L >= 0]
k = len(L)
# n = len(labels)
n = np.sum(labels >= 0)
beta = np.zeros((k,))
for i in range(k):
beta[i] = np.sum(labels == L[i]) / n
return beta
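# Illustrative sketch (not part of the original module): label_proportions on a toy
# label vector; negative entries mark unlabeled nodes and are ignored.
def _demo_label_proportions():
    toy_labels = np.array([0, 0, 0, 1, 1, -1])  # hypothetical labels; -1 is unlabeled
    return label_proportions(toy_labels)        # expected: array([0.6, 0.4])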
# Constructs a weight matrix for graph on mxn grid with NSEW neighbors
# def grid_graph(m, n):
# X, Y = np.mgrid[:m, :n]
#
# return W
# Reweights the graph to use self-tuning weights
def self_tuning(W, D, alpha):
if alpha != 0:
n = D.shape[0]
k = D.shape[1]
d = D[:, k - 1]
d = sparse.spdiags(d ** (-alpha), 0, n, n)
W = d * W * d
return W
# Reweights the graph based on a clustering prior
def cluster_prior(W, cluster_labels):
n = W.shape[0]
I, J, V = sparse.find(W)
K = cluster_labels[I] == cluster_labels[J]
V[K] = V[K] * 10
V = V / np.max(V)
W = sparse.coo_matrix((V, (I, J)), shape=(n, n)).tocsr()
return W
# Computes scattering transform of depth 2 of I
# Bruna, Joan, and Stéphane Mallat. "Invariant scattering convolution networks." IEEE transactions on pattern analysis and machine intelligence 35.8 (2013): 1872-1886.
def scattering_transform(I, n, m, depth=2):
from kymatio import Scattering2D
num_pts = I.shape[0]
K = torch.from_numpy(I.reshape((num_pts, n, m))).float().contiguous()
scattering = Scattering2D(J=depth, shape=(n, m))
Z = scattering(K).numpy()
l = Z.shape[1] * Z.shape[2] * Z.shape[3]
return Z.reshape((num_pts, l))
# Label permutations at prescribed label rates
# labels = label vector
# T = number of trials
# R = list of label rates, given as percentages (each class keeps round(r% of its size) labels)
def create_label_permutations_rate(labels, T, R):
perm = list()
n = labels.shape[0]
labelvals = np.unique(labels)
labelvals = labelvals[labelvals >= 0]
num_labels = len(labelvals)
num = np.zeros((num_labels,))
for i in range(num_labels):
num[i] = np.sum(labels == labelvals[i])
J = np.arange(n).astype(int)
for k in range(T):
for r in R:
L = []
for i in range(num_labels):
l = labelvals[i]
I = labels == l
K = J[I]
m = round(num[i] * r / 100)
L = L + random.choice(K, size=m.astype(int), replace=False).tolist()
L = np.array(L)
perm.append(L)
return perm
# Label permutations with a fixed number of labels per class
# labels = label vector
# T = number of trials
# m = list of label counts (number of labeled points per class)
def create_label_permutations(labels, T, m, multiplier=None):
# Find all unique labels >= 0
# Negative numbers indicate unlabeled nodes
unique_labels = np.unique(labels)
unique_labels = unique_labels[unique_labels >= 0]
perm = list()
n = labels.shape[0]
J = np.arange(n).astype(int)
for k in range(T):
for i in m:
L = []
ind = 0
for l in unique_labels:
I = labels == l
K = J[I]
if multiplier is None:
L = L + random.choice(K, size=i, replace=False).tolist()
else:
sze = int(np.round(i * multiplier[ind]))
L = L + random.choice(K, size=sze, replace=False).tolist()
ind = ind + 1
L = np.array(L)
perm.append(L)
return perm
# Randomly choose m labels per class
def randomize_labels(L, m):
perm = create_label_permutations(L, 1, [m])
return perm[0]
# Default function
def exp_weight(x):
return np.exp(-x)
# Pointwise max of non-negative sparse matrices A and B
def sparse_max(A, B):
I = (A + B) > 0
IB = B > A
IA = I - IB
return A.multiply(IA) + B.multiply(IB)
# Compute degrees of weight matrix W
def degrees(W):
return np.squeeze(np.array(np.sum(W, axis=1)))
# Multiply diagonal of matrix by degree
def diag_multiply(W, b):
n = W.shape[0] # Number of points
D = sparse.spdiags(W.diagonal(), 0, n, n)
return W - (1 - b) * D
# Compute degrees of weight matrix W
# Returns sparse matrix with degrees on diagonal
def degree_matrix(W, p=1):
n = W.shape[0] # Number of points
# Construct sparse degree matrix
d = degrees(W)
D = sparse.spdiags(d ** p, 0, n, n)
return D.tocsr()
# Construct robin boundary condition matrix
def robin_bc_matrix(X, nu, eps, gamma):
n = X.shape[0]
Xtree = spatial.cKDTree(X)
_, nn_ind = Xtree.query(X + eps * nu)
# nn_dist = np.linalg.norm(X - X[nn_ind,:],axis=1)
nn_dist = eps * np.ones((n,))
# Robin matrix
A = sparse.spdiags(gamma + (1 - gamma) / nn_dist, 0, n, n)
B = sparse.coo_matrix(((1 - gamma) / nn_dist, (range(n), nn_ind)), shape=(n, n))
R = (A - B).tocsr()
return R
# Laplace matrix
# W = weight matrix
# norm = type of normalization
# Options: none, randomwalk, normalized
def graph_laplacian(W, norm="none"):
D = degree_matrix(W)
if norm == "none":
L = D - W
elif norm == "randomwalk1":
Dinv = degree_matrix(W, p=-1)
L = Dinv * (D - W)
elif norm == "randomwalk2":
Dinv = degree_matrix(W, p=-1)
L = (D - W) * Dinv
elif norm == "normalized":
Dinv2 = degree_matrix(W, p=-1 / 2)
L = Dinv2 * (D - W) * Dinv2
else:
print("Invalid option for graph Laplacian normalization. Returning unnormalized Laplacian.")
L = D - W
return L.tocsr()
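# Illustrative sketch (not part of the original module): unnormalized Laplacian of a
# 3-node path graph. The adjacency matrix below is hypothetical toy data.
def _demo_graph_laplacian():
    A = sparse.csr_matrix(np.array([[0., 1., 0.],
                                    [1., 0., 1.],
                                    [0., 1., 0.]]))
    L = graph_laplacian(A, norm="none")  # D - A
    return L.toarray()                   # [[1,-1,0],[-1,2,-1],[0,-1,1]]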
# Graph phi-Laplacian
# W = sparse weight matrix
# u = function on graph
def graph_phi_laplacian(W, u, phi, I=None, J=None, V=None):
n = W.shape[0]
if I is None or J is None:
I, J, V = sparse.find(W)
w = u[J] - u[I]
a = np.absolute(w)
pa = phi(a)
    # Reweight edges by phi(|du|)/|du|; m holds the resulting generalized degrees
    M = sparse.coo_matrix((V * pa / (a + 1e-13), (I, J)), shape=(n, n)).tocsr()
    m = degrees(M)
M = sparse.coo_matrix((V * pa * np.sign(w), (I, J)), shape=(n, n)).tocsr()
M = np.squeeze(np.array(np.sum(M, axis=1)))
return M, m
# Graph infinity Laplacian
# W = sparse weight matrix
# u = function on graph
def graph_infinity_laplacian(W, u, I=None, J=None, V=None):
n = W.shape[0]
if I is None or J is None:
I, J, V = sparse.find(W)
M = sparse.coo_matrix((V * (u[J] - u[I]), (I, J)), shape=(n, n)).tocsr()
M = M.min(axis=1) + M.max(axis=1)
return M.toarray().flatten()
# Construct epsilon-graph sparse distance matrix
def eps_weight_matrix(X, eps, f=exp_weight):
n = X.shape[0] # Number of points
# Rangesearch to find nearest neighbors
Xtree = spatial.cKDTree(X)
M = Xtree.query_pairs(eps)
M = np.array(list(M))
# Differences between points and neighbors
V = X[M[:, 0], :] - X[M[:, 1], :]
D = np.sum(V * V, axis=1)
# Weights
D = f(4 * D / (eps * eps))
# Symmetrize weights and add diagonal entries
D = np.concatenate((D, D, f(0) * np.ones(n, )))
M1 = np.concatenate((M[:, 0], M[:, 1], np.arange(0, n)))
M2 = np.concatenate((M[:, 1], M[:, 0], np.arange(0, n)))
# Construct sparse matrix and convert to Compressed Sparse Row (CSR) format
W = sparse.coo_matrix((D, (M1, M2)), shape=(n, n))
return W.tocsr()
# Exact knnsearch
def knnsearch(X, k):
# KDtree to find nearest neighbors
n = X.shape[0]
Xtree = spatial.cKDTree(X)
D, J = Xtree.query(X, k=k)
I = np.ones((n, k), dtype=int) * J[:, 0][:, None]
return I, J, D
# Perform approximate nearest neighbor search, returning indices I,J of neighbors, and distance D
# The similarity argument can be "angular", "euclidean", "manhattan", "hamming", or "dot".
def knnsearch_annoy(X, k, similarity='euclidean'):
from annoy import AnnoyIndex
n = X.shape[0] # Number of points
dim = X.shape[1] # Dimension
print('kNN search with Annoy approximate nearest neighbor package...')
printProgressBar(0, n, prefix='Progress:', suffix='Complete', length=50)
u = AnnoyIndex(dim, similarity) # Length of item vector that will be indexed
for i in range(n):
u.add_item(i, X[i, :])
u.build(10) # 10 trees
D = []
I = []
J = []
for i in range(n):
printProgressBar(i + 1, n, prefix='Progress:', suffix='Complete', length=50)
A = u.get_nns_by_item(i, k, include_distances=True, search_k=-1)
I.append([i] * k)
J.append(A[0])
D.append(A[1])
I = np.array(I)
J = np.array(J)
D = np.array(D)
return I, J, D
# Compute weight matrix from nearest neighbor indices I,J and distances D
def weight_matrix_selftuning(I, J, D):
n = I.shape[0]
k = I.shape[1]
# Distance to kth nearest neighbor as a matrix
sigma = D[:, k - 1]
sigma = sparse.spdiags(1 / sigma, 0, n, n)
sigma = sigma.tocsr()
# Flatten
I = I.flatten()
J = J.flatten()
D = D.flatten()
# Symmetrize and remove redundant entries
M1 = np.vstack((I, J, D))
M2 = np.vstack((J, I, D))
M = np.concatenate((M1, M2), axis=1)
M = np.unique(M, axis=1)
# Construct sparse matrix and convert to Compressed Sparse Row (CSR) format
I = M[0, :]
J = M[1, :]
D = M[2, :]
dist = sparse.coo_matrix((D, (I, J)), shape=(n, n)).tocsr()
B = sparse.coo_matrix((np.ones(len(D), ), (I, J)), shape=(n, n)).tocsr() # Ones in all entries
# Self-tuning weights
E = -4 * sigma * (dist ** 2) * sigma
W = E.expm1()
W = W.multiply(B) + B
return W
# Compute weight matrix from nearest neighbor indices I,J and distances D
# k = number of neighbors
# Chooses k neighbors at random from the I.shape[1] nearest neighbors
def weight_matrix_homogenized(I, J, D, k, f=exp_weight):
# I = I[:,:10]
# J = J[:,:10]
# D = D[:,:10]
# Restrict I,J,D to k neighbors
k = np.minimum(I.shape[1], k)
n = I.shape[0]
for i in range(n):
ind = random.choice(I.shape[1], k, replace=False)
I[i, :k] = I[i, ind]
J[i, :k] = J[i, ind]
D[i, :k] = 1
n = I.shape[0]
k = I.shape[1]
D = D * D
eps = D[:, k - 1] / 4
D = f(D / eps[:, None])
# Flatten
I = I.flatten()
J = J.flatten()
D = D.flatten()
# Construct sparse matrix and convert to Compressed Sparse Row (CSR) format
W = sparse.coo_matrix((D, (I, J)), shape=(n, n)).tocsr()
return W
# Compute distance matrix from nearest neighbor indices I,J and distances D
# k = number of neighbors
def dist_matrix(I, J, D, k):
# Restrict I,J,D to k neighbors
k = np.minimum(I.shape[1], k)
I = I[:, :k]
J = J[:, :k]
D = D[:, :k]
n = I.shape[0]
k = I.shape[1]
# Flatten
I = I.flatten()
J = J.flatten()
D = D.flatten()
# Construct sparse matrix and convert to Compressed Sparse Row (CSR) format
W = sparse.coo_matrix((D, (I, J)), shape=(n, n)).tocsr()
return W
# Adds weights to an adjacency matrix W using similarity in data X
def add_weights(W, X, labels):
n = W.shape[0]
# pca = PCA(n_components=20)
# X = pca.fit_transform(X)
# print(X.shape)
I, J, V = sparse.find(W)
# Dot products
Y = X[I, :] - X[J, :]
Y = np.sum(Y * Y, axis=1)
W = sparse.coo_matrix((Y, (I, J)), shape=(n, n)).tocsr()
max_dist = np.reshape(np.max(W, axis=1).todense().tolist(), (n,))
D = sparse.spdiags((max_dist + 1e-10) ** (-1), 0, n, n).tocsr()
W = D * W
I, J, V = sparse.find(W)
V = np.exp(-2 * V)
W = sparse.coo_matrix((V, (I, J)), shape=(n, n)).tocsr()
return W
# Finds largest connected component of the graph represented by adjacency matrix W
# Returns the weighted adjacency matrix, along with a boolean mask indicating the
# vertices from the input matrix that were selected
def largest_conn_component(W):
ncomp, labels = csgraph.connected_components(W, directed=False)
num_verts = np.zeros((ncomp,))
for i in range(ncomp):
num_verts[i] = np.sum(labels == i)
i_max = np.argmax(num_verts)
ind = labels == i_max
A = W[ind, :]
A = A[:, ind]
print("Found %d" % ncomp + " connected components.")
print("Returning component with %d" % num_verts[i_max] + " vertices out of %d" % W.shape[0] + " total vertices.")
return A, ind
# Compute weight matrix from nearest neighbor indices I,J and distances D
# k = number of neighbors
def weight_matrix(I, J, D, k, f=exp_weight, symmetrize=True):
# Restrict I,J,D to k neighbors
k = np.minimum(I.shape[1], k)
I = I[:, :k]
J = J[:, :k]
D = D[:, :k]
n = I.shape[0]
k = I.shape[1]
D = D * D
eps = D[:, k - 1] / 4
D = f(D / eps[:, None])
# Flatten
I = I.flatten()
J = J.flatten()
D = D.flatten()
# Construct sparse matrix and convert to Compressed Sparse Row (CSR) format
W = sparse.coo_matrix((D, (I, J)), shape=(n, n)).tocsr()
if symmetrize:
W = (W + W.transpose()) / 2;
return W
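# Illustrative sketch (not part of the original module): build a symmetric Gaussian
# k-NN weight matrix on synthetic 2D data using the exact search above. The function
# name, point count, and k below are hypothetical choices.
def _demo_weight_matrix(n_points=200, k=10):
    X = rand(n_points, 2)          # uniform points in the unit square (helper defined further down)
    I, J, D = knnsearch(X, k)      # exact k-NN search (fine in low dimension)
    W = weight_matrix(I, J, D, k)  # Gaussian weights with per-point bandwidth, symmetrized
    return W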
def nnk_weight_matrix(dataset, metric, mask, knn_param, reg=1e-10, symmetrize=True):
# Try to Load data
X = load_dataset(dataset=dataset, metric=metric)
X_normalized = X / np.linalg.norm(X, axis=1, keepdims=True)
num_of_nodes = mask.shape[0]
neighbor_indices = np.zeros((num_of_nodes, knn_param))
weight_values = np.zeros((num_of_nodes, knn_param))
error_values = np.ones((num_of_nodes, knn_param))
for node_i in range(num_of_nodes):
non_zero_index = np.array(mask[node_i, :])
non_zero_index = np.delete(non_zero_index, np.where(non_zero_index == node_i))
if len(non_zero_index) > knn_param:
non_zero_index = non_zero_index[:knn_param]
x_neighbors = X_normalized[non_zero_index]
g_i = 0.5 + np.dot(x_neighbors, X_normalized[node_i]) / 2
G_i = 0.5 + np.dot(x_neighbors, x_neighbors.T) / 2
        # NOTE: the NNK QP solve below is currently bypassed; x_opt = g_i falls back to
        # plain (normalized) kernel weights and leaves error_values at their initial ones.
        # x_opt, check = non_negative_qpsolver(G_i, g_i, g_i, reg)
        # error_values[node_i, :] = 1 - 2 * np.dot(x_opt, g_i) + np.dot(x_opt, np.dot(G_i, x_opt))
        x_opt = g_i
weight_values[node_i, :] = x_opt / np.sum(x_opt)
neighbor_indices[node_i, :] = non_zero_index
row_indices = np.expand_dims(np.arange(0, num_of_nodes), 1)
row_indices = np.tile(row_indices, [1, knn_param])
adjacency = sparse.coo_matrix((weight_values.ravel(), (row_indices.ravel(), neighbor_indices.ravel())),
shape=(num_of_nodes, num_of_nodes))
if symmetrize:
error = sparse.coo_matrix((error_values.ravel(), (row_indices.ravel(), neighbor_indices.ravel())),
shape=(num_of_nodes, num_of_nodes))
# Alternate way of doing: error_index = sparse.find(error > error.T); adjacency[error_index[0], error_index[
# 1]] = 0
adjacency = adjacency.multiply(error < error.T)
adjacency = adjacency.maximum(adjacency.T)
adjacency.eliminate_zeros()
error_values = error_values[:, 0]
return adjacency.tocsr(), error_values
# Compute boundary points
# k = number of neighbors to use
def boundary_points_new(X, k, I=None, J=None, D=None, ReturnNormals=False):
if (I is None) or (J is None) or (D is None):
n = X.shape[0]
d = X.shape[1]
if d <= 5:
I, J, D = knnsearch(X, k)
else:
I, J, D = knnsearch_annoy(X, k)
# Restrict I,J,D to k neighbors
k = np.minimum(I.shape[1], k)
n = X.shape[0]
I = I[:, :k]
J = J[:, :k]
D = D[:, :k]
W = weight_matrix(I, J, D, k, f=lambda x: np.ones_like(x), symmetrize=False)
L = graph_laplacian(W)
# Estimates of normal vectors
nu = -L * X
nu = np.transpose(nu)
norms = np.sqrt(np.sum(nu * nu, axis=0))
nu = nu / norms
nu = np.transpose(nu)
print(nu.shape)
# Boundary test
NN = X[J]
NN = np.swapaxes(NN[:, 1:, :], 0, 1) # This is kxnxd
V = NN - X # This is x^i-x^0 kxnxd array
NN_nu = nu[J]
W = (np.swapaxes(NN_nu[:, 1:, :], 0, 1) + nu) / 2
xd = np.sum(V * W, axis=2) # dist to boundary
Y = np.max(-xd, axis=0)
if ReturnNormals:
return Y, nu
else:
return Y
# Compute boundary points
# k = number of neighbors to use
def boundary_points(X, k, I=None, J=None, D=None, ReturnNormals=False, R=np.inf):
if (I is None) or (J is None) or (D is None):
n = X.shape[0]
d = X.shape[1]
if d <= 5:
I, J, D = knnsearch(X, k)
else:
I, J, D = knnsearch_annoy(X, k)
# Restrict I,J,D to k neighbors
k = np.minimum(I.shape[1], k)
n = X.shape[0]
I = I[:, :k]
J = J[:, :k]
D = D[:, :k]
W = weight_matrix(I, J, D, k, f=lambda x: np.ones_like(x), symmetrize=False)
L = graph_laplacian(W)
# Estimates of normal vectors
nu = -L * X
nu = np.transpose(nu)
norms = np.sqrt(np.sum(nu * nu, axis=0))
nu = nu / norms
nu = np.transpose(nu)
# Boundary test
NN = X[J]
NN = np.swapaxes(NN[:, 1:, :], 0, 1) # This is kxnxd
V = NN - X # This is x^i-x^0 kxnxd array
xd = np.sum(V * nu, axis=2) # xd coordinate (kxn)
sqdist = np.sum(V * V, axis=2)
Y = np.max((xd * xd - sqdist) / (2 * R) - xd, axis=0)
if ReturnNormals:
return Y, nu
else:
return Y
# Construct k-nn sparse distance matrix
# Note: Matrix is not symmetric
def knn_weight_matrix(X, k, f=exp_weight):
I, J, D = knnsearch_annoy(X, k)
W = weight_matrix(I, J, D, k, f=f)
return W
# Solves Lx=f subject to Rx=g at ind points
def gmres_bc_solve(L, f, R, g, ind):
# Mix matrices based on boundary points
A = L.copy()
A = A.tolil()
A[ind, :] = R[ind, :]
A = A.tocsr()
# Right hand side
b = f.copy()
b[ind] = g[ind]
# Preconditioner
m = A.shape[0]
M = A.diagonal()
M = sparse.spdiags(1 / M, 0, m, m).tocsr()
# GMRES solver
# start_time = time.time()
u, info = sparse.linalg.gmres(A, b, M=M)
# print("--- %s seconds ---" % (time.time() - start_time))
# print('gmres_err = %f'%np.max(np.absolute(A*u-b)))
return u
# Poisson solve
# Solves Lu = f with preconditioned conjugate gradient
def pcg_solve(L, f, x0=None, tol=1e-10):
# start_time = time.time()
L = L.tocsr()
# Conjugate gradient with Jacobi preconditioner
m = L.shape[0]
M = L.diagonal()
M = sparse.spdiags(1 / M, 0, m, m).tocsr()
if x0 is None:
u, i = splinalg.cg(L, f, tol=tol, M=M)
else:
u, i = splinalg.cg(L, f, x0=x0, tol=tol, M=M)
# print("--- %s seconds ---" % (time.time() - start_time))
return u
# Finds k Dirichlet eigenvectors
# Solves Lu = lambda u subject to u(I)=0
def dirichlet_eigenvectors(L, I, k):
L = L.tocsr()
n = L.shape[0]
# Locations of labels
idx = np.full((n,), True, dtype=bool)
idx[I] = False
# Left hand side matrix
A = L[idx, :]
A = A[:, idx]
# Eigenvector solver
vals, vec = sparse.linalg.eigs(A, k=k, which='SM')
vec = vec.real
vals = vals.real
# Add labels back into array
u = np.zeros((n, k))
u[idx, :] = vec
if k == 1:
u = u.flatten()
return u, vals
# Constrained linear solve
# Solves Lu = f subject to u(I)=g
def constrained_solve(L, I, g, f=None, x0=None, tol=1e-10):
L = L.tocsr()
n = L.shape[0]
# Locations of labels
idx = np.full((n,), True, dtype=bool)
idx[I] = False
# Right hand side
b = -L[:, I] * g
b = b[idx]
if f is not None:
b = b + f[idx]
# Left hand side matrix
A = L[idx, :]
A = A[:, idx]
# start_time = time.time()
# Conjugate gradient with Jacobi preconditioner
m = A.shape[0]
M = A.diagonal()
M = sparse.spdiags(1 / (M + 1e-10), 0, m, m).tocsr()
if x0 is None:
v, i = splinalg.cg(A, b, tol=tol, M=M)
else:
v, i = splinalg.cg(A, b, x0=x0[idx], tol=tol, M=M)
# print("--- %s seconds ---" % (time.time() - start_time))
# Add labels back into array
u = np.ones((n,))
u[idx] = v
u[I] = g
return u
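# Illustrative sketch (not part of the original module): harmonic extension on a 4-node
# path graph with the two endpoints held at 0 and 1. The toy adjacency is hypothetical.
def _demo_constrained_solve():
    A = sparse.csr_matrix(np.array([[0., 1., 0., 0.],
                                    [1., 0., 1., 0.],
                                    [0., 1., 0., 1.],
                                    [0., 0., 1., 0.]]))
    L = graph_laplacian(A, norm="none")
    I = np.array([0, 3])               # labeled vertices
    g = np.array([0., 1.])             # boundary values
    return constrained_solve(L, I, g)  # approximately [0, 1/3, 2/3, 1]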
# Returns n random points in R^d
def rand(n, d):
return random.rand(n, d)
# Returns n random points in annulus (r1,r2)
def rand_annulus(n, d, r1, r2):
N = 0
X = np.zeros((1, d))
while X.shape[0] <= n:
Y = r2 * (2 * rand(n, d) - 1)
dist2 = np.sum(Y * Y, axis=1)
I = (dist2 < r2 * r2) & (dist2 > r1 * r1)
Y = Y[I, :]
X = np.vstack((X, Y))
X = X[1:(n + 1)]
return X
# Returns n random points in unit ball in R^d
def rand_ball(n, d):
N = 0
X = np.zeros((1, d))
while X.shape[0] <= n:
Y = 2 * rand(n, d) - 1
I = np.sum(Y * Y, axis=1) < 1
Y = Y[I, :]
X = np.vstack((X, Y))
X = X[1:(n + 1)]
return X
def randn(n, d):
X = np.zeros((n, d))
for i in range(d):
X[:, i] = np.random.normal(0, 1, n)
return X
def bean_data(n, h):
# n = number of points
# h = height of bridge (h=0.2)
a = -1
b = 1
    x = a + (b - a) * random.rand(3 * n)
    c = -0.6
    d = 0.6
    y = c + (d - c) * random.rand(3 * n)
    X = np.transpose(np.vstack((x, y)))
    dist_from_x_axis = 0.4 * np.sqrt(1 - x ** 2) * (1 + h - np.cos(3 * x))
    in_bean = abs(y) <= dist_from_x_axis
    X = X[in_bean, :]
    if X.shape[0] < n:
        print('Not enough samples')
    else:
        X = X[:n, :]
return X
def mesh(X):
T = spatial.Delaunay(X[:, :2]);
return T.simplices
def box_mesh(X, u=None):
n = X.shape[0]
d = X.shape[1]
if d > 2:
X = X[:, 0:2]
x1 = X[:, 0].min()
x2 = X[:, 0].max()
y1 = X[:, 1].min()
y2 = X[:, 1].max()
corners = np.array([[x1, y1], [x2, y2], [x1, y2], [x2, y1]])
X = np.append(X, corners, axis=0)
Tri = mesh(X)
if u is not None:
u = np.append(u, [0, 0, 0, 0])
for i in range(n, n + 4):
I = (Tri[:, 0] == i) | (Tri[:, 1] == i) | (Tri[:, 2] == i)
nn_tri = Tri[I, :].flatten()
nn_tri = np.unique(nn_tri[nn_tri < n])
u[i] = np.mean(u[nn_tri])
# u[i] = np.max(u[nn_tri])
return X, Tri, u
else:
return X, Tri
# Triangulation of domain
def improved_mesh(X):
n = X.shape[0]
d = X.shape[1]
if d > 2:
X = X[:, 0:2]
# Normalize data to unit box
x1 = X[:, 0].min()
x2 = X[:, 0].max()
y1 = X[:, 1].min()
y2 = X[:, 1].max()
X = X - [x1, y1]
X[:, 0] = X[:, 0] / (x2 - x1)
X[:, 1] = X[:, 1] / (y2 - y1)
# Add padding data around
pad = 10 / np.sqrt(n)
m = int(pad * n)
Y = rand(m, 2)
Y[:, 0] = Y[:, 0] * pad - pad
Z = np.vstack((X, Y))
Y = rand(m, 2)
Y[:, 0] = Y[:, 0] * pad + 1
Z = np.vstack((Z, Y))
Y = rand(m, 2)
Y[:, 1] = Y[:, 1] * pad - pad
Z = np.vstack((Z, Y))
Y = rand(m, 2)
Y[:, 1] = Y[:, 1] * pad + 1
Z = np.vstack((Z, Y))
# Delaunay triangulation
T = spatial.Delaunay(Z);
Tri = T.simplices
J = np.sum(Tri >= n, axis=1) == 0;
Tri = Tri[J, :]
return Tri
def plot(X, u):
Tri = mesh(X)
import mayavi.mlab as mlab
mlab.triangular_mesh(X[:, 0], X[:, 1], u, Tri)
mlab.view(azimuth=-45, elevation=60)
# Laplace learning
# Zhu, Xiaojin, Zoubin Ghahramani, and John Lafferty. "Semi-supervised learning using Gaussian fields and harmonic functions." Proceedings of the 20th International Conference on Machine Learning (ICML-03). 2003.
def laplace_solve(W, I, g, norm="none"):
L = graph_laplacian(W, norm=norm)
return constrained_solve(L, I, g)
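# Illustrative sketch (not part of the original module): Laplace learning on two
# synthetic Gaussian clusters. The helper _demo_two_cluster_data and all names below
# are hypothetical; they exist only for this demo and the ones further down.
def _demo_two_cluster_data(n_per_class=100, shift=4.0):
    X = np.vstack((randn(n_per_class, 2), randn(n_per_class, 2) + shift))
    labels = np.hstack((np.zeros(n_per_class, dtype=int), np.ones(n_per_class, dtype=int)))
    return X, labels
def _demo_laplace_solve(n_per_class=100, k=10):
    X, labels = _demo_two_cluster_data(n_per_class)
    I, J, D = knnsearch(X, k)                # exact k-NN search (2D data)
    W = weight_matrix(I, J, D, k)            # symmetric Gaussian weights
    train_ind = randomize_labels(labels, 2)  # 2 labeled points per class
    u = laplace_solve(W, train_ind, labels[train_ind])
    pred = (u > 0.5).astype(int)             # threshold the harmonic extension
    return accuracy(pred, labels, len(train_ind))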
# Shift trick
# W = Weight matrix
# I = indices of labels
# g = +1/-1 values of labels
def shift_solve(W, I, g):
# Laplace learning
u = laplace_solve(W, I, g)
# Shift solution
s = degrees(W)
c = np.sum(s[I] * g) / sum(s[I])
u = u - c
u = u - np.mean(u)
return u
# Shift trick by mean
# W = Weight matrix
# I = indices of labels
# g = +1/-1 values of labels
def meanshift_solve(W, I, g):
# Laplace learning
u = laplace_solve(W, I, g)
# Center solution
u = u - np.mean(u)
return u
# Reweights the weight matrix for WNLL
def wnll(W, I):
n = W.shape[0]
m = len(I)
a = np.ones((n,))
a[I] = n / m
D = sparse.spdiags(a, 0, n, n).tocsr()
W = D * W + W * D
return W
# Weighted nonlocal Laplacian
# Shi, Zuoqiang, Stanley Osher, and Wei Zhu. "Weighted nonlocal Laplacian on interpolation from sparse data." Journal of Scientific Computing 73.2-3 (2017): 1164-1177.
def wnll_solve(W, I, g):
n = W.shape[0]
W = wnll(W, I)
L = graph_laplacian(W, norm="none")
return constrained_solve(L, I, g)
# Properly weighted Laplacian
# Calder, Jeff, and Dejan Slepčev. "Properly-weighted graph Laplacian for semi-supervised learning." arXiv preprint arXiv:1810.04351 (2018).
def properlyweighted_solve(W, I, g, X, alpha, zeta, r):
n = W.shape[0]
rzeta = r / (zeta - 1) ** (1 / alpha)
Xtree = spatial.cKDTree(X[I, :])
D, J = Xtree.query(X)
D[D < rzeta] = rzeta
gamma = 1 + (r / D) ** alpha
D = sparse.spdiags(gamma, 0, n, n).tocsr()
L = graph_laplacian(D * W + W * D, norm="none")
return constrained_solve(L, I, g)
# Game theoretic p-Laplace learning
# Flores Rios, Mauricio, Jeff Calder, and Gilad Lerman. "Algorithms for $\ell_p$-based semi-supervised learning on graphs." arXiv preprint arXiv:1901.05031 (2019).
def plaplace_solve(W, I, g, p, sol_method="SemiImplicit", norm="none"):
# start_time = time.time()
n = W.shape[0]
W = W / W.max()
if p == float("inf"):
alpha = 0
delta = 1
else:
alpha = 1 / p
delta = 1 - 2 / p
dx = degrees(W)
theta = 1.2 * (2 * alpha + np.max(dx) * delta)
if p == float("inf"):
beta = 1
gamma = 1 / theta
else:
beta = (theta * p - 2) / (theta * p)
gamma = (p - 2) / (theta * p - 2)
if norm == "normalized":
deg = dx[I] ** (1 / 2)
g = g / deg
L = graph_laplacian(W)
u = constrained_solve(L, I, g)
uu = np.max(g) * np.ones((n,))
ul = np.min(g) * np.ones((n,))
WI, WJ, WV = sparse.find(W)
# Set labels
u[I] = g
uu[I] = g
ul[I] = g
# Time step for gradient descent
dt = 0.9 / (alpha + 2 * delta)
if sol_method == "GradientDescentCcode":
try:
import cmodules.cgraphpy as cgp
except:
print("cgraphpy cmodule not found. You may just need to compile it.")
sys.exit()
# Type casting and memory blocking
uu = np.ascontiguousarray(uu, dtype=np.float64)
ul = np.ascontiguousarray(ul, dtype=np.float64)
WI = np.ascontiguousarray(WI, dtype=np.int32)
WJ = np.ascontiguousarray(WJ, dtype=np.int32)
WV = np.ascontiguousarray(WV, dtype=np.float64)
I = np.ascontiguousarray(I, dtype=np.int32)
g = np.ascontiguousarray(g, dtype=np.float64)
cgp.lp_iterate(uu, ul, WI, WJ, WV, I, g, p, 1e6, 1e-1, 0.0)
u = (uu + ul) / 2
# Check residual
L2uu = -L * uu
LIuu = graph_infinity_laplacian(W, uu, I=WI, J=WJ, V=WV)
resu = alpha * L2uu / dx + delta * LIuu
resu[I] = 0
L2ul = -L * ul
LIul = graph_infinity_laplacian(W, ul, I=WI, J=WJ, V=WV)
resl = alpha * L2ul / dx + delta * LIul
resl[I] = 0
# print('Upper residual = %f' % np.max(np.absolute(resu)))
# print('Lower residual = %f' % np.max(np.absolute(resl)))
else:
err = 1e6
i = 0
while err > 1e-1:
i += 1
# Graph laplacians
L2u = -L * u
LIu = graph_infinity_laplacian(W, u, I=WI, J=WJ, V=WV)
# Residual error
res = alpha * L2u / dx + delta * LIu
res[I] = 0
# err = np.max(np.absolute(res))
# print("Residual error = "+str(err))
# Update
if sol_method == "GradientDescent":
L2uu = -L * uu
LIuu = graph_infinity_laplacian(W, uu, I=WI, J=WJ, V=WV)
res = alpha * L2uu / dx + delta * LIuu
res[I] = 0
uu = uu + dt * res
err = np.max(np.absolute(res))
# print("Upper residual = "+str(err))
L2ul = -L * ul
LIul = graph_infinity_laplacian(W, ul, I=WI, J=WJ, V=WV)
res = alpha * L2ul / dx + delta * LIul
res[I] = 0
ul = ul + dt * res
err = np.max(np.absolute(res))
# print("Lower residual = "+str(err))
err1 = np.max(uu - ul)
err2 = np.min(uu - ul)
# print("Residual error = "+str(err1)+","+str(err2))
err = err1
u = (uu + ul) / 2
elif sol_method == "SemiImplicit":
rhs = beta * (2 * gamma * dx * LIu - L2u)
u = constrained_solve(L, I, g, f=rhs, x0=u, tol=err / 100)
else:
print("Invalid p-Laplace solution method.")
sys.exit()
if norm == "normalized":
deg = dx ** (1 / 2)
u = u * deg
# print("--- %s seconds ---" % (time.time() - start_time))
return u
# Gradient of function on graph
# W = sparse weight matrix
# u = function on graph
def graph_gradient(W, u, I=None, J=None, V=None):
n = W.shape[0]
if I is None or J is None:
I, J, V = sparse.find(W)
G = sparse.coo_matrix((V * (u[J] - u[I]), (I, J)), shape=(n, n)).tocsr()
return G
# Divergence of vector field F (F should be skew-symmetric)
# F = sparse matrix representing vector field
def graph_divergence(F, W):
F = F.multiply(W)
return 2 * np.squeeze(np.array(np.sum(F, axis=1)))
# Random-walk SSL
# Zhou, Dengyong, et al. "Learning with local and global consistency." Advances in neural information processing systems. 2004.
def randomwalk_solve(W, I, g, epsilon):
n = W.shape[0]
# Zero diagonals
W = W - sparse.spdiags(W.diagonal(), 0, n, n)
# Construct Laplacian matrix
Dinv2 = degree_matrix(W, p=-1 / 2)
L = sparse.identity(n) - (1 - epsilon) * Dinv2 * W * Dinv2;
# Format right hand side
b = np.zeros((n,))
b[I] = g
return pcg_solve(L, b)
# Computes accuracy of labeling
# m = number of labeled points used
def accuracy(L, L_true, m):
# Remove unlabeled nodes
I = L_true >= 0
L = L[I]
L_true = L_true[I]
# Compute accuracy
return 100 * np.maximum(np.sum(L == L_true) - m, 0) / (len(L) - m)
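# Illustrative sketch (not part of the original module): accuracy discounts the m
# labeled points, which are correct by construction. The toy arrays are hypothetical.
def _demo_accuracy():
    pred = np.array([0, 0, 1, 1, 1])
    truth = np.array([0, 0, 1, 1, 0])
    return accuracy(pred, truth, 1)  # (4 correct - 1 given) / (5 - 1) = 75.0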
# Projects all columns of (kxn) matrix X onto k-simplex
def ProjectToSimplex(X):
n = X.shape[1]
k = X.shape[0]
Xs = -np.sort(-X, axis=0) # Sort descending
A = np.tril(np.ones((k, k)))
Sum = A @ Xs
Max = np.transpose((np.transpose(Sum) - 1) / (np.arange(k) + 1))
Xs[:-1, :] = Xs[1:, :]
Xs[-1, :] = (Sum[k - 1, :] - 1) / k
I = np.argmax(Max >= Xs, axis=0)
X = np.maximum(X - Max[I, range(n)], 0)
return X
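# Illustrative sketch (not part of the original module): project random score vectors
# onto the probability simplex; afterwards each column is non-negative and sums to one.
def _demo_project_to_simplex(k=3, n=5):
    U = random.rand(k, n)  # hypothetical raw scores
    P = ProjectToSimplex(U)
    return np.allclose(np.sum(P, axis=0), 1.0)  # expected: True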
# Takes list of labels and converts to vertices of simplex format
def LabelsToVec(L):
n = L.shape[0]
labels = np.unique(L)
k = len(labels)
for i in range(k):
L[L == labels[i]] = i
L = L.astype(int)
X = np.zeros((k, n))
X[L, range(n)] = 1
return X, labels
# Projects all columns of (kxn) matrix X to the closest vertex of the simplex
# Assumes X already lives in the simplex, e.g., is the output of ProjectToSimplex
def ClosestVertex(X):
n = X.shape[1]
k = X.shape[0]
L = np.argmax(X, axis=0)
X = np.zeros((k, n))
X[L, range(n)] = 1
return X
# Threshold with temperature to closest vertex
def ClosestVertexTemp(X, T=0.01):
n = X.shape[1]
k = X.shape[0]
beta = 1 / T
Y = np.exp(beta * X)
Ysum = np.sum(Y, axis=0)
Y = Y / Ysum
X[0, :] = Y[0, :]
for i in range(1, k):
X[i, :] = X[i - 1, :] + Y[i, :]
R = random.rand(n, 1)
L = np.sum(R.flatten() > X, axis=0)
X = np.zeros((k, n))
X[L, range(n)] = 1
return X
# Volume MBO, initialized with Poisson
def poisson_volumeMBO(W, I, g, dataset, beta, T, volume_mult):
# Set diagonal entries to zero
W = diag_multiply(W, 0)
try:
import cmodules.cgraphpy as cgp
except:
print("cgraphpy cmodule not found. You may just need to compile it.")
sys.exit()
# Solve Poisson problem and compute labels
u, _ = poisson(W, I, g)
max_locations = np.argmax(u, axis=0)
u = (np.unique(g))[max_locations]
n = W.shape[0]
k = len(np.unique(g))
WI, WJ, WV = sparse.find(W)
# Class counts
ClassCounts = (n * beta).astype(int)
# Type casting and memory blocking
u = np.ascontiguousarray(u, dtype=np.int32)
WI = np.ascontiguousarray(WI, dtype=np.int32)
WJ = np.ascontiguousarray(WJ, dtype=np.int32)
WV = np.ascontiguousarray(WV, dtype=np.float32)
I = np.ascontiguousarray(I, dtype=np.int32)
g = np.ascontiguousarray(g, dtype=np.int32)
ClassCounts = np.ascontiguousarray(ClassCounts, dtype=np.int32)
cgp.volume_mbo(u, WI, WJ, WV, I, g, ClassCounts, k, 0.0, T, volume_mult)
# Set given labels and convert to vector format
u[I] = g
u, _ = LabelsToVec(u)
return u
# Volume MBO (Jacobs, et al.)
def volumeMBO(W, I, g, dataset, beta, T, volume_mult):
# Set diagonal entries to zero
W = diag_multiply(W, 0)
try:
import cmodules.cgraphpy as cgp
except:
print("cgraphpy cmodule not found. You may just need to compile it.")
sys.exit()
n = W.shape[0]
k = len(np.unique(g))
u = np.zeros((n,))
WI, WJ, WV = sparse.find(W)
# Class counts
ClassCounts = (n * beta).astype(int)
# Type casting and memory blocking
u = np.ascontiguousarray(u, dtype=np.int32)
WI = np.ascontiguousarray(WI, dtype=np.int32)
WJ = np.ascontiguousarray(WJ, dtype=np.int32)
WV = np.ascontiguousarray(WV, dtype=np.float32)
I = np.ascontiguousarray(I, dtype=np.int32)
g = np.ascontiguousarray(g, dtype=np.int32)
ClassCounts = np.ascontiguousarray(ClassCounts, dtype=np.int32)
cgp.volume_mbo(u, WI, WJ, WV, I, g, ClassCounts, k, 1.0, T, volume_mult)
# Set given labels and convert to vector format
u[I] = g
u, _ = LabelsToVec(u)
return u
# Multiclass MBO
# Garcia-Cardona, Cristina, et al. "Multiclass data segmentation using diffuse interface methods on graphs." IEEE transactions on pattern analysis and machine intelligence 36.8 (2014): 1600-1613.
def multiclassMBO(W, I, g, eigvals, eigvecs, dataset, true_labels=None):
n = W.shape[0]
k = len(np.unique(g))
Ns = 6
if dataset == 'MNIST' or dataset == 'FashionMNIST' or dataset == 'cifar':
dt = 0.15
mu = 50
elif dataset == 'WEBKB':
dt = 1
mu = 4
else:
print('Dataset not supported by MBO...')
sys.exit(2)
# Load eigenvalues and eigenvectors
X = eigvecs
num_eig = len(eigvals)
# Form matrices
V = np.diag(1 / (1 + (dt / Ns) * eigvals))
Y = X @ V
Xt = np.transpose(X)
# Random initial labeling
u = random.rand(k, n)
u = ProjectToSimplex(u)
# Set initial known labels
J = np.zeros(n, )
K = np.ones(n, ) * g[0]
J[I] = 1
K[I] = g
Kg, _ = LabelsToVec(K)
Kg = Kg * J
u = Kg + (1 - J) * u
# Maximum number of iterations
T = 10
for i in range(T):
for s in range(Ns):
Z = (u - (dt / Ns) * mu * J * (u - Kg)) @ Y
u = Z @ Xt
# Projection step
u = ProjectToSimplex(u)
u = ClosestVertex(u)
# Compute accuracy if all labels are provided
if true_labels is not None:
max_locations = np.argmax(u, axis=0)
labels = (np.unique(g))[max_locations]
labels[I] = g
acc = accuracy(labels, true_labels, len(I))
print('Accuracy = %.2f' % acc)
return u
# Poisson MBO
def poissonMBO(W, I, g, dataset, beta, true_labels=None, temp=0, use_cuda=False, Ns=40, mu=1, T=50):
n = W.shape[0]
unique_labels = np.unique(g)
k = len(unique_labels)
num_labels = np.zeros((k,))
for i in range(k):
num_labels[i] = np.sum(g == unique_labels[i])
W = diag_multiply(W, 0)
if dataset == 'WEBKB':
mu = 1000
Ns = 8
# Labels to vector and correct position
J = np.zeros(n, )
K = np.ones(n, ) * g[0]
J[I] = 1
K[I] = g
Kg, _ = LabelsToVec(K)
Kg = Kg * J
# Poisson source term
c = np.sum(Kg, axis=1) / len(I)
b = np.transpose(Kg)
b[I, :] = b[I, :] - c
b = np.transpose(b)
L = graph_laplacian(W, norm='none')
# Initialize u via Poisson learning
# u = np.zeros((k,n))
# for j in range(k):
# u[j,:] = pcg_solve(L,b[j,:])
# u = mu*u
# u = np.transpose(np.transpose(u) - np.mean(u,axis=1))
u, mix_time = poisson(W, I, g, use_cuda=use_cuda, beta=beta)
# Ns = int(mix_time/4)
u = ProjectToSimplex(u)
u = ClosestVertex(u)
# Time step for stability
dt = 1 / np.max(degrees(W))
P = sparse.identity(n) - dt * L
Db = mu * dt * b
if use_cuda:
Pt = torch_sparse(P).cuda()
Dbt = torch.from_numpy(np.transpose(Db)).float().cuda()
for i in range(T):
if use_cuda:
# Put on GPU and run heat equation
ut = torch.from_numpy(np.transpose(u)).float().cuda()
for s in range(Ns):
# u = u*P + Db
ut = torch.sparse.addmm(Dbt, Pt, ut)
# Put back on CPU
u = np.transpose(ut.cpu().numpy())
else: # Use CPU
for s in range(Ns):
# u = u + dt*(mu*b - u*L)
u = u * P + Db
# Projection step
# u = np.diag(beta/num_labels)@u
u = ProjectToSimplex(u)
u = ClosestVertex(u)
u = np.transpose(np.transpose(u) - np.mean(u, axis=1) + beta)
# Compute accuracy if all labels are provided
if true_labels is not None:
max_locations = np.argmax(u, axis=0)
labels = (np.unique(g))[max_locations]
labels[I] = g
acc = accuracy(labels, true_labels, len(I))
print('Accuracy = %.2f' % acc)
return u
def torch_sparse(A):
A = A.tocoo()
values = A.data
indices = np.vstack((A.row, A.col))
i = torch.LongTensor(indices)
v = torch.FloatTensor(values)
shape = A.shape
return torch.sparse.FloatTensor(i, v, torch.Size(shape))
# Sparse Label Propagation
def SparseLabelPropagation(W, I, g, true_labels=None):
n = W.shape[0]
k = len(np.unique(g))
WI, WJ, WV = sparse.find(W)
B = sparse.coo_matrix((np.ones(len(WV), ), (WI, WJ)), shape=(n, n)).tocsr() # Ones in all entries
# Construct matrix 1/2W and 1/deg
lam = 2 * W - (1 - 1e-10) * B
lam = -lam.log1p()
lam = lam.expm1() + B
Id = sparse.identity(n)
gamma = degree_matrix(W + 1e-10 * Id, p=-1)
# Random initial labeling
# u = random.rand(k,n)
u = np.zeros((k, n))
# Set initial known labels
J = np.zeros(n, )
K = np.ones(n, ) * g[0]
J[I] = 1
K[I] = g
Kg, _ = LabelsToVec(K)
Kg = Kg * J
# Initialization
Y = list()
for j in range(k):
Gu = graph_gradient(W, u[j, :], I=WI, J=WJ, V=WV)
Y.append(Gu)
# Main loop for sparse label propagation
T = 100
for i in range(T):
u_prev = np.copy(u)
# Compute div
for j in range(k):
div = graph_divergence(Y[j], W)
u[j, :] = u_prev[j, :] - gamma * div
u[j, I] = Kg[j, I] # Set labels
u_tilde = 2 * u[j, :] - u_prev[j, :]
Gu = -graph_gradient(W, u_tilde, I=WI, J=WJ, V=WV)
Y[j] = Y[j] + Gu.multiply(lam)
ind1 = B.multiply(abs(Y[j]) > 1)
ind2 = B - ind1
Y[j] = ind1.multiply(Y[j].sign()) + ind2.multiply(Y[j])
# Compute accuracy if all labels are provided
if true_labels is not None:
max_locations = np.argmax(u, axis=0)
labels = (np.unique(g))[max_locations]
labels[I] = g
acc = accuracy(labels, true_labels, len(I))
print('Accuracy = %.2f' % acc)
return u
# Dynamic Label Propagation
def DynamicLabelPropagation(W, I, g, alpha=0.05, lam=0.1, true_labels=None):
n = W.shape[0]
k = len(np.unique(g))
W = diag_multiply(W, 0)
# Labels to vector and correct position
J = np.zeros(n, )
K = np.ones(n, ) * g[0]
J[I] = 1
K[I] = g
u, _ = LabelsToVec(K)
u = u * J
# Set initial known labels
J = np.zeros(n, )
K = np.ones(n, ) * g[0]
J[I] = 1
K[I] = g
Kg, _ = LabelsToVec(K)
Kg = np.transpose(Kg * J)
u = np.copy(Kg)
if n > 5000:
print("Cannot use Dynamic Label Propagation on large datasets.")
else:
# Setup matrices
Id = sparse.identity(n)
D = degree_matrix(W, p=-1)
P = D * W
P = np.array(P.todense())
Pt = np.copy(P)
T = 2
for i in range(T):
v = P @ u
u = Pt @ u
u[I, :] = Kg[I, :]
Pt = P @ Pt @ np.transpose(P) + alpha * v @ np.transpose(v) + lam * Id
# Compute accuracy if all labels are provided
if true_labels is not None:
u = np.array(u)
max_locations = np.argmax(u, axis=1)
labels = (np.unique(g))[max_locations]
labels[I] = g
acc = accuracy(labels, true_labels, len(I))
print('i:%d' % i + ',Accuracy = %.2f' % acc)
u = np.transpose(np.array(u))
return u
# Centered and Iterated Centered Kernel of Mai/Couillet 2018
def CenteredKernel(W, I, g, true_labels=None):
n = W.shape[0]
k = len(np.unique(g))
W = diag_multiply(W, 0)
# Labels to vector and correct position
J = np.zeros(n, )
K = np.ones(n, ) * g[0]
J[I] = 1
K[I] = g
Kg, _ = LabelsToVec(K)
Kg = np.transpose(Kg * J)
# Center labels
c = np.sum(Kg, axis=0) / len(I)
Kg[I, :] = Kg[I, :] - c
u = np.copy(Kg)
v = np.ones((n, 1))
vt = np.ones((1, n))
e = np.random.rand(n, 1)
for i in range(100):
y = W * (e - (1 / n) * v @ (vt @ e))
w = y - (1 / n) * v @ (vt @ y) # =Ae
l = abs(np.transpose(e) @ w / (np.transpose(e) @ e))
e = w / np.linalg.norm(w)
# Number of iterations
# alpha = 5*l/4
alpha = 105 * l / 100
T = 1000
err = 1
while err > 1e-10:
y = W * (u - (1 / n) * v @ (vt @ u))
w = (1 / alpha) * (y - (1 / n) * v @ (vt @ y)) - u # Laplacian
w[I, :] = 0
err = np.max(np.absolute(w))
u = u + w
# Compute accuracy if all labels are provided
if true_labels is not None:
max_locations = np.argmax(u, axis=1)
labels = (np.unique(g))[max_locations]
labels[I] = g
acc = accuracy(labels, true_labels, len(I))
print('Accuracy = %.2f' % acc)
return np.transpose(u)
def vec_acc(u, I, g, true_labels):
max_locations = np.argmax(u, axis=0)
labels = (np.unique(g))[max_locations]
labels[I] = g
acc = accuracy(labels, true_labels, len(I))
return acc
# def volume_label_projection(u,beta,s=None):
#
# k = u.shape[0]
# n = u.shape[1]
# if s is None:
# s = np.ones((k,))
# for i in range(100):
# grad = beta - np.sum(ClosestVertex(np.diag(s)@u),axis=1)/n
# err0 = np.max(np.absolute(grad))
#
# dt = 1
# snew = s + dt*grad
# gradnew = beta - np.sum(ClosestVertex(np.diag(snew)@u),axis=1)/n
# err = err0
# newerr = np.max(np.absolute(gradnew))
# while newerr < err:
# print(dt)
# dt = 2*dt
# snew = s + dt*grad
# gradnew = beta - np.sum(ClosestVertex(np.diag(snew)@u),axis=1)/n
# err = newerr
# newerr = np.max(np.absolute(gradnew))
# dt = dt/2
# snew = s + dt*grad
# gradnew = beta - np.sum(ClosestVertex(np.diag(snew)@u),axis=1)/n
# newerr = np.max(np.absolute(gradnew))
# while newerr >= err:
# print(dt)
# dt = dt/2
# snew = s + dt*grad
# gradnew = beta - np.sum(ClosestVertex(np.diag(snew)@u),axis=1)/n
# newerr = np.max(np.absolute(gradnew))
# if dt < 1:
# dt = dt/2
#
# s = s + dt*grad
#
# print(err)
# if err == 0:
# print(i)
# break
#
# #s = s + dt*(beta - beta_u)
#
# return ClosestVertex(np.diag(s)@u),s
def volume_label_projection(u, beta, s=None, dt=None):
k = u.shape[0]
n = u.shape[1]
if s is None:
s = np.ones((k,))
if dt is None:
dt = 10
# print(np.around(100*beta,decimals=1))
# print(np.around(100*np.sum(ClosestVertex(np.diag(s)@u),axis=1)/n,decimals=1))
for i in range(100):
class_size = np.sum(ClosestVertex(np.diag(s) @ u), axis=1) / n
grad = beta - class_size
# print(np.around(100*class_size,decimals=1))
# err = np.max(np.absolute(grad))
# if err == 0:
# break
s = np.clip(s + dt * grad, 0.5, 2)
# print(np.around(100*beta,decimals=1))
# print(np.around(100*np.sum(ClosestVertex(np.diag(s)@u),axis=1)/n,decimals=1))
# print(np.around(100*beta - 100*np.sum(ClosestVertex(np.diag(s)@u),axis=1)/n,decimals=4))
return ClosestVertex(np.diag(s) @ u), s
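# Illustrative sketch (not part of the original module): rescale random class scores so
# that the thresholded labeling roughly matches prescribed class proportions beta.
# The score matrix and proportions below are hypothetical.
def _demo_volume_label_projection(n=300):
    u = random.rand(3, n)
    beta = np.array([0.2, 0.3, 0.5])              # target class proportions
    u_proj, s = volume_label_projection(u, beta)
    return np.sum(u_proj, axis=1) / n             # should be close to beta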
# Poisson MBO with volume constraints
def poissonMBO_volume(W, I, g, dataset, beta, true_labels=None, temp=0, use_cuda=False, Ns=40, mu=1, T=20):
n = W.shape[0]
k = len(np.unique(g))
W = diag_multiply(W, 0)
if dataset == 'WEBKB':
mu = 1000
Ns = 8
# Labels to vector and correct position
J = np.zeros(n, )
K = np.ones(n, ) * g[0]
J[I] = 1
K[I] = g
Kg, _ = LabelsToVec(K)
Kg = Kg * J
# Poisson source term
c = np.sum(Kg, axis=1) / len(I)
b = np.transpose(Kg)
b[I, :] = b[I, :] - c
b = np.transpose(b)
D = degree_matrix(W)
# L = graph_laplacian(W,norm='none')
L = D - W.transpose()
# Initialize u via Poisson learning
u, _ = poisson(W, I, g, true_labels=true_labels, use_cuda=use_cuda, beta=beta)
u = mu * u
# Time step for stability
dt = 1 / np.max(degrees(W))
P = sparse.identity(n) - dt * L
Db = mu * dt * b
if use_cuda:
Pt = torch_sparse(P).cuda()
Dbt = torch.from_numpy(np.transpose(Db)).float().cuda()
for i in range(T):
# Heat equation step
if use_cuda:
# Put on GPU and run heat equation
ut = torch.from_numpy(np.transpose(u)).float().cuda()
for j in range(Ns):
ut = torch.sparse.addmm(Dbt, Pt, ut)
# Put back on CPU
u = np.transpose(ut.cpu().numpy())
else: # Use CPU
for j in range(Ns):
u = u * P + Db
# Projection step
u, s = volume_label_projection(u, beta)
# Compute accuracy if all labels are provided
if true_labels is not None:
max_locations = np.argmax(u, axis=0)
labels = (np.unique(g))[max_locations]
labels[I] = g
acc = accuracy(labels, true_labels, len(I))
print('Accuracy = %.2f' % acc)
return u
# Poisson Volume
def PoissonVolume(W, I, g, true_labels=None, use_cuda=False, training_balance=True, beta=None, min_iter=50):
# Run Poisson learning
u, _ = poisson(W, I, g, true_labels=true_labels, use_cuda=use_cuda, training_balance=training_balance, beta=beta)
# Volume constraints
_, s = volume_label_projection(u, beta)
return np.diag(s) @ u
def original_poisson(W, I, g, true_labels=None, use_cuda=False, training_balance=True, beta=None, min_iter=50):
n = W.shape[0]
unique_labels = np.unique(g)
k = len(unique_labels)
# Zero out diagonal for faster convergence
W = diag_multiply(W, 0)
# Labels to vector and correct position
J = np.zeros(n, )
K = np.ones(n, ) * g[0]
J[I] = 1
K[I] = g
Kg, _ = LabelsToVec(K)
Kg = Kg * J
# Poisson source term
c = np.sum(Kg, axis=1) / len(I)
b = np.transpose(Kg)
b[I, :] = b[I, :] - c
# Setup matrices
D = degree_matrix(W + 1e-10 * sparse.identity(n), p=-1)
# L = graph_laplacian(W,norm='none')
# P = sparse.identity(n) - D*L #Line below is equivalent when W symmetric
P = D * W.transpose()
Db = D * b
v = np.max(Kg, axis=0)
v = v / np.sum(v)
vinf = degrees(W) / np.sum(degrees(W))
RW = W.transpose() * D
u = np.zeros((n, k))
# vals, vec = sparse.linalg.eigs(RW,k=1,which='LM')
# vinf = np.absolute(vec.flatten())
# vinf = vinf/np.sum(vinf)
# Number of iterations
T = 0
if use_cuda:
Pt = torch_sparse(P).cuda()
ut = torch.from_numpy(u).float().cuda()
Dbt = torch.from_numpy(Db).float().cuda()
# start_time = time.time()
while (T < min_iter or np.max(np.absolute(v - vinf)) > 1 / n) and (T < 1000):
ut = torch.sparse.addmm(Dbt, Pt, ut)
v = RW * v
T = T + 1
# print("--- %s seconds ---" % (time.time() - start_time))
# Transfer to CPU and convert to numpy
u = ut.cpu().numpy()
else: # Use CPU
# start_time = time.time()
while (T < min_iter or np.max(np.absolute(v - vinf)) > 1 / n) and (T < 1000):
uold = u.copy()
u = Db + P * u
v = RW * v
T = T + 1
# Compute accuracy if all labels are provided
if true_labels is not None:
max_locations = np.argmax(u, axis=1)
labels = (np.unique(g))[max_locations]
labels[I] = g
acc = accuracy(labels, true_labels, len(I))
print('%d,Accuracy = %.2f' % (T, acc))
# print("--- %s seconds ---" % (time.time() - start_time))
# Balancing for training data/class size discrepancy
if training_balance:
if beta is None:
u = u @ np.diag(1 / c)
else:
u = u @ np.diag(beta / c)
return np.transpose(u), T
# Poisson learning
def poisson(W, I, g, true_labels=None, use_cuda=False, training_balance=True, beta=None, min_iter=50, error=None):
n = W.shape[0]
unique_labels = np.unique(g)
k = len(unique_labels)
if error is None:
error = np.ones(n, dtype=np.float32)
else:
error = error.reshape((n,)) / np.max(error)
# Zero out diagonal for faster convergence
W = diag_multiply(W, 0)
# Labels to vector and correct position
J = np.zeros(n, )
K = np.ones(n, ) * g[0]
J[I] = 1
K[I] = g
Kg, _ = LabelsToVec(K)
Kg = Kg * J
# Poisson source term
c = np.sum(Kg, axis=1) / len(I)
b = np.transpose(Kg)
b[I, :] = 2 * b[I, :] - 1
# Setup matrices
# D = degree_matrix(W + 1e-10 * sparse.identity(n), p=-1)
# L = graph_laplacian(W,norm='none')
# P = sparse.identity(n) - D*L #Line below is equivalent when W symmetric
v_prev = np.random.random(size=(n, 1))
residue_energy = 1
u = np.zeros((n, k))
confidence_gain = W.transpose() #* sparse.spdiags(np.power(1 + error, -1), 0, n, n)
# vals, vec = sparse.linalg.eigs(RW,k=1,which='LM')
# vinf = np.absolute(vec.flatten())
# vinf = vinf/np.sum(vinf)
# Number of iterations
T = 0
if use_cuda:
Wt = torch_sparse(confidence_gain).cuda()
ut = torch.from_numpy(u).float().cuda()
bt = torch.from_numpy(b).float().cuda()
# start_time = time.time()
while (T < min_iter or residue_energy > 1e-10) and (T < 1000):
ut = torch.sparse.addmm(bt, Wt, ut)
v = W.transpose() * v_prev
residue_energy = np.linalg.norm(v - v_prev)
v_prev = v
T = T + 1
# print("--- %s seconds ---" % (time.time() - start_time))
# Transfer to CPU and convert to numpy
u = ut.cpu().numpy()
else: # Use CPU
# start_time = time.time()
while (T < min_iter or residue_energy > 1e-6) and (T < 1000):
u = np.clip(b + confidence_gain * u, a_min=-1, a_max=1)
v = W.transpose() * v_prev
residue_energy = np.linalg.norm(v - v_prev)
v_prev = v
T = T + 1
# Compute accuracy if all labels are provided
if true_labels is not None:
max_locations = np.argmax(u, axis=1)
labels = (np.unique(g))[max_locations]
labels[I] = g
acc = accuracy(labels, true_labels, len(I))
print('%d,Accuracy = %.2f' % (T, acc))
# print("--- %s seconds ---" % (time.time() - start_time))
print(f"T: {T}, residue: {residue_energy}")
# Balancing for training data/class size discrepancy
if training_balance:
if beta is None:
u = u @ np.diag(1 / c)
else:
u = u @ np.diag(beta / c)
return np.transpose(u), T
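# Illustrative sketch (not part of the original module): Poisson learning on the same
# hypothetical two-cluster data used in _demo_laplace_solve above.
def _demo_poisson(n_per_class=100, k=10):
    X, labels = _demo_two_cluster_data(n_per_class)
    I, J, D = knnsearch(X, k)
    W = weight_matrix(I, J, D, k)
    train_ind = randomize_labels(labels, 2)
    u, _ = poisson(W, train_ind, labels[train_ind])
    pred = np.argmax(u, axis=0)  # u has shape (num_classes, n)
    return accuracy(pred, labels, len(train_ind))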
# Poisson L1 based on Split Bregman Method
# Does not work as well as PoissonMBO
def poissonL1(W, I, g, dataset, norm="none", lam=100, mu=1000, Nouter=30, Ninner=6, true_labels=None):
n = W.shape[0]
k = len(np.unique(g))
# mu = mu*W.count_nonzero()/len(g) #Normalize constants
gamma = 1 / lam
WI, WJ, WV = sparse.find(W)
B = sparse.coo_matrix((np.ones(len(WV), ), (WI, WJ)), shape=(n, n)).tocsr() # Ones in all entries
L = graph_laplacian(2 * W.multiply(W), norm=norm)
deg = degrees(W)
dt = 1 / np.max(deg)
# Random initial labeling
# u = random.rand(k,n)
# u = ProjectToSimplex(u)
u = np.zeros((k, n))
# Set initial known labels
J = np.zeros(n, )
K = np.ones(n, ) * g[0]
J[I] = 1
K[I] = g
Kg, _ = LabelsToVec(K)
Kg = Kg * J
# Poisson parameters
c = np.sum(Kg, axis=1) / len(I)
b = np.transpose(Kg)
b[I, :] = b[I, :] - c
b = (mu / lam) * np.transpose(b)
# Initialize u via Poisson learning
u = np.zeros((k, n))
L = graph_laplacian(W, norm='none')
for j in range(k):
u[j, :] = pcg_solve(L, b[j, :])
u = np.transpose(np.transpose(u) - np.mean(u, axis=1))
# Initialization
V = list()
R = list()
gradu = list()
for j in range(k):
Gu = graph_gradient(W, u[j, :], I=WI, J=WJ, V=WV)
gradu.append(Gu)
V.append(Gu)
R.append(Gu)
# Main loop for Split Bregman iteration
for i in range(Nouter):
print('Outer:%d' % i)
for s in range(Ninner):
normV = 0 * W
for j in range(k):
divVR = graph_divergence(R[j] - V[j], W)
u[j, :] = pcg_solve(L, b[j, :] + divVR, x0=u[j, :], tol=1e-10)
# for s in range(100):
# u[j,:] = u[j,:] + dt*(b[j,:] + divVR - u[j,:]*L)
gradu[j] = graph_gradient(W, u[j, :], I=WI, J=WJ, V=WV)
V[j] = gradu[j] + R[j]
normV = normV + V[j].multiply(V[j])
normV = normV.sqrt()
# Shrinkage operation
# normV^{-1} for nonzero entries (tricky to do in sparse format)
# normV.eliminate_zeros(X)
normVinv = normV - (1 - 1e-10) * B
normVinv = -normVinv.log1p()
normVinv = normVinv.expm1() + B
C = normV.multiply(normVinv)
# print(np.sum(C>0))
# print(np.sum(C>0.9999))
# Compute shrinkage factor
# print(np.sum(normV>0))
shrink = normV - gamma * B
shrink = shrink.maximum(0)
# print(np.sum(shrink>0))
shrink = shrink.multiply(normVinv)
# Apply shrinkage
for j in range(k):
V[j] = V[j].multiply(shrink)
for j in range(k):
R[j] = R[j] + gradu[j] - V[j]
# Compute accuracy if all labels are provided
if true_labels is not None:
max_locations = np.argmax(u, axis=0)
labels = (np.unique(g))[max_locations]
labels[I] = g
acc = accuracy(labels, true_labels, len(I))
print('Accuracy = %.2f' % acc)
return u
# Heap functions
# d = values in heap (indexed by graph vertex)
# h = heap (contains indices of graph elements in heap)
# p = pointers from graph back to heap (are updated with heap operations)
# s = number of elements in heap
# Sift up
# i = heap index of element to be sifted up
def SiftUp(d, h, s, p, i):
pi = int(i / 2) # Parent index in heap
while pi != 0:
if d[h[pi]] > d[h[i]]: # If parent larger, then swap
# Swap in heap
tmp = h[pi]
h[pi] = h[i]
h[i] = tmp
# Update pointers to heap
p[h[i]] = i
p[h[pi]] = pi
# Update parent/child indices
i = pi
pi = int(i / 2)
else:
pi = 0
# Sift down
# i = heap index of element to be sifted down
def SiftDown(d, h, s, p, i):
ci = 2 * i # child index in heap
while ci <= s:
        if ci + 1 <= s and d[h[ci + 1]] < d[h[ci]]:  # Choose smallest child (guard the index first)
ci = ci + 1
if d[h[ci]] < d[h[i]]: # If child smaller, then swap
# Swap in heap
tmp = h[ci]
h[ci] = h[i]
h[i] = tmp
# Update pointers to heap
p[h[i]] = i
p[h[ci]] = ci
# Update parent/child indices
i = ci
ci = 2 * i
else:
ci = s + 1
# Pop smallest off of heap
# Returns index of smallest and size of new heap
def PopHeap(d, h, s, p):
# Index of smallest in heap
i = h[1]
# Put last element on top of heap
h[1] = h[s]
# Update pointer
p[h[1]] = 1
# Sift down the heap
SiftDown(d, h, s - 1, p, 1)
return i, s - 1
# Push element onto heap
# i = Graph index to add to heap
def PushHeap(d, h, s, p, i):
h[s + 1] = i # add to heap at end
p[i] = s + 1 # Update pointer to heap
SiftUp(d, h, s + 1, p, s + 1)
return s + 1
def stencil_solver(ui, u, w=None):
if w is None:
w = np.ones((len(u),))
m = len(u)
# Sort neighbors
I = np.argsort(u)
u = u[I]
w = w[I]
f = np.zeros((m + 1,))
for i in range(m):
f[i] = np.sum(np.maximum(u[i] - u, 0) ** 2)
f[m] = np.maximum(1, f[m - 1])
k = np.argmin(f < 1)
b = np.sum(u[:k])
c = np.sum(u[:k] ** 2)
t = (b + np.sqrt(b * b - k * c + k)) / k
check = np.sum(np.maximum(t - u, 0) ** 2)
if (abs(check - 1) > 1e-5):
print("Error")
return t
# return np.min(u) + 1
# C code version of dijkstra
def cDijkstra(W, I, g, WI=None, WJ=None, K=None):
n = W.shape[0]
k = len(I)
u = np.ones((n,)) * 1e10 # HJ Solver
l = -np.ones((n,), dtype=int) # Index of closest label
    if (WI is None) or (WJ is None) or (K is None):
# Reformat weight matrix W into form more useful for Dijkstra
WI, WJ, WV = sparse.find(W)
K = np.array((WJ[1:] - WJ[:-1]).nonzero()) + 1
K = np.append(0, np.append(K, len(WJ)))
try: # Try to use fast C version, if compiled
import cmodules.cgraphpy as cgp
# Type casting and memory blocking
u = np.ascontiguousarray(u, dtype=np.float64)
l = np.ascontiguousarray(l, dtype=np.int32)
WI = np.ascontiguousarray(WI, dtype=np.int32)
WV = np.ascontiguousarray(WV, dtype=np.float64)
K = np.ascontiguousarray(K, dtype=np.int32)
I = np.ascontiguousarray(I, dtype=np.int32)
g = np.ascontiguousarray(g, dtype=np.float64)
cgp.dijkstra(u, l, WI, K, WV, I, g, 1.0)
except:
print("You need to compile the cmodules!")
sys.exit(2)
return u
# Solve a general HJ equation with fast marching
def HJsolver(W, I, g, WI=None, WJ=None, K=None, p=1):
n = W.shape[0]
k = len(I)
u = np.ones((n,)) * 1e10 # HJ Solver
l = -np.ones((n,), dtype=int) # Index of closest label
    if (WI is None) or (WJ is None) or (K is None):
# Reformat weight matrix W into form more useful for Dijkstra
WI, WJ, WV = sparse.find(W)
K = np.array((WJ[1:] - WJ[:-1]).nonzero()) + 1
K = np.append(0, np.append(K, len(WJ)))
try: # Try to use fast C version, if compiled
import cmodules.cgraphpy as cgp
# Type casting and memory blocking
u = np.ascontiguousarray(u, dtype=np.float64)
l = np.ascontiguousarray(l, dtype=np.int32)
WI = np.ascontiguousarray(WI, dtype=np.int32)
WV = np.ascontiguousarray(WV, dtype=np.float64)
K = np.ascontiguousarray(K, dtype=np.int32)
I = np.ascontiguousarray(I, dtype=np.int32)
g = np.ascontiguousarray(g, dtype=np.int32)
cgp.HJsolver(u, l, WI, K, WV, I, g, 1.0, p, 1.0)
except:
# Initialization
s = 0 # Size of heap
h = -np.ones((n + 1,), dtype=int) # Active points heap (indices of active points)
A = np.zeros((n,), dtype=bool) # Active flag
p = -np.ones((n,), dtype=int) # Pointer back to heap
V = np.zeros((n,), dtype=bool) # Finalized flag
l = -np.ones((n,), dtype=int) # Index of closest label
# Build active points heap and set distance = 0 for initial points
for i in range(k):
s = PushHeap(u, h, s, p, I[i])
u[I[i]] = g[i] # Initialize distance to zero
A[I[i]] = True # Set active flag to true
l[I[i]] = I[i] # Set index of closest label
# Dijkstra's algorithm
while s > 0:
i, s = PopHeap(u, h, s, p) # Pop smallest element off of heap
# Finalize this point
V[i] = True # Mark as finalized
A[i] = False # Set active flag to false
# Update neighbors (the code below is wrong: compare against C sometime)
for j in WI[K[i]:K[i + 1]]:
if j != i and V[j] == False:
nn_ind = WI[K[j]:K[j + 1]]
w_vals = WV[K[j]:K[j + 1]]
u_vals = u[nn_ind]
u_tmp = stencil_solver(u[j], u_vals, w=w_vals)
if A[j]: # If j is already active
if u_tmp < u[j]: # Need to update heap
u[j] = u_tmp
SiftUp(u, h, s, p, p[j])
l[j] = l[i]
else: # If j is not active
# Add to heap and initialize distance, active flag, and label index
s = PushHeap(u, h, s, p, j)
u[j] = u_tmp
A[j] = True
l[j] = l[i]
return u
# eikonal classifier
def eikonalSSL(W, I, g, p=2, beta=None):
k = len(I) # Number of labels
n = W.shape[0] # Number of datapoints
d = np.zeros((n,)) # Distance function
l = -np.ones((n,), dtype=int) # Index of closest label
# Reformat weight matrix W into form more useful for Dijkstra
WI, WJ, WV = sparse.find(W)
K = np.array((WJ[1:] - WJ[:-1]).nonzero()) + 1
K = np.append(0, np.append(K, len(WJ)))
c_code = False
try: # Try to use fast C version, if compiled
import cmodules.cgraphpy as cgp
# Type casting and memory blocking
d = np.ascontiguousarray(d, dtype=np.float64)
l = np.ascontiguousarray(l, dtype=np.int32)
WI = np.ascontiguousarray(WI, dtype=np.int32)
WV = np.ascontiguousarray(WV, dtype=np.float64)
K = np.ascontiguousarray(K, dtype=np.int32)
I = np.ascontiguousarray(I, dtype=np.int32)
c_code = True
except:
c_code = False
labels = np.unique(g)
numl = len(labels)
u = np.zeros((numl, n))
for i in range(numl):
ind = I[g == labels[i]]
lab = np.zeros((len(ind),))
if c_code:
ind = np.ascontiguousarray(ind, dtype=np.int32)
lab = np.ascontiguousarray(lab, dtype=np.int32)
cgp.HJsolver(d, l, WI, K, WV, ind, lab, 1.0, p, 0.0)
u[i, :] = -d
else:
            u[i, :] = -HJsolver(W, ind, lab, p=p)  # HJsolver has no WV argument; let it rebuild WI/WV/K internally
if beta is not None:
_, s = volume_label_projection(u, beta, dt=-0.5)
u = np.diag(s) @ u
return u
# Nearest neighbor classifier (graph geodesic distance)
def nearestneighbor(W, I, g):
k = len(I) # Number of labels
n = W.shape[0] # Number of datapoints
d = np.ones((n,)) * 1e10 # Distance function
l = -np.ones((n,), dtype=int) # Index of closest label
# Reformat weight matrix W into form more useful for Dijkstra
WI, WJ, WV = sparse.find(W)
K = np.array((WJ[1:] - WJ[:-1]).nonzero()) + 1
K = np.append(0, np.append(K, len(WJ)))
try: # Try to use fast C version of dijkstra, if compiled
import cmodules.cgraphpy as cgp
# Type casting and memory blocking
d = np.ascontiguousarray(d, dtype=np.float64)
l = np.ascontiguousarray(l, dtype=np.int32)
WI = np.ascontiguousarray(WI, dtype=np.int32)
WV = np.ascontiguousarray(WV, dtype=np.float64)
K = np.ascontiguousarray(K, dtype=np.int32)
I = np.ascontiguousarray(I, dtype=np.int32)
init = np.ascontiguousarray(np.zeros_like(I), dtype=np.float64)
cgp.dijkstra(d, l, WI, K, WV, I, init, 1.0)
except: # Use python version, which is slower
# Initialization
s = 0 # Size of heap
h = -np.ones((n + 1,), dtype=int) # Active points heap (indices of active points)
A = np.zeros((n,), dtype=bool) # Active flag
p = -np.ones((n,), dtype=int) # Pointer back to heap
V = np.zeros((n,), dtype=bool) # Finalized flag
# Build active points heap and set distance = 0 for initial points
for i in range(k):
d[I[i]] = 0 # Initialize distance to zero
A[I[i]] = True # Set active flag to true
l[I[i]] = I[i] # Set index of closest label
s = PushHeap(d, h, s, p, I[i])
# Dijkstra's algorithm
while s > 0:
i, s = PopHeap(d, h, s, p) # Pop smallest element off of heap
# Finalize this point
V[i] = True # Mark as finalized
A[i] = False # Set active flag to false
# Update neighbors
# for j in WI[K[i]:K[i+1]]:
for jj in range(K[i], K[i + 1]):
j = WI[jj]
if j != i and V[j] == False:
if A[j]: # If j is already active
tmp_dist = d[i] + WV[jj]
if tmp_dist < d[j]: # Need to update heap
d[j] = tmp_dist
SiftUp(d, h, s, p, p[j])
l[j] = l[i]
else: # If j is not active
# Add to heap and initialize distance, active flag, and label index
d[j] = d[i] + WV[jj]
A[j] = True
l[j] = l[i]
s = PushHeap(d, h, s, p, j)
# Set labels based on nearest neighbor
u = np.zeros((n,))
u[I] = g
u, _ = LabelsToVec(u[l])
return u
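# Illustrative sketch (not part of the original module): graph nearest-neighbor
# classification on the hypothetical two-cluster data used in the demos above.
def _demo_nearestneighbor(n_per_class=100, k=10):
    X, labels = _demo_two_cluster_data(n_per_class)
    I, J, D = knnsearch(X, k)
    W = weight_matrix(I, J, D, k)
    train_ind = randomize_labels(labels, 2)
    u = nearestneighbor(W, train_ind, labels[train_ind])  # one-hot, shape (num_classes, n)
    pred = np.argmax(u, axis=0)
    return accuracy(pred, labels, len(train_ind))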
# Computes accuracy of clustering
def clustering_accuracy(L, L_true):
unique_classes = np.unique(L_true)
num_classes = len(unique_classes)
C = np.zeros((num_classes, num_classes), dtype=float)
for i in range(num_classes):
for j in range(num_classes):
            C[i][j] = np.sum((L == i) & (L_true != unique_classes[j]))  # use actual class values, not their index
row_ind, col_ind = opt.linear_sum_assignment(C)
return 100 * (1 - C[row_ind, col_ind].sum() / len(L))
# Spectral embedding
# Projects the graph to R^k via spectral projection
# Method can be 'unnormalized', 'ShiMalik', or 'NgJordanWeiss'
def spectral_embedding(W, k, method='NgJordanWeiss'):
n = W.shape[0]
if method == 'unnormalized':
L = graph_laplacian(W, norm='none')
vals, vec = sparse.linalg.eigs(L, k=k, which='SM')
vec = vec.real
vals = vals.real
elif method == 'ShiMalik':
D = degree_matrix(W)
L = graph_laplacian(W, norm='none')
vals, vec = sparse.linalg.eigs(L, M=D, k=k, which='SM')
vec = vec.real
vals = vals.real
elif method == 'NgJordanWeiss':
L = graph_laplacian(W, norm='normalized')
vals, vec = sparse.linalg.eigs(L, k=k, which='SM')
vec = vec.real
vals = vals.real
norms = np.sum(vec * vec, axis=1)
T = sparse.spdiags(norms ** (-1 / 2), 0, n, n)
vec = T @ vec # Normalize rows
return vec
def kmeans(X, k):
KM = cluster.KMeans(n_clusters=k).fit(X)
return KM.labels_
# Spectral Clustering
def spectral_cluster(W, k, method='NgJordanWeiss', extra_dim=0):
V = spectral_embedding(W, k + extra_dim, method=method)
kmeans = cluster.KMeans(n_clusters=k).fit(V)
# V = spectral_embedding(W,k,method=method)
# kmeans = cluster.KMeans(n_clusters=k).fit(V)
return kmeans.labels_
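# Hedged usage sketch: two dense 5-node blobs joined by one weak edge (so the graph is
# connected); spectral_cluster should typically recover the two blocks up to relabeling.
# All names below are illustrative only, not part of the package API.
def _spectral_cluster_example():
    B = np.ones((5, 5)) - np.eye(5)                        # complete graph on 5 nodes
    W_demo = np.asarray(sparse.block_diag([B, B]).todense())
    W_demo[4, 5] = W_demo[5, 4] = 1e-3                     # weak bridge between the blobs
    return spectral_cluster(sparse.csr_matrix(W_demo), 2)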
# INCRES clustering
# Bresson, Xavier, et al. "An incremental reseeding strategy for clustering." International Conference on Imaging, Vision and Learning based on Optimization and PDEs. Spr<NAME>, 2016.
# W = weight matrix
def incres_cluster(W, k, speed, T, labels):
n = W.shape[0]
# Increment
Dm = np.maximum(int(speed * 1e-4 * n / k), 1)
# Random initial labeling
u = random.randint(0, k, size=n)
# Initialization
F = np.zeros((n, k))
J = np.arange(n).astype(int)
# Random walk transition
D = degree_matrix(W, p=-1)
P = W * D
m = int(1)
for i in range(T):
# Plant
F.fill(0)
for r in range(k):
I = u == r
ind = J[I]
F[ind[random.choice(np.sum(I), m)], r] = 1
# Grow
while np.min(F) == 0:
F = P * F
# Harvest
u = np.argmax(F, axis=1)
# Increment
m = m + Dm
# Compute accuracy
if labels is not None:
acc = clustering_accuracy(u, labels)
print("Iteration " + str(i) + ": Accuracy = %.2f" % acc + "%%, #seeds= %d" % m)
return u
# Check if graph is connected
def isconnected(W):
num_comp, comp = csgraph.connected_components(W)
if num_comp == 1:
return True
else:
return False
# Graph-based clustering
# W = sparse weight matrix describing graph
# method = clustering method
# Options: incres, spectral, spectralshimalik, spectralngjordanweiss
def graph_clustering(W, k, true_labels=None, method="incres", speed=5, T=100, extra_dim=0):
n = W.shape[0]
# Symmetrize W, if not already symmetric
W = (W + W.transpose()) / 2
# Check if connected
if not isconnected(W):
print('Warning: Graph is not connected!')
# Clustering
if method == "incres":
labels = incres_cluster(W, k, speed, T, true_labels)
elif method == "spectral":
labels = spectral_cluster(W, k, method="unnormalized", extra_dim=extra_dim)
elif method == "spectralshimalik":
labels = spectral_cluster(W, k, method="ShiMalik", extra_dim=extra_dim)
elif method == "spectralngjordanweiss":
labels = spectral_cluster(W, k, method="NgJordanWeiss", extra_dim=extra_dim)
else:
print("Invalid choice of clustering method.")
sys.exit()
return labels
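# Hedged usage sketch (W_knn and known_labels are illustrative placeholders, e.g. built
# from the kNN data loaded in main() further down):
# pred = graph_clustering(W_knn, 10, true_labels=known_labels, method="incres")
# print(clustering_accuracy(pred, known_labels))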
# Graph-based semi-supervised learning
# W = sparse weight matrix describing graph
# I = indices of labeled datapoints
# g = values of labels
# method = SSL method
# Options: laplace, poisson, poisson_nodeg, wnll, properlyweighted, plaplace, randomwalk
def graph_ssl(W, I, g, D=None, Ns=40, mu=1, numT=50, beta=None, method="laplace", p=3, volume_mult=0.5, alpha=2,
zeta=1e7, r=0.1, epsilon=0.05, X=None, plaplace_solver="GradientDescentCcode", norm="none",
true_labels=None, eigvals=None, eigvecs=None, dataset=None, T=0, use_cuda=False, return_vector=False,
poisson_training_balance=True, symmetrize=True, error=None):
one_shot_methods = ["mbo", "poisson", "poissonbalanced", "poissonvolume", "poissonmbo_volume", "poissonmbo",
"poissonl1", "nearestneighbor", "poissonmbobalanced", "volumembo", "poissonvolumembo",
"dynamiclabelpropagation", "sparselabelpropagation", "centeredkernel", "eikonal"]
n = W.shape[0]
method = method.lower()
if beta is None:
beta = np.ones((len(np.unique(g)),))
# Symmetrize D,W, if not already symmetric
if symmetrize:
W = (W + W.transpose()) / 2
if D is not None:
D = sparse_max(D, D.transpose())
if not isconnected(W):
print('Warning: Graph is not connected!')
# One shot methods
if method in one_shot_methods:
if method == "mbo":
u = multiclassMBO(W, I, g, eigvals, eigvecs, dataset, true_labels=true_labels)
elif method == "volumembo":
u = volumeMBO(W, I, g, dataset, beta, T, volume_mult)
elif method == "poissonvolumembo":
u = poisson_volumeMBO(W, I, g, dataset, beta, T, volume_mult)
elif method == "poissonmbo_old":
u = poissonMBO(W, I, g, dataset, np.ones_like(beta), true_labels=true_labels, temp=T, use_cuda=use_cuda,
Ns=Ns, mu=mu, T=numT)
elif method == "poissonmbobalanced":
u = poissonMBO(W, I, g, dataset, beta, true_labels=true_labels, temp=T, use_cuda=use_cuda, Ns=Ns, mu=mu,
T=numT)
elif method == "poissonl1":
u = poissonL1(W, I, g, dataset, true_labels=true_labels)
elif method == "poisson":
u, _ = poisson(W, I, g, true_labels=true_labels, use_cuda=use_cuda,
training_balance=poisson_training_balance, error=error)
elif method == "poissonbalanced":
u, _ = poisson(W, I, g, true_labels=true_labels, use_cuda=use_cuda,
training_balance=poisson_training_balance, beta=beta)
elif method == "poissonvolume":
u = PoissonVolume(W, I, g, true_labels=true_labels, use_cuda=use_cuda,
training_balance=poisson_training_balance, beta=beta)
elif method == "poissonmbo":
u = poissonMBO_volume(W, I, g, dataset, beta, true_labels=true_labels, temp=T, use_cuda=use_cuda, Ns=Ns,
mu=mu)
elif method == "dynamiclabelpropagation":
u = DynamicLabelPropagation(W, I, g, true_labels=true_labels)
elif method == "sparselabelpropagation":
u = SparseLabelPropagation(W, I, g, true_labels=true_labels)
elif method == "centeredkernel":
u = CenteredKernel(W, I, g, true_labels=true_labels)
elif method == "nearestneighbor":
# Use distance matrix if provided, instead of weight matrix
if D is None:
u = nearestneighbor(W, I, g)
else:
u = nearestneighbor(D, I, g)
elif method == "eikonal":
# Use distance matrix if provided, instead of weight matrix
if D is None:
u = eikonalSSL(W, I, g, p=p, beta=beta)
else:
                u = eikonalSSL(D, I, g, p=p, beta=beta)
else: # One vs rest methods
k = len(np.unique(g)) # Number of labels
u = np.zeros((k, n))
i = 0
for l in np.unique(g):
h = g == l
# Solve binary classification problem
if method == "laplace":
v = laplace_solve(W, I, h, norm=norm)
elif method == "shift":
v = shift_solve(W, I, h)
elif method == "meanshift":
v = meanshift_solve(W, I, h)
elif method == "wnll":
v = wnll_solve(W, I, h)
elif method == "properlyweighted":
if X is None:
print("Must supply raw data points for properly weighted Laplacian.")
sys.exit()
v = properlyweighted_solve(W, I, h, X, alpha, zeta, r)
elif method == "plaplace":
v = plaplace_solve(W, I, h, p, sol_method=plaplace_solver, norm=norm)
elif method == "randomwalk":
v = randomwalk_solve(W, I, h, epsilon)
else:
print("Invalid choice of SSL method.")
sys.exit()
# Update labels
u[i, :] = v
i = i + 1
if return_vector:
labels = np.transpose(u)
else:
# Select labels
max_locations = np.argmax(u, axis=0)
labels = (np.unique(g))[max_locations]
# Make sure to set labels at labeled points
labels[I] = g
return labels
    # confidence = usort[0, :] - usort[1, :]  # unreachable leftover; 'usort' is never defined here
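# Hedged usage sketch (W_knn and known_labels are illustrative placeholders):
# train_ind = np.concatenate([np.where(known_labels == c)[0][:5]
#                             for c in np.unique(known_labels)])
# pred = graph_ssl(W_knn, train_ind, known_labels[train_ind], method="laplace")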
# Read numerical data from csv file
def csvread(filename):
X = []
with open(filename) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
n = 0
for row in csv_reader:
if not row[0] == 'Date/Time':
X += [float(i) for i in row]
m = len(row)
n += 1
return np.array(X).reshape((n, m))
# Compute average and standard deviation of accuracy over many trials
# Reads data from csv file filename
# Returns mean accuracy (acc), standard deviation (stddev), number of labeled points (N),
# upper-quartile accuracy (quant), and number of trials (num_trials)
def accuracy_statistics(filename):
X = csvread(filename)
N = np.unique(X[:, 0])
acc = []
stddev = []
quant = []
for n in N:
Y = X[X[:, 0] == n, 1]
Y = np.sort(Y)
acc += [np.mean(Y)]
quant += [Y[int(3 * len(Y) / 4)]]
stddev += [np.std(Y)]
# print("%.1f (%.1f)"%(np.mean(Y),np.std(Y)), end="&")
num_trials = len(X[:, 0]) / len(N)
return acc, stddev, N, quant, num_trials
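# Hedged usage sketch (the file name below is illustrative): each accuracy .csv is
# expected to hold one "num_labels,accuracy" row per trial plus Date/Time header rows,
# as written by main() further down.
# acc, stddev, N, quant, num_trials = accuracy_statistics("Results/MNIST_laplace_accuracy.csv")
# plt.errorbar(N, acc, yerr=stddev)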
# Makes an accuracy table to be included in LaTeX documents
# dataset = name of dataset
# ssl_methods = list of names of methods to compare
def accuracy_table_icml(dataset, ssl_method_list, legend_list, num_of_classes, testerror=False, savefile='tables.tex',
title='', quantile=False, append=False, directory='Results', fontsize='small', small_caps=True,
two_column=True):
# Retrieve number of different label rates m
accfile = directory + "/" + dataset + "_" + ssl_method_list[0] + "_accuracy.csv"
acc, stddev, N, quant, num_trials = accuracy_statistics(accfile)
m = len(N)
# Determine best algorithm at each label rate
best = [None] * m
best_score = [0] * m
i = 0
for ssl_method in ssl_method_list:
accfile = directory + "/" + dataset + "_" + ssl_method + "_accuracy.csv"
acc, stddev, N, quant, num_trials = accuracy_statistics(accfile)
if quantile:
acc = quant
for j in range(m):
if acc[j] > best_score[j]:
best_score[j] = acc[j]
best[j] = i
i += 1
if append:
f = open(savefile, "r")
lines = f.readlines()
f.close()
f = open(savefile, "w")
f.writelines([item for item in lines[:-1]])
else:
f = open(savefile, "w")
f.write("\\documentclass{article}\n")
f.write("\\usepackage[T1]{fontenc}\n")
f.write("\\usepackage{booktabs}\n")
f.write("\\usepackage[margin=1in]{geometry}\n")
f.write("\\begin{document}\n")
f.write("\n\n\n")
if two_column:
f.write("\\begin{table*}[t!]\n")
else:
f.write("\\begin{table}[t!]\n")
f.write("\\vspace{-3mm}\n")
f.write(
"\\caption{" + title + ": Average (standard deviation) classification accuracy over %d trials.}\n" % num_trials)
f.write("\\vspace{-3mm}\n")
f.write("\\label{tab:" + title + "}\n")
f.write("\\vskip 0.15in\n")
f.write("\\begin{center}\n")
f.write("\\begin{" + fontsize + "}\n")
if small_caps:
f.write("\\begin{sc}\n")
f.write("\\begin{tabular}{l")
for i in range(m):
f.write("l")
f.write("}\n")
f.write("\\toprule\n")
f.write("\\# Labels per class")
for i in range(m):
f.write("&\\textbf{%d}" % int(N[i] / num_of_classes))
f.write("\\\\\n")
f.write("\\midrule\n")
i = 0
for ssl_method in ssl_method_list:
f.write(legend_list[i].ljust(15))
accfile = directory + "/" + dataset + "_" + ssl_method + "_accuracy.csv"
acc, stddev, N, quant, num_trials = accuracy_statistics(accfile)
for j in range(m):
if best[j] == i:
f.write("&{\\bf %.1f" % acc[j] + " (%.1f)}" % stddev[j])
# f.write("&${\\bf %.1f"%acc[j]+"\\pm %.1f}$"%stddev[j])
else:
f.write("&%.1f" % acc[j] + " (%.1f) " % stddev[j])
# f.write("&$%.1f"%acc[j]+"\\pm %.1f$ "%stddev[j])
f.write("\\\\\n")
i += 1
f.write("\\bottomrule\n")
f.write("\\end{tabular}\n")
if small_caps:
f.write("\\end{sc}\n")
f.write("\\end{" + fontsize + "}\n")
f.write("\\end{center}\n")
f.write("\\vskip -0.1in\n")
if two_column:
f.write("\\end{table*}")
else:
f.write("\\end{table}")
f.write("\n\n\n")
f.write("\\end{document}\n")
f.close()
def plot_graph(X, W, l=None):
# Other colormaps, coolwarm, winter, Set3, tab20b, rainbow
# plt.ion()
colors = np.array([[1.0, 0, 0], [0, 0.9, 0]])
plt.rcParams['figure.facecolor'] = 'navy'
n = W.shape[0]
I, J, V = sparse.find(W)
for i in range(len(I)):
xval = [X[I[i], 0], X[J[i], 0]]
yval = [X[I[i], 1], X[J[i], 1]]
# plt.plot(xval,yval, color='black', linewidth=0.15, markersize=0)
plt.plot(xval, yval, color=[0.5, 0.5, 0.5], linewidth=0.5, markersize=0)
if l is None:
# plt.scatter(X[:,0],X[:,1], s=30, cmap='Paired')
plt.scatter(X[:, 0], X[:, 1], s=8, zorder=3)
else:
# plt.scatter(X[:,0],X[:,1], s=30, c=l, cmap='Paired')
plt.scatter(X[:, 0], X[:, 1], s=8, c=colors[l, :], zorder=3)
plt.axis("off")
# plot average and standard deviation of accuracy over many trials
# dataset = name of dataset
# ssl_methods = list of names of methods to compare
def accuracy_plot(dataset, ssl_method_list, legend_list, num_of_classes, title=None, errorbars=False, testerror=False,
savefile=None, loglog=False, log_dirs=None, directed_graph=False):
if log_dirs is None:
log_dirs = ["Results/"]
# plt.ion()
plt.figure()
if errorbars:
matplotlib.rcParams.update({'errorbar.capsize': 5})
matplotlib.rcParams.update({'font.size': 16})
styles = ['^b-', 'or-', 'dg-', 'sk-', 'pm-', 'xc-', '*y-']
i = 0
for log in log_dirs:
for ssl_method in ssl_method_list:
accfile = os.path.join(log, dataset + "_" + ssl_method)
if directed_graph:
accfile += "_directed"
accfile += "_accuracy.csv"
acc, stddev, N, quant, num_trials = accuracy_statistics(accfile)
if testerror:
                acc = 100 - np.array(acc)
# z = np.polyfit(np.log(N),np.log(acc),1)
# print(z[0])
if errorbars:
plt.errorbar(N / num_of_classes, acc, fmt=styles[i], yerr=stddev, label=legend_list[i])
else:
if loglog:
plt.loglog(N / num_of_classes, acc, styles[i], label=legend_list[i])
else:
plt.plot(N / num_of_classes, acc, styles[i], label=legend_list[i])
i += 1
plt.xlabel('Number of labels per class')
if testerror:
plt.ylabel('Test error (%)')
plt.legend(loc='upper right')
else:
plt.ylabel('Accuracy (%)')
plt.legend(loc='lower right')
if title is not None:
plt.title(title)
plt.tight_layout()
plt.grid(True)
if savefile is not None:
plt.savefig(savefile)
else:
plt.show()
# Select labels based on a ranking
# Produces a label permutation with one trial per label-count, keeping the same variations of #labels per class as the input permutation permold, but selecting the highest-ranked points (according to rank) within each class
def SelectLabels(labels, permold, rank):
perm = permold
# Number of classes
L = np.unique(labels)
k = len(L)
n = len(labels)
m = len(permold)
num = np.zeros((m,))
for i in range(m):
num[i] = len(permold[i])
num, unique_perm = np.unique(num, return_index=True)
perm = list()
for i in unique_perm:
p = permold[i]
pl = labels[p]
ind = []
for l in L:
numl = np.sum(pl == l)
K = labels == l
c = np.argsort(-rank[K])
j = np.arange(0, n)[K]
ind = ind + j[c[:numl]].tolist()
ind = np.array(ind)
perm.append(ind)
return perm
# PageRank algorithm
def PageRank(W, alpha):
n = W.shape[0]
u = np.ones((n,))
v = np.ones((n,))
D = degree_matrix(W, p=-1)
P = np.transpose(D * W)
err = 1
while err > 1e-10:
w = alpha * P * u + (1 - alpha) * v
err = np.max(np.absolute(w - u))
u = w
return u
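# Hedged worked example: a 3-node path graph, where the middle node is linked by both
# end nodes and should therefore receive the largest PageRank score. Helper name is illustrative.
def _pagerank_example():
    W_demo = sparse.csr_matrix(np.array([[0., 1., 0.],
                                         [1., 0., 1.],
                                         [0., 1., 0.]]))
    return PageRank(W_demo, 0.85)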
# Print help
def print_help():
print('========================================================')
print('GraphLearning: Python package for graph-based learning. ')
print('========================================================')
print('========================================================')
print('Graph-based Clustering & Semi-Supervised Learning')
print('========================================================')
print(' ')
print('Options:')
print(' -d (--dataset=): MNIST, FashionMNIST, WEBKB, cifar (default=MNIST)')
print(' -m (--metric=): Metric for computing similarities (default=L2)')
print(' Choices: vae, scatter, L2, aet')
print(' -a (--algorithm=): Learning algorithm (default=Laplace)')
print(' -k (--knn=): Number of nearest neighbors (default=10)')
print(' -t (--num_trials=): Number of trial permutations to run (default=all)')
print(
' -l (--label_perm=): Choice of label permutation file (format=dataset<label_perm>_permutations.npz). (default is empty).')
print(' -p (--p=): Value of p for plaplace method (default=3)')
print(' -n (--normalization=): Laplacian normalization (default=none)')
print(' Choices: none, normalized')
print(' -N (--num_classes): Number of clusters if choosing clustering algorithm (default=10)')
print(' -s (--speed=): Speed in INCRES method (1--10) (default=2)')
print(' -i (--num_iter=): Number of iterations for iterative methods (default=1000)')
print(' -x (--extra_dim=): Number of extra dimensions in spectral clustering (default=0)')
print(' -c (--cuda): Use GPU acceleration (when available)')
print(' -T (--temperature): Temperature for volume constrained MBO (default=0)')
print(' -v (--volume_constraint=): Volume constraint for MBO (default=0.5)')
print(' -j (--num_cores=): Number of cores to use in parallel processing (default=1)')
print(' -r (--results): Turns off automatic saving of results to .csv file')
print(' -b (--verbose): Turns on verbose mode (displaying more intermediate steps).')
# Default settings
def default_dataset(): return 'MNIST'
def default_metric(): return 'L2'
def default_algorithm(): return 'laplace'
def default_k(): return 10
def default_t(): return '-1'
def default_label_perm(): return ''
def default_p(): return 3
def default_norm(): return "none"
def default_use_cuda(): return False
def default_T(): return 0
def default_num_cores(): return 1
def default_results(): return True
def default_num_classes(): return 10
def default_speed(): return 2
def default_num_iter(): return 1000
def default_extra_dim(): return 0
def default_volume_constraint(): return 0.5
def default_verbose(): return False
def default_poisson_training_balance(): return True
def default_directed_graph(): return False
# Main subroutine. Is calleable from other scripts as graphlearning.main(...)
def main(dataset=default_dataset(), metric=default_metric(), algorithm=default_algorithm(), k=default_k(),
t=default_t(), label_perm=default_label_perm(), p=default_p(), norm=default_norm(),
use_cuda=default_use_cuda(), T=default_T(), num_cores=default_num_cores(), results=default_results(),
num_classes=default_num_classes(), speed=default_speed(), num_iter=default_num_iter(),
extra_dim=default_extra_dim(), volume_constraint=default_volume_constraint(), verbose=default_verbose(),
poisson_training_balance=default_poisson_training_balance(), directed_graph=default_directed_graph()):
# Load labels
labels = load_labels(dataset)
# Load nearest neighbor data
I, J, D = load_kNN_data(dataset, metric=metric)
    # Construct weight matrix and distance matrix
W, error = nnk_weight_matrix(dataset, metric, mask=J, knn_param=k, symmetrize=not directed_graph)
Wdist = None # dist_matrix(I, J, D, k)
# Load label permutation (including restrictions in t)
perm = load_label_permutation(dataset, label_perm=label_perm, t=t)
# Load eigenvector data if MBO selected
if algorithm == 'mbo':
eigvals, eigvecs = load_mbo_eig(dataset, metric, k)
else:
eigvals = None
eigvecs = None
# Output file
outfile = "Results/" + dataset + label_perm + "_" + metric + "_k%d" % k
if algorithm == 'plaplace':
outfile = outfile + "_p%.1f" % p + algorithm[1:] + "_" + norm
elif algorithm == 'eikonal':
outfile = outfile + "_p%.1f" % p + algorithm
else:
outfile = outfile + "_" + algorithm
if algorithm == 'volumembo' or algorithm == 'poissonvolumembo':
outfile = outfile + "_T%.3f" % T
outfile = outfile + "_V%.3f" % volume_constraint
if algorithm == 'poisson' and poisson_training_balance == False:
outfile = outfile + "_NoBal"
if directed_graph:
outfile += "_directed"
outfile = outfile + "_accuracy.csv"
# Print basic info
print('========================================================')
print('GraphLearning: Python package for graph-based learning. ')
print('========================================================')
print('========================================================')
print('Graph-based Clustering & Semi-Supervised Learning')
print('========================================================')
print(' ')
print('Dataset: ' + dataset)
print('Metric: ' + metric)
print('Number of neighbors: %d' % k)
print('Learning algorithm: ' + algorithm)
print('Laplacian normalization: ' + norm)
if algorithm == 'plaplace' or algorithm == 'eikonal':
print("p-Laplace/eikonal value p=%.2f" % p)
if algorithm in clustering_algorithms:
print('Number of clusters: %d' % num_classes)
        if algorithm == 'incres':
print('INCRES speed: %.2f' % speed)
print('Number of iterations: %d' % num_iter)
        if algorithm[:8] == 'spectral':
print('Number of extra dimensions: %d' % extra_dim)
else:
print('Number of trial permutations: %d' % len(perm))
print('Permutations file: LabelPermutations/' + dataset + label_perm + '_permutations.npz')
if algorithm == 'volumembo' or algorithm == 'poissonvolumembo':
print("Using temperature=%.3f" % T)
print("Volume constraints = [%.3f,%.3f]" % (volume_constraint, 2 - volume_constraint))
# If output file selected
if results:
print('Output file: ' + outfile)
print(' ')
print('========================================================')
print(' ')
true_labels = None
if verbose:
true_labels = labels
# If clustering algorithm was chosen
if algorithm in clustering_algorithms:
# Clustering
u = graph_clustering(W, num_classes, labels, method=algorithm, T=num_iter, speed=speed, extra_dim=extra_dim)
# Compute accuracy
acc = clustering_accuracy(u, labels)
# Print to terminal
print("Accuracy: %.2f" % acc + "%")
# If semi-supervised algorithms chosen
else:
# If output file selected
if results:
# Check if Results directory exists
if not os.path.exists('Results'):
os.makedirs('Results')
now = datetime.datetime.now()
# Add time stamp to output file
f = open(outfile, "a+")
f.write("Date/Time, " + now.strftime("%Y-%m-%d_%H:%M") + "\n")
f.close()
# Loop over label permutations
print("Number of labels, Accuracy")
def one_trial(label_ind):
# Number of labels
m = len(label_ind)
            # Label proportions (used by some algorithms)
beta = label_proportions(labels)
start_time = time.time()
# Graph-based semi-supervised learning
u = graph_ssl(W, label_ind, labels[label_ind], D=Wdist, beta=beta, method=algorithm, epsilon=0.3, p=p,
norm=norm, eigvals=eigvals, eigvecs=eigvecs, dataset=dataset, T=T, use_cuda=use_cuda,
volume_mult=volume_constraint, true_labels=true_labels,
poisson_training_balance=poisson_training_balance, symmetrize=not directed_graph, error=error)
print("--- %s seconds ---" % (time.time() - start_time))
# Compute accuracy
acc = accuracy(u, labels, m)
# Print to terminal
print("%d" % m + ",%.2f" % acc)
# Write to file
if results:
f = open(outfile, "a+")
f.write("%d" % m + ",%.2f\n" % acc)
f.close()
# Number of cores for parallel processing
num_cores = min(multiprocessing.cpu_count(), num_cores)
Parallel(n_jobs=num_cores)(delayed(one_trial)(label_ind) for label_ind in perm)
if __name__ == '__main__':
# Default settings
dataset = default_dataset()
metric = default_metric()
algorithm = default_algorithm()
k = default_k()
t = default_t()
label_perm = default_label_perm()
p = default_p()
norm = default_norm()
use_cuda = default_use_cuda()
T = default_T()
num_cores = default_num_cores()
results = default_results()
num_classes = default_num_classes()
speed = default_speed()
num_iter = default_num_iter()
extra_dim = default_extra_dim()
volume_constraint = default_volume_constraint()
verbose = default_verbose()
poisson_training_balance = default_poisson_training_balance()
directed_graph = default_directed_graph()
# Read command line arguments
try:
opts, args = getopt.getopt(sys.argv[1:], "hd:m:k:a:p:n:v:N:s:i:x:t:cl:T:j:rboz",
["dataset=", "metric=", "knn=", "algorithm=", "p=", "normalization=",
"volume_constraint=", "num_classes=", "speed=", "num_iter=", "extra_dim=",
"num_trials=", "cuda", "label_perm=", "temperature=", "num_cores=", "results",
"verbose", "poisson_training_balance", "directed"])
except getopt.GetoptError:
print_help()
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print_help()
sys.exit()
elif opt in ("-d", "--dataset"):
dataset = arg
elif opt in ("-m", "--metric"):
metric = arg
elif opt in ("-k", "--knn"):
k = int(arg)
elif opt in ("-a", "--algorithm"):
algorithm = arg.lower()
elif opt in ("-p", "--p"):
p = float(arg)
elif opt in ("-n", "--normalization"):
norm = arg
elif opt in ("-v", "--volume_constraint"):
volume_constraint = float(arg)
elif opt in ("-N", "--num_classes"):
num_classes = int(arg)
elif opt in ("-s", "--speed"):
speed = float(arg)
elif opt in ("-i", "--num_iter"):
num_iter = int(arg)
elif opt in ("-x", "--extra_dim"):
extra_dim = int(arg)
elif opt in ("-t", "--num_trials"):
t = arg
elif opt in ("-c", "--cuda"):
use_cuda = True
elif opt in ("-l", "--label_perm"):
label_perm = arg
elif opt in ("-T", "--temperature"):
T = float(arg)
elif opt in ("-j", "--num_cores"):
num_cores = int(arg)
elif opt in ("-r", "--results"):
results = False
elif opt in ("-b", "--verbose"):
verbose = True
elif opt in ("-o", "--poisson_training_balance"):
poisson_training_balance = False
elif opt in ("-z", "--directed"):
directed_graph = True
# Call main subroutine
main(dataset=dataset, metric=metric, algorithm=algorithm, k=k, t=t, label_perm=label_perm, p=p, norm=norm,
use_cuda=use_cuda, T=T, num_cores=num_cores, results=results, num_classes=num_classes, speed=speed,
num_iter=num_iter, extra_dim=extra_dim, volume_constraint=volume_constraint, verbose=verbose,
poisson_training_balance=poisson_training_balance, directed_graph=directed_graph)
| 2.6875
| 3
|
ECE365/nlp/nlplab4_dist/lab4.py
|
debugevent90901/courseArchive
| 0
|
12784363
|
<reponame>debugevent90901/courseArchive
# All Import Statements Defined Here
# Note: Do not add to this list.
# All the dependencies you need can be installed by running .
# ----------------
import numpy as np
import random
import scipy as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.decomposition import PCA
START_TOKEN = '<START>'
END_TOKEN = '<END>'
np.random.seed(0)
random.seed(0)
# ----------------
def distinct_words(corpus):
""" Determine a list of distinct words for the corpus.
Params:
corpus (list of list of strings): corpus of documents
Return:
corpus_words (list of strings): list of distinct words across the corpus, sorted (using python 'sorted' function)
num_corpus_words (integer): number of distinct words across the corpus
"""
corpus_words = []
num_corpus_words = -1
# ------------------
# Write your implementation here.
corpus_words = list(set([strings for list_of_strings in corpus for strings in list_of_strings]))
corpus_words = sorted(corpus_words)
num_corpus_words = len(corpus_words)
# ------------------
return corpus_words, num_corpus_words
def compute_co_occurrence_matrix(corpus, window_size=4):
""" Compute co-occurrence matrix for the given corpus and window_size (default of 4).
Note: Each word in a document should be at the center of a window. Words near edges will have a smaller
number of co-occurring words.
For example, if we take the document "START All that glitters is not gold END" with window size of 4,
"All" will co-occur with "START", "that", "glitters", "is", and "not".
Params:
corpus (list of list of strings): corpus of documents
window_size (int): size of context window
Return:
M (numpy matrix of shape (number of corpus words, number of corpus words)):
            Co-occurrence matrix of word counts.
The ordering of the words in the rows/columns should be the same as the ordering of the words given by the distinct_words function.
word2Ind (dict): dictionary that maps word to index (i.e. row/column number) for matrix M.
"""
words, num_words = distinct_words(corpus)
M = None
word2Ind = {}
# ------------------
# Write your implementation here.
M = np.zeros((num_words, num_words))
for i in range(num_words):
word2Ind[words[i]] = i
for list_of_strings in corpus:
for i in range(len(list_of_strings)):
left = max(i - window_size, 0)
right = min(i + window_size, len(list_of_strings) - 1)
for j in range(left, right+1):
if word2Ind[list_of_strings[i]] != word2Ind[list_of_strings[j]]:
M[word2Ind[list_of_strings[i]]][word2Ind[list_of_strings[j]]] += 1
# ------------------
return M, word2Ind
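# Hedged usage sketch on a toy corpus (mirrors the docstring example above); the helper
# name is illustrative and not part of the assignment API.
def _co_occurrence_example():
    corpus = [[START_TOKEN, "all", "that", "glitters", "is", "not", "gold", END_TOKEN]]
    M, word2Ind = compute_co_occurrence_matrix(corpus, window_size=4)
    return M[word2Ind["all"], word2Ind["glitters"]]  # -> 1.0, they share one window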
def reduce_to_k_dim(M, k=2):
""" Reduce a co-occurence count matrix of dimensionality (num_corpus_words, num_corpus_words)
to a matrix of dimensionality (num_corpus_words, k) using the following SVD function from Scikit-Learn:
- http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.TruncatedSVD.html
Params:
        M (numpy matrix of shape (number of corpus words, number of corpus words)): co-occurrence matrix of word counts
k (int): embedding size of each word after dimension reduction
Return:
        M_reduced (numpy matrix of shape (number of corpus words, k)): matrix of k-dimensional word embeddings.
In terms of the SVD from math class, this actually returns U * S
"""
n_iters = 10 # Use this parameter in your call to `TruncatedSVD`
M_reduced = None
print("Running Truncated SVD over %i words..." % (M.shape[0]))
# ------------------
# Write your implementation here.
M_reduced = TruncatedSVD(n_components=k, n_iter=n_iters).fit_transform(M)
# ------------------
print("Done.")
return M_reduced
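# Hedged follow-up sketch: reduce the toy co-occurrence matrix from the example above to
# 2-D embeddings (TruncatedSVD requires k to be smaller than the number of distinct words).
def _reduce_to_k_dim_example():
    corpus = [[START_TOKEN, "all", "that", "glitters", "is", "not", "gold", END_TOKEN]]
    M, word2Ind = compute_co_occurrence_matrix(corpus, window_size=4)
    return reduce_to_k_dim(M, k=2)  # shape (8, 2)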
| 2.796875
| 3
|
Cogs/timerevents.py
|
xantbos/Discord.py-Cogs
| 0
|
12784364
|
<gh_stars>0
import discord
import asyncio
import pytz
import io
import os
import sys
from discord.ext import commands
from datetime import date, datetime, timedelta, timezone
from threading import Timer
from pytz import timezone
localland = timezone("Canada/Eastern")
class TimerCommands(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.ticker = localland.localize(datetime(year=datetime.now().year, month=datetime.now().month, day=datetime.now().day, hour=12, minute=00, second=00))
asyncio.ensure_future(timer_loop(self))
@commands.command(name="timercheck")
async def timer_print_internal_times(self, ctx):
print("TIMER: {}".format(self.ticker.strftime("%B")))
async def timer_loop(self):
await self.bot.wait_until_ready()
preString = "Starting Ticker Timer..."
print("{:<35}".format(preString), end="")
print("\tSuccess") # purely for aesthetics
while True:
diff = self.ticker - localland.localize(datetime.now())
if diff.total_seconds() <= 0:
# here is where we increment to our next timer event
# read up on timedelta if you need help
self.ticker = self.ticker + timedelta(days=1)
else:
diff = self.ticker - localland.localize(datetime.now())
await asyncio.sleep(diff.total_seconds())
thisChannel = self.bot.get_channel(000000000000) # channel id goes here
await thisChannel.send("Timer tick")
def setup(bot):
bot.add_cog(TimerCommands(bot))
| 2.640625
| 3
|
src/sssimp/utils.py
|
vincent38/sssimp
| 8
|
12784365
|
<gh_stars>1-10
import os
import sys
from pathlib import Path
import traceback
from typing import Type
from sssimp import APP_DIR
def mkdir(path):
"""
    Ensures a directory exists: if `path` is not an existing directory, the parent
    directory of `path` is created instead (useful when `path` points to a file).
    Directories that already exist are safely ignored.
"""
path = Path(path)
if not path.is_dir():
path = path.parent
path.mkdir(exist_ok=True, parents=True)
def path_strip(path, parent = APP_DIR):
"""Strips `parent` from `path`"""
path = str(path)
path = path.removeprefix(str(parent))
path = path.removeprefix(os.path.sep)
return path
def run_safely(f, exception_type: Type[Exception] = Exception, out=sys.stderr):
try:
f()
except exception_type:
print(traceback.format_exc(), file=out)
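# Hedged usage sketch (paths below are illustrative):
# mkdir("output/pages/index.html")              # ensures output/pages/ exists
# path_strip("/app/content/post.md", "/app")    # -> "content/post.md" (POSIX paths)
# run_safely(lambda: 1 / 0, ZeroDivisionError)  # prints the traceback, does not raise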
| 2.78125
| 3
|
networking_odl/agent/db_ovs_neutron_agent.py
|
dingboopt/wqq
| 0
|
12784366
|
# Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import collections
import functools
import hashlib
import signal
import sys
import time
import netaddr
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import loopingcall
from oslo_service import systemd
import six
from six import moves
from neutron._i18n import _, _LE, _LI, _LW
from neutron.agent.common import ip_lib
from neutron.agent.common import ovs_lib
from neutron.agent.common import polling
from neutron.agent.common import utils
from neutron.agent.l2.extensions import manager as ext_manager
from neutron.agent import rpc as agent_rpc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api.rpc.callbacks import resources
from neutron.api.rpc.handlers import dvr_rpc
from neutron.common import config
from neutron.common import constants as n_const
from neutron.common import ipv6_utils as ipv6
from neutron.common import topics
from neutron.common import utils as n_utils
from neutron import context
from neutron.extensions import portbindings
from neutron.plugins.common import constants as p_const
from neutron.plugins.common import utils as p_utils
from neutron.plugins.ml2.drivers.l2pop.rpc_manager import l2population_rpc
from neutron.plugins.ml2.drivers.openvswitch.agent.common \
import constants
from neutron.plugins.ml2.drivers.openvswitch.agent \
import ovs_agent_extension_api as ovs_ext_api
from neutron.plugins.ml2.drivers.openvswitch.agent \
import ovs_dvr_neutron_agent
from neutron.plugins.ml2.drivers.openvswitch.agent \
import ovs_neutron_agent
from networking_odl.ovsdb import impl_idl_ovn
from networking_odl.ovsdb import ovsdb_monitor
LOG = logging.getLogger(__name__)
cfg.CONF.import_group('AGENT', 'neutron.plugins.ml2.drivers.openvswitch.'
'agent.common.config')
cfg.CONF.import_group('OVS', 'neutron.plugins.ml2.drivers.openvswitch.agent.'
'common.config')
class OVSPluginApi(agent_rpc.PluginApi):
pass
class DBOVSNeutronAgent(ovs_neutron_agent.OVSNeutronAgent):
'''Implements OVS-based tunneling, VLANs and flat networks.
Two local bridges are created: an integration bridge (defaults to
'br-int') and a tunneling bridge (defaults to 'br-tun'). An
additional bridge is created for each physical network interface
used for VLANs and/or flat networks.
All VM VIFs are plugged into the integration bridge. VM VIFs on a
given virtual network share a common "local" VLAN (i.e. not
propagated externally). The VLAN id of this local VLAN is mapped
to the physical networking details realizing that virtual network.
For virtual networks realized as GRE tunnels, a Logical Switch
(LS) identifier is used to differentiate tenant traffic on
inter-HV tunnels. A mesh of tunnels is created to other
Hypervisors in the cloud. These tunnels originate and terminate on
the tunneling bridge of each hypervisor. Port patching is done to
connect local VLANs on the integration bridge to inter-hypervisor
tunnels on the tunnel bridge.
For each virtual network realized as a VLAN or flat network, a
veth or a pair of patch ports is used to connect the local VLAN on
the integration bridge with the physical network bridge, with flow
rules adding, modifying, or stripping VLAN tags as necessary.
'''
# history
# 1.0 Initial version
# 1.1 Support Security Group RPC
# 1.2 Support DVR (Distributed Virtual Router) RPC
# 1.3 Added param devices_to_update to security_groups_provider_updated
# 1.4 Added support for network_update
target = oslo_messaging.Target(version='1.4')
def __init__(self, bridge_classes, conf=None):
'''Constructor.
:param bridge_classes: a dict for bridge classes.
:param conf: an instance of ConfigOpts
'''
super(DBOVSNeutronAgent, self).__init__(bridge_classes, conf)
def setup_rpc(self):
self.plugin_rpc = OVSPluginApi(topics.PLUGIN)
self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
self.dvr_plugin_rpc = dvr_rpc.DVRServerRpcApi(topics.PLUGIN)
self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS)
# RPC network init
self.context = context.get_admin_context_without_session()
# Define the listening consumers for the agent
consumers = [[constants.TUNNEL, topics.UPDATE],
[constants.TUNNEL, topics.DELETE],
[topics.SECURITY_GROUP, topics.UPDATE],
[topics.DVR, topics.UPDATE],
[topics.NETWORK, topics.UPDATE]]
if self.l2_pop:
consumers.append([topics.L2POPULATION, topics.UPDATE])
self.connection = agent_rpc.create_consumers([self],
topics.AGENT,
consumers,
start_listening=False)
# NOTE(rtheis): This will initialize all workers (API, RPC,
# plugin service and OVN) with OVN IDL connections.
trigger = ovsdb_monitor.OvnWorker
self._nb_ovn, self._sb_ovn = impl_idl_ovn.get_ovn_idls(self, trigger)
def validate_local_ip(local_ip):
"""Verify if the ip exists on the agent's host."""
if not ip_lib.IPWrapper().get_device_by_ip(local_ip):
LOG.error(_LE("Tunneling can't be enabled with invalid local_ip '%s'."
" IP couldn't be found on this host's interfaces."),
local_ip)
raise SystemExit(1)
def validate_tunnel_config(tunnel_types, local_ip):
"""Verify local ip and tunnel config if tunneling is enabled."""
if not tunnel_types:
return
validate_local_ip(local_ip)
for tun in tunnel_types:
if tun not in constants.TUNNEL_NETWORK_TYPES:
LOG.error(_LE('Invalid tunnel type specified: %s'), tun)
raise SystemExit(1)
def prepare_xen_compute():
is_xen_compute_host = 'rootwrap-xen-dom0' in cfg.CONF.AGENT.root_helper
if is_xen_compute_host:
# Force ip_lib to always use the root helper to ensure that ip
# commands target xen dom0 rather than domU.
cfg.CONF.register_opts(ip_lib.OPTS)
cfg.CONF.set_default('ip_lib_force_root', True)
def main(bridge_classes):
prepare_xen_compute()
validate_tunnel_config(cfg.CONF.AGENT.tunnel_types, cfg.CONF.OVS.local_ip)
try:
agent = DBOVSNeutronAgent(bridge_classes, cfg.CONF)
except (RuntimeError, ValueError) as e:
LOG.error(_LE("%s Agent terminated!"), e)
sys.exit(1)
agent.daemon_loop()
| 1.203125
| 1
|
problem_solving/algorithms/strings/q11_funny_string.py
|
mxdzi/hackerrank
| 0
|
12784367
|
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'funnyString' function below.
#
# The function is expected to return a STRING.
# The function accepts STRING s as parameter.
#
def funnyString(s):
# Write your code here
r = s[::-1]
if [abs(ord(s[i]) - ord(s[i+1])) for i in range(len(s) -1)] == [abs(ord(r[i]) - ord(r[i+1])) for i in range(len(r) -1)]:
return "Funny"
else:
return "Not Funny"
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
q = int(input().strip())
for q_itr in range(q):
s = input()
result = funnyString(s)
fptr.write(result + '\n')
fptr.close()
| 4.03125
| 4
|
src/admin/models/tag_rules_es.py
|
mstrechen/advanced-news-scraper
| 0
|
12784368
|
from elasticsearch_dsl import Document, Integer, Percolator, Text
from admin.models.articles_es import SUPPORTED_LANGUAGES_ANALYZER_MAPPING
class TagRuleEs(Document):
tag_id = Integer()
query = Percolator()
title = Text(required=False, fields={
lang: Text(analyzer=analyzer)
for lang, analyzer in SUPPORTED_LANGUAGES_ANALYZER_MAPPING.items()
})
content = Text(required=False, fields={
lang: Text(analyzer=analyzer)
for lang, analyzer in SUPPORTED_LANGUAGES_ANALYZER_MAPPING.items()
})
class Index:
name = 'tag_rules'
settings = {
"number_of_shards": 2,
}
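# Hedged usage sketch (assumes a configured elasticsearch_dsl default connection; the
# query body below is purely illustrative):
# TagRuleEs.init()   # creates the 'tag_rules' index with this mapping
# TagRuleEs(tag_id=1, query={"match": {"content": "election"}}).save()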
| 2.28125
| 2
|
setup.py
|
Tim55667757/FuzzyRoutines
| 1
|
12784369
|
<reponame>Tim55667757/FuzzyRoutines<filename>setup.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
import os
__version__ = '1.0' # identify main version of FuzzyRoutines
devStatus = '4 - Beta' # default build status, see: https://pypi.python.org/pypi?%3Aaction=list_classifiers
if 'TRAVIS_BUILD_NUMBER' in os.environ and 'TRAVIS_BRANCH' in os.environ:
print("This is TRAVIS-CI build")
print("TRAVIS_BUILD_NUMBER = {}".format(os.environ['TRAVIS_BUILD_NUMBER']))
print("TRAVIS_BRANCH = {}".format(os.environ['TRAVIS_BRANCH']))
__version__ += '.{}{}'.format(
'' if 'release' in os.environ['TRAVIS_BRANCH'] or os.environ['TRAVIS_BRANCH'] == 'master' else 'dev',
os.environ['TRAVIS_BUILD_NUMBER'],
)
devStatus = '5 - Production/Stable' if 'release' in os.environ['TRAVIS_BRANCH'] or os.environ['TRAVIS_BRANCH'] == 'master' else devStatus
else:
print("This is local build")
__version__ += '.dev0' # set version as major.minor.localbuild if local build: python setup.py install
print("FuzzyRoutines build version = {}".format(__version__))
setup(
name='fuzzyroutines',
version=__version__,
description='FuzzyRoutines library contains some routines for work with fuzzy logic operators, fuzzy datasets and fuzzy scales.',
long_description='You can see detailed user manual here: https://devopshq.github.io/FuzzyRoutines/',
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
url='https://devopshq.github.io/FuzzyRoutines/',
download_url='https://github.com/devopshq/FuzzyRoutines.git',
classifiers=[
'Development Status :: {}'.format(devStatus),
'Environment :: Console',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3.6',
],
keywords=[
'fuzzy',
'logic',
'math',
'science',
'research',
'fuzzylogic',
'fuzzyset',
],
packages=[
'fuzzyroutines',
],
setup_requires=[
],
tests_require=[
'pytest',
],
install_requires=[
],
package_data={
'': [
'./fuzzyroutines/*'
'./tests/*'
'LICENSE',
'README.md',
],
},
zip_safe=True,
)
| 2.015625
| 2
|
sudoku.py
|
Y-o-Z/games
| 0
|
12784370
|
<reponame>Y-o-Z/games
# Sudoku → Japanese acronym (from Greek akros, onyma = tip, name)
# (= dos(denial-of-service)-attack on the brain ..)
# 9x9 grid with 9 3x3 subgrids, each subgrid, row, and column contains the numbers 1..9 once (+ unique solution)
from collections import deque
legal_entries = [-1, 1, 2, 3, 4, 5, 6, 7, 8, 9]
class Sudoku:
"""
# list of 81 entries from top-left to bottom-right (-1 → empty)
"""
def __init__(self, entries):
self.entries = entries[:]
self.original_entries = entries[:]
def __repr__(self):
rows = ["sudoku:\n"]
for row in range(9):
current_row = []
for column in range(9):
current_row.append(str(self.entries[row * 9 + column]))
if current_row[-1][0] != "-":
current_row[-1] = " " + current_row[-1]
rows.append(" ".join(current_row) + "\n")
return "".join(rows)
def reset(self):
self.entries = self.original_entries[:]
def no_duplicates(self, indices):
# True if all entries (not -1) pointed to by indices are different
entries = [self.entries[index] for index in indices]
for entry in range(1, 9 + 1):
if entries.count(entry) > 1:
return False
return True
def no_errors(self):
# no illegal entries
if not (len(self.entries) == 81 and all(entry in legal_entries for entry in self.entries)):
return False
# all subgrids correct
for subgrid in range(9):
indices = get_indices_from_subgrid(subgrid)
if not self.no_duplicates(indices):
return False
# all rows correct
for row in range(9):
indices = get_indices_from_row(row)
if not self.no_duplicates(indices):
return False
# all columns correct
for column in range(9):
indices = get_indices_from_column(column)
if not self.no_duplicates(indices):
return False
return True
def possible_entries_for_index(self, index):
if self.entries[index] != -1:
return [self.entries[index]]
impossible_entries = set()
subgrid = get_subgrid_from_index(index)
row = get_row_from_index(index)
column = get_column_from_index(index)
for i in get_indices_from_subgrid(subgrid):
impossible_entries.add(self.entries[i])
for i in get_indices_from_row(row):
impossible_entries.add(self.entries[i])
for i in get_indices_from_column(column):
impossible_entries.add(self.entries[i])
possible_entries = []
for entry in range(1, 9 + 1):
if entry not in impossible_entries:
possible_entries.append(entry)
return possible_entries
def solve_next_entry(self, quiet=False):
# return 1 for new_entry, 0 for no_new_entry, -1 for error
possible_entries_for_indices = []
# index level
for index in range(81):
if self.entries[index] == -1:
possible_entries = self.possible_entries_for_index(index)
if len(possible_entries) == 1: # success
if not quiet:
row = get_row_from_index(index)
column = get_column_from_index(index)
print(f"New entry at row {row}, column {column}: {possible_entries[0]} [index level]")
self.entries[index] = possible_entries[0]
return 1
possible_entries_for_indices.append(possible_entries)
else:
possible_entries_for_indices.append([self.entries[index]])
for index in range(81):
if not len(possible_entries_for_indices[index]): # error check (empty)
if not quiet:
row = get_row_from_index(index)
column = get_column_from_index(index)
print(f"Error at row {row}, column {column}: no possible entries")
return -1
# subgrid level
for subgrid in range(9):
indices = get_indices_from_subgrid(subgrid)
subgrid_entries = set()
for index in indices:
subgrid_entries.add(self.entries[index])
missing_subgrid_entries = []
for entry in range(1, 9 + 1):
if entry not in subgrid_entries:
missing_subgrid_entries.append(entry)
for missing_entry in missing_subgrid_entries:
count = 0
pos = -1
for index in indices:
if missing_entry in possible_entries_for_indices[index]:
count += 1
pos = index
if count == 1: # success
if not quiet:
row = get_row_from_index(pos)
column = get_column_from_index(pos)
print(f"New entry at row {row}, column {column}: {missing_entry} [subgrid level]")
self.entries[pos] = missing_entry
return 1
elif count == 0: # error check
if not quiet:
print(f"Error at subgrid {subgrid}: no place for {missing_entry}")
return -1
# row level
for row in range(9):
indices = get_indices_from_row(row)
row_entries = set()
for index in indices:
row_entries.add(self.entries[index])
missing_row_entries = []
for entry in range(1, 9 + 1):
if entry not in row_entries:
missing_row_entries.append(entry)
for missing_entry in missing_row_entries:
count = 0
pos = -1
for index in indices:
if missing_entry in possible_entries_for_indices[index]:
count += 1
pos = index
if count == 1: # success
if not quiet:
column = get_column_from_index(pos)
print(f"New entry at row {row}, column {column}: {missing_entry} [row level]")
self.entries[pos] = missing_entry
return 1
elif count == 0: # error check
if not quiet:
print(f"Error at row {row}: no place for {missing_entry}")
return -1
# column level
for column in range(9):
indices = get_indices_from_column(column)
column_entries = set()
for index in indices:
column_entries.add(self.entries[index])
missing_column_entries = []
for entry in range(1, 9 + 1):
if entry not in column_entries:
missing_column_entries.append(entry)
for missing_entry in missing_column_entries:
count = 0
pos = -1
for index in indices:
if missing_entry in possible_entries_for_indices[index]:
count += 1
pos = index
if count == 1: # success
if not quiet:
row = get_row_from_index(pos)
print(f"New entry at row {row}, column {column}: {missing_entry} [column level]")
self.entries[pos] = missing_entry
return 1
elif count == 0: # error check
if not quiet:
print(f"Error at column {column}: no place for {missing_entry}")
return -1
if not quiet:
print("No new entry.")
return 0
def solve(self):
dq = deque()
dq.append(self.entries[:])
while len(dq):
"""
print(f"dq: len = {len(dq)}")
for dq_entry in dq:
print_entries(dq_entry)
"""
dq_entries = dq.pop()
self.entries = dq_entries
while True:
flag = self.solve_next_entry(quiet=True)
if flag == 1: # new_entry
# print("\nFLAG = 1\n")
continue
elif flag == 0: # no new entry (→ dfs)
# terminated?
if all(entry in legal_entries[1:] for entry in self.entries):
dq.clear()
# print("\nDone.\n")
break
else:
for index in range(81):
if self.entries[index] == -1:
possible_entries = self.possible_entries_for_index(index)
for entry in possible_entries:
self.entries[index] = entry
dq.append(self.entries[:]) # [:] !
# print(f"\nFLAG = 0, entries added → len(dq) = {len(dq)}\n")
break
else: # flag = -1, error (← dfs)
# print("\nFLAG = -1\n")
break
assert self.no_errors()
if all(entry in legal_entries[1:] for entry in self.entries):
print("\nSudoku solved.", end="\n\n")
print(self)
else:
print("\nSudoku not solved. (why?)", end="\n\n")
print(self)
def get_subgrid_from_index(index):
# top-left to bottom-right, 0-based
row = get_row_from_index(index)
column = get_column_from_index(index)
if row < 3:
if column < 3:
return 0
elif column < 6:
return 1
else:
return 2
elif row < 6:
if column < 3:
return 3
elif column < 6:
return 4
else:
return 5
else:
if column < 3:
return 6
elif column < 6:
return 7
else:
return 8
def get_row_from_index(index):
# 0-based
if index < 9:
return 0
elif index < 18:
return 1
elif index < 27:
return 2
elif index < 36:
return 3
elif index < 45:
return 4
elif index < 54:
return 5
elif index < 63:
return 6
elif index < 72:
return 7
else:
return 8
def get_column_from_index(index):
# 0-based
return index % 9
def get_indices_from_subgrid(subgrid):
# 0-based
x = subgrid % 3
if subgrid < 3:
y = 0
elif subgrid < 6:
y = 1
else:
y = 2
indices = []
for row in range(3 * y, 3 * y + 3):
for column in range(3 * x, 3 * x + 3):
indices.append(row * 9 + column)
return indices
def get_indices_from_row(row):
# 0-based
return list(range(row * 9, row * 9 + 9))
def get_indices_from_column(column):
# 0-based
return list(range(column, column + 8 * 9 + 1, 9))
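# Hedged worked examples for the index helpers above (0-based, row-major 9x9 grid):
# get_row_from_index(40) -> 4, get_column_from_index(40) -> 4,
# get_subgrid_from_index(40) -> 4 (the centre cell),
# get_indices_from_column(0) -> [0, 9, 18, ..., 72].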
def print_entries(entries):
# quick & dirty
print("-" * 10)
for row in range(9):
print(*entries[row * 9: row * 9 + 9])
print("-" * 10)
if __name__ == "__main__":
print("Enter the sudoku row by row (top to bottom, -1 → empty):", end="\n\n")
sudoku_entries = []
for r in range(9):
r_entries = input(f"Enter the row {r} entries as a comma separated tuple:\n")
sudoku_entries.extend(list(map(int, r_entries.split(","))))
sudoku = Sudoku(sudoku_entries)
print("\nPlease verify your starting entries:")
print(sudoku)
while True:
answer = input("Do you want to make a correction? ('yes')\n")
        if answer.lower() in ["yes", '"yes"', "y"]:
correction = input("Enter the row, column, and corrected entry as a comma separated tuple: (0-based)\n")
correction = list(map(int, correction.split(",")))
sudoku.entries[(correction[0]) * 9 + (correction[1])] = correction[2]
print("\n", sudoku, sep="")
else:
if not sudoku.no_errors():
print("\nPlease verify your entries again:")
print(sudoku)
else:
break
sudoku.solve()
| 3.5625
| 4
|
tests/test_JsonEncDecC.py
|
schrma/garminmanager
| 0
|
12784371
|
import datetime
import numpy as np
import garminmanager.utils.JsonEncDecC
import garminmanager.utils.FileWriterC
from garminmanager.enumerators.EnumHealthTypeC import EnumHealtTypeC
def test_encode_decode():
raw_data = garminmanager.RawDataC.RawDataC()
my_dates1 = {
datetime.datetime(2019,4,11,1,00) : 100,
datetime.datetime(2019,4,11,2,00) : np.nan,
datetime.datetime(2019,4,11,3,00) : 100
}
for key, value in my_dates1.items():
raw_data.add_x(key)
raw_data.add_y(value)
raw_data.set_data_type(EnumHealtTypeC.heartrate)
json_enc_dec = garminmanager.utils.JsonEncDecC.JsonEncDecC()
json_enc_dec.set_input_data(raw_data)
json_string = json_enc_dec.encode()
json_enc_dec.set_input_json(json_string)
file_writer = garminmanager.utils.FileWriterC.FileWriterC()
file_writer.set_filename('test.json')
file_writer.set_text(json_string)
file_writer.write_text_to_file()
d = file_writer.read_json()
json_enc_dec.set_input_json(d)
json_enc_dec.decode()
raw_data_output = json_enc_dec.get_data()
x = raw_data_output.get_x()
y = raw_data_output.get_y()
org_x = raw_data.get_x()
org_y = raw_data.get_y()
    y[np.isnan(y)] = -100
    org_y[np.isnan(org_y)] = -100
raw_data.set_y(y)
raw_data_output.set_y(org_y)
assert raw_data == raw_data_output
| 2.734375
| 3
|
XTWholeMountSegmentation.py
|
Menigedegna/Image-processing
| 1
|
12784372
|
<filename>XTWholeMountSegmentation.py
# -*- coding: utf-8 -*-
#
#
#==============================================================================
# Objectives of this PythonXT for Imaris:
# Segments nucleus, nucleolus and chromocenters into surfaces in DAPI channel,
# Segment FISH or Immunostaining signal into spots or surfaces
# Exports items position and features into a .csv tables and save .ims file
# Note: This script is designed for whole mount immunostaining and FISH of plant tissue counterstained with DAPI. 3D images are acquired using high-resolution microscopy. Parameters in XTWholeMountSegmentation_Parameters.csv need to be adjusted to the resolution of the image.
# Creator: <NAME>, UZH
# Created on 11.04.2018
#==============================================================================
#
# <CustomTools>
# <Menu>
# <Item name="XTWholeMountSegmentation" icon="Python" tooltip="XTWholeMountSegmentation">
# <Command>PythonXT::XTWholeMountSegmentation(%i)</Command>
# </Item>
# </Menu>
# </CustomTools>
import numpy as np
import pandas as pd
import logging
#import tkinter as tk
import tkMessageBox
import os
from Tkinter import *
#from tkinter import filedialog
import tkFileDialog
import time
import datetime
import ImarisLib
import sys
sys.path.insert(0, "H:\Python") #I need to add this to import futures on the VMs
import concurrent.futures
import XTSegmentNuclei as SN
#==============================================================================
# Start of extension
#==============================================================================
# Function to create spots
def CreateSpots(vFactory, aPositionsXYZ, SpotName, groupContainer, aRadius):
vSpot = vFactory.CreateSpots()
aIndicesT = [0.0]*len(aPositionsXYZ)
aRadii = [aRadius]*len(aPositionsXYZ)
vSpot.Set(aPositionsXYZ,aIndicesT,aRadii)
vSpot.SetName(SpotName)
groupContainer.AddChild(vSpot, -1)
return vSpot
# Function to quantify spots in image
def getSurfaceVertices(numberIndex, vSurface, FileIndex, Result_pathway, SurfaceName,GroupOfObjects, LayerSize):
vFactory = vImaris.GetFactory().CreateFactory()
NumberOfSurfaces = vSurface.GetNumberOfSurfaces()
logtime('Get vertices START - image_'+str(FileIndex))
#==============================================================================
# Get vertices per suface and add a layer for each nucleus
#
#==============================================================================
# vVertices=[]
# vNumberOfVerticesPerSurface=[]
# vTriangles=[]
# vNumberOfTrianglesPerSurface=[]
# vNormals=[]
NewShellSurface = vImaris.GetFactory().CreateSurfaces()
for SelectedID in range(NumberOfSurfaces):
aVertices = vSurface.GetVertices(SelectedID)
# vNumberOfVerticesPerSurface=vNumberOfVerticesPerSurface+[len(aVertices)]
vTriangles = vSurface.GetTriangles(SelectedID)
# vTriangles.extend(aTriangles)
# vNumberOfTrianglesPerSurface=vNumberOfTrianglesPerSurface+[len(aTriangles)]
vNormals = vSurface.GetNormals(SelectedID)
# vNormals.extend(aNormals)
vCenterOfMass = vSurface.GetCenterOfMass(SelectedID)[0]
# aVertices = pd.DataFrame(aVertices)
# vStep = len(vVertices)/NumberOfVertices
# if vStep>2:
# SelectIndex = range(0, len(vVertices), vStep)
# vVertices = vVertices.iloc[SelectIndex,]
# LayerMat=[x-y for x,y in zip(LayerMat, vCenterOfMass)]
# LayerMat=pd.DataFrame(LayerMat).T
# LayerMat = pd.concat([LayerMat]*len(aVertices))
# aVertices=aVertices.add(LayerMat)
# vVertices = aVertices.values.tolist()
vVertices=[[x[0]-LayerSize if x[0]<vCenterOfMass[0] else x[0]+LayerSize, x[1]-LayerSize if x[1]<vCenterOfMass[1] else x[1]+LayerSize, x[2]-LayerSize if x[2]<vCenterOfMass[2] else x[2]+LayerSize] for x in aVertices]
# vVertices = [[x[0]+LayerSize, x[1]+LayerSize, x[2]+LayerSize] for x in aVertices]
vTimeIndexPerSurface = 0
NewShellSurface.AddSurface (vVertices, vTriangles,vNormals,vTimeIndexPerSurface)
# NewShellSurface.AddSurfacesList(vVertices,vNumberOfVerticesPerSurface,vTriangles,vNumberOfTrianglesPerSurface,vNormals,vTimeIndexPerSurface)
NewShellSurface.SetName("Adjusted nuclei")
GroupOfObjects.AddChild(NewShellSurface, -1)
logtime('Get vertices END - image_'+str(FileIndex))
#==============================================================================
# This function: removes all objects created in scene
#==============================================================================
def RemoveObjectsCreated(vScene, ListOfContainers):
for i in ListOfContainers:
vScene.RemoveChild(i)
def GetSegmentedObjects(DistanceOptions,FileIndex):
logtime('Object detection START - image_'+str(FileIndex))
NucleusSurface = NucleolusSurface = ChromocenterSurface = None
vScene = vImaris.GetSurpassScene()
vFactory = vImaris.GetFactory()
numberSceneInstance = vScene.GetNumberOfChildren()
ContainerNotFound = True
i = 0
while i <= numberSceneInstance and ContainerNotFound:
selection = vScene.GetChild(i)
vContainer = vFactory.ToDataContainer(selection)
if vContainer is not None:
ContainerNotFound = False
i=i+1
ContainerName = vContainer.GetName()
if vContainer is not None and ContainerName=="Segmented objects":
numberSceneInstance = vContainer.GetNumberOfChildren()
i = 0
# ImmunoSpotNames = []
# ImmunoSpotList = []
while i <= numberSceneInstance :
selection = vContainer.GetChild(i)
vObject = vFactory.ToSurfaces(selection)
if vObject is not None:
if vObject.GetName() == "Nucleus" and DistanceOptions[0] : NucleusSurface = vObject
if vObject.GetName() == "Nucleolus" and DistanceOptions[1] : NucleolusSurface = vObject
if vObject.GetName() == "Chromocenters" and DistanceOptions[2] : ChromocenterSurface = vObject
# vObject = vFactory.ToSpots(selection)
# if vObject is not None:
# PositionTable = vObject.GetPositionsXYZ()
# if "Immuno" in vObject.GetName() and PositionTable is not None:
# ImmunoSpotNames = ImmunoSpotNames + [vObject.GetName()]
# ImmunoSpotList = ImmunoSpotList + [vObject]
i+=1
logtime('Object detection END - image_'+str(FileIndex))
# return NucleusSurface, NucleolusSurface, ChromocenterSurface, ImmunoSpotNames, ImmunoSpotList
return NucleusSurface, NucleolusSurface, ChromocenterSurface
#==============================================================================
# This function:
# Segments chanenls into surface (nucleus, nucleolus and chromocenters) or spots (RNA PolII foci),
# Create masks with the surfaces created
# Count the number of spots in each mask
# And saves results
#==============================================================================
def GetImageFeatures(FileIndex, Result_pathway, vFileName, DoSegmentation, ParametersList, BatchProcessing, vFullFileName, DistanceOptions):
global vImaris
global numberIndex
global DAPIChannel
global ListOfContainers
ListOfContainers = [] # To keep track of all the containers I will create in scene, so that I can remove them after saving image
ChromocenterSurface = NucleusSurface = NucleolusSurface = None
vImage = vImaris.GetDataSet()
vScene = vImaris.GetSurpassScene()
vFactory = vImaris.GetFactory().CreateFactory()
if vImage is not None:
numberIndex = vImage.GetSizeC()
date = str(datetime.datetime.now()).split(" ")[0]
date += " 00:00:00"
vImage.SetTimePoint(0, date) # time point needs to be set for each image to avoid creating different time frames when closing and opening a new image.
#Ask user to set FISH channel to segment into surface
if FileIndex == 1:
FISHChannelList= SN.Ask_user(numberIndex, "FISH")
numberIndex +=1
GroupOfObjects = vFactory.CreateDataContainer()
GroupOfObjects.SetName('Segmented objects')
#==============================================================================
# SEGMENT SURFACES
#==============================================================================
if DoSegmentation:
IsImageCorrect, NucleusSurface,ChromocenterSurface, NucleolusSurface, DAPIChannel, GroupOfObjects=SN.SegmentAndGetFeatures(vImage, FileIndex, Result_pathway, vFileName, DistanceOptions, ParametersList, BatchProcessing, vFullFileName, True)
logtime('DAPI Segmentation END - image_'+str(FileIndex))
else:
NucleusSurface, NucleolusSurface, ChromocenterSurface = GetSegmentedObjects(DistanceOptions, FileIndex)
#Adjust nucleus surfaces
if ParametersList[8]>0:
getSurfaceVertices(numberIndex, NucleusSurface, FileIndex, Result_pathway, "Nucleus ", GroupOfObjects, ParametersList[8])
#Segment FISH channel into surfaces
for FISHChannel in FISHChannelList:
fishChannelColor = vImage.GetChannelColorRGBA(FISHChannel)
SN.SmoothChannel(FISHChannel, NucleusSurface, numberIndex, ParametersList, "Smoothed FISH Channel", fishChannelColor)
SmoothedChannel=numberIndex-1
FISHSurface = SN.SegHighIntensity(SmoothedChannel, NucleusSurface,"High FISH intensity","FISH Surface",numberIndex, GroupOfObjects, ParametersList)
GroupOfObjects.AddChild(FISHSurface, -1)
vScene.AddChild(GroupOfObjects, -1)
logtime('FISH Channel-'+str(FISHChannel)+' Segmentation END - image_'+str(FileIndex))
ListOfContainers.append(GroupOfObjects)
ResultFileName="FISHCh"+str(FISHChannel)+"_SurfaceFeatures"
SN.ExtractSurfaceFeatures([FISHSurface],["FISH"], Result_pathway, FileIndex, vImage, ResultFileName, NucleusSurface)
logtime('FISH Channel-'+str(FISHChannel)+' surface features saved END - image_'+str(FileIndex))
vPath = os.path.join(Result_pathway, vFileName+".ims")
vImaris.FileSave(vPath, "")
else:
print ("No image detected in file: "+vFileName)
quit()
if len(ListOfContainers)>0 and BatchProcessing:
RemoveObjectsCreated(vScene, ListOfContainers)
# os.remove(vFullFileName)
return vImage is None
#==============================================================================
#==============================================================================
# Functions required to log and display progress of the plugin
#==============================================================================
def logtime(aTitle):
# type: (object) -> object
global gLasttime
curtime = datetime.datetime.now()
if (gLasttime is not None):
diff = (curtime-gLasttime).total_seconds()
else:
diff = '-'
gLasttime = curtime
print (curtime.ctime(), '[', str(diff), ']', aTitle)
#==============================================================================
# Pop-up windows to ask user to set folder pathway and channels to segment
#==============================================================================
class Checkbar(Frame):
def __init__(self, parent=None, picks=[], side=LEFT, anchor=W):
Frame.__init__(self, parent)
self.vars = []
for pick in picks:
var = IntVar()
chk = Checkbutton(self, text=pick, variable=var)
chk.pack(side=side, anchor=anchor, expand=YES)
self.vars.append(var)
def state(self):
return map((lambda var: var.get()), self.vars)
def allstates():
global User_selection
global root
if sum(list(lng.state()))>0:
User_selection=list(lng.state())
root.destroy()
else:
Message="Please select one of the options."
Label(root, text=Message).grid(row=1)
def PopUpMessage(OPTIONS, Messge):
global root
global lng
root = Tk()
label_text = Messge
option = OPTIONS
Label(root, text=label_text).grid(row=0)
lng = Checkbar(root, option)
lng.grid(row=2)
lng.config(relief=GROOVE, bd=2)
Button(root, text='Quit', fg="red", command=quit).grid(row=4)
Button(root, text='Submit', fg="darkgreen", command=allstates).grid(row=5)
root.mainloop()
#==============================================================================
# Function to get parameters for this plugin from file: XTWholeMountSegmentation_Parameters.csv
#==============================================================================
def GetPluginParameters():
currentDirectory = os.getcwd()
AllFilesInDirectory = os.listdir(currentDirectory)
ParametersList = None
if "XTWholeMountSegmentation_Parameters.csv" in AllFilesInDirectory:
ParameterData = pd.read_csv("XTWholeMountSegmentation_Parameters.csv", sep=";", header='infer',decimal='.')
if "Value" in ParameterData.columns:
ParametersList = list(ParameterData["Value"])
else:
tkMessageBox.showinfo(title="Error", message="Please make sure the 'XTWholeMountSegmentation_Parameters.csv' file contains a column 'Value' containing the values necessary for this plugin.")
quit()
else:
tkMessageBox.showinfo(title="Error", message="Please make sure there is a 'XTWholeMountSegmentation_Parameters.csv' in the folder containing the 'XTCountSpotPerShell.py'.")
quit()
return ParametersList
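# Illustrative sketch of the parameter file layout expected by GetPluginParameters().
# Only the ';' separator and the 'Value' column are taken from the code above; the
# parameter names and row count below are made-up examples, not the real file:
#   Parameter;Value
#   GaussianFilterWidth;0.5
#   IntensityThreshold;100
# list(ParameterData["Value"]) would then give [0.5, 100.0].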
#==============================================================================
# Function required to create folder to save the plugins results
#==============================================================================
#Function to create a folder under the same directory as the images to save files that are produced
def CreateDirectoryToSaveFiles(Result_pathway):
if os.path.exists(Result_pathway):
tkMessageBox.showinfo(title="Alert", message="Please save the folder 'XTWholeMountSegmentation_Result' under another name first!")
quit()
else:
os.makedirs(Result_pathway)
#==============================================================================
# Main function:
# Connects to Imaris and get image
# Process images
#==============================================================================
def XTWholeMountSegmentation(aImarisId):
logging.basicConfig(level=logging.DEBUG, filename= "log[XTWholeMountSegmentation].log")
try:
#Declare global variables
global gLasttime
global vImaris
global SelectedChanelIndex
gLasttime = None
FISHChannel = None
logtime('Extension XTWholeMountSegmentation START')
print ("Hello!")
FileNameList = []
# Step1: Connect to Imaris
#==============================================================================
vImarisLib = ImarisLib.ImarisLib()
# Get an imaris object with id aImarisId
vImaris = vImarisLib.GetApplication(aImarisId)
ParametersList = GetPluginParameters()
SN.vImaris=vImaris
SN.gLasttime=gLasttime
# """BEGIN LOOP OVER ALL IMAGES"""
# Open File and get filename
if vImaris is not None :
logtime('Connected to Imaris')
#Set camera to orthographic view
vImaris.GetSurpassCamera().SetOrthographic(True)
vImaris.GetSurpassCamera().Fit() #Sets the zoom and the position so that the bounding box of all visible objects fits into the window
ListOfOptions = [["Batch of images", "Just one image"], ["Segment & Get Features", "Get Features"], ["Nucleus", "Nucleolus", "Chromocenters"]]
ListOfMessages = ["Do you wish to run the script on a batch of images or just on one image already opened?", "Do you wish to do automated segmentation?", "Which nucleus features do you wish to segment?"]
UserParameterList = []
for i in range(len(ListOfOptions)):
OPTIONS = ListOfOptions[i]
Messge = ListOfMessages[i]
PopUpMessage(OPTIONS, Messge)
UserParameterList = UserParameterList + [User_selection]
BatchProcessing = UserParameterList[0][0]
DoSegmentation = UserParameterList[1][0]
DistanceOptions = UserParameterList[2]
FileIndex = 1 #This variable is used to count the number of files analysed
if BatchProcessing :
# Step2: Here the user is asked to set the path to the folder containing image to be analysed
#==============================================================================
root1 = Tk()
Image_folder = tkFileDialog.askdirectory(parent=root1, initialdir="/",title='Please select the directory containing the images to be processed. \n The folder containing the resulting files will be saved in this directory.')
root1.destroy()
FolderName = os.path.basename(Image_folder)
Result_pathway = os.path.join(Image_folder, "XTSegmentNuclei_Result")
CreateDirectoryToSaveFiles(Result_pathway)
AllFilesInDirectory = os.listdir(Image_folder) #get all files in the Image_folder directory
logtime('Get all files')
AllFilesToBeProcessed = [i for i in AllFilesInDirectory if i.endswith('.ims') or i.endswith('.ics')] #select files with .ims or .ics extensions
TotalNumberFile = len(AllFilesToBeProcessed)
                logtime('Start batch processing')
if TotalNumberFile > 0 :
for vFileName in AllFilesToBeProcessed:
try:
vFullFileName = os.path.join(Image_folder, vFileName)
vImaris.FileOpen(vFullFileName, "")
# with concurrent.futures.ProcessPoolExecutor() as executor:
                            ImageIsEmpty = GetImageFeatures(FileIndex, Result_pathway, vFileName, DoSegmentation, ParametersList, BatchProcessing, vFullFileName, DistanceOptions)
if not ImageIsEmpty :
FileIndex += 1
FileNameList.append(vFileName)
except:
logging.exception("Image="+vFileName+" :")
print ("There is an issue with file : "+ vFileName)
continue #it will not treat this image and move on to the next one
df=pd.DataFrame(FileNameList)
vPathToSaveTables = os.path.join(Result_pathway, "FileName.csv")
df.to_csv(path_or_buf=vPathToSaveTables, na_rep='', float_format=None, columns=None, header=True, index=False, decimal='.')
else:
tkMessageBox.showinfo(title="Alert", message="There is no .ims or .ics file detected in the selected folder.")
quit()
else :
TotalNumberFile = 1
vFileName = vImaris.GetCurrentFileName()
ImageIsEmpty = True
if vFileName !="" :
vFileName = vImaris.GetCurrentFileName()
vFilePath = os.path.dirname(vFileName)
vFullFileName = os.path.join(vFilePath, vFileName)
Result_pathway = os.path.join(vFilePath, "XTWholeMountSegmentation_Result")
CreateDirectoryToSaveFiles(Result_pathway)
vFileName = os.path.split(vFileName)[1]
ImageIsEmpty = GetImageFeatures(FileIndex, Result_pathway, vFileName, DoSegmentation, ParametersList, BatchProcessing, vFullFileName, DistanceOptions)
if not ImageIsEmpty :
FileIndex += 1
FileNameList.append(vFileName)
if ImageIsEmpty :
tkMessageBox.showinfo(title="Alert", message="No image is detected. \n Please open an image and select on 'CountSpotPerShell' again.")
quit()
logtime('XTWholeMountSegmentation extension done')
print ("All tasks have been completed successfully. \n Resulting files are saved in the folder XTWholeMountSegmentation_Result")
raw_input("Press Enter to terminate.")
else:
tkMessageBox.showinfo(title="Alert", message="Imaris application is not found!")
logtime('Extension XTWholeMountSegmentation END')
except:
logging.exception("Oops:")
#==============================================================================
# End of extension
#==============================================================================
| 2.046875
| 2
|
cli/user.py
|
sgtdarkskull/passwordManager
| 0
|
12784373
|
import os
import sql_query as sql
from styling import color
import password_checker
import password_generator
# Login Main class
class Login:
def __init__(self):
self.username = ''
self.password = ''
self.is_login = False
self.website = ''
self.account = ''
try:
os.system('clear')
print(color.BOLD + color.RED + 'User Login'.center(30) + color.END)
print('')
self.username = input('Enter Username : ')
self.password = input('Enter Password : ')
auth = sql.get_user_Password(self.username)
if auth == self.password:
self.is_login = True
else:
self.is_login = False
except:
print('An Error Occurred!')
return
def get_Website(self, arr):
for n, web in enumerate(arr):
print(f'{n + 1}. {web}')
if len(arr) > 0:
n = input('Select Website : ')
if n.isnumeric() and len(n) == 1:
self.website = arr[int(n) - 1]
else:
self.website = n
return True
else:
print('No password is saved')
return False
def get_Account(self, arr):
for n, acc in enumerate(arr):
print(f'{n + 1}. {acc}')
n = input('Select Account : ')
if n.isnumeric() and len(n) == 1:
self.account = arr[int(n) - 1]
else:
self.account = n
def select(self):
websites = sql.all_websites(self.username)
present = self.get_Website(websites)
if present:
accounts = sql.all_accounts(self.username, self.website)
self.get_Account(accounts)
return True
return False
def add_password(self):
os.system('clear')
print(color.BOLD + color.RED + 'Add Password'.center(30) + color.END + '\n')
url = input('Website/Url : ')
identifier = input('Account/Username/Identifier : ')
password = input('Password : ')
sql.add_password(url, identifier, password, self.username)
def delete_password(self):
os.system('clear')
        print(color.BOLD + color.RED + 'Delete Password'.center(30) + color.END)
print('')
if self.select():
sql.delete(self.username, self.website, self.account)
def modify_password(self):
os.system('clear')
print(color.BOLD + color.RED + 'Modify Password'.center(30) + color.END)
print('')
if self.select():
if input('Want to generate Password (Y/N): ').lower() == 'y':
password_generator.main()
new_pass = input('New Password : ')
sql.modify(self.username, self.website, self.account, new_pass)
def show_password(self):
os.system('clear')
print(color.BOLD + color.RED + 'Show Password'.center(30) + color.END)
print('')
if self.select():
sql.show_password(self.username, self.website, self.account)
return
def main(self):
while True:
os.system('clear')
print(color.BOLD + color.RED + self.username.center(30) + color.END)
print('')
print('1. Add Password')
print('2. Delete a Password')
print('3. Modify a Password')
print('4. Show Password')
print('5. Generate a strong password')
print('6. Check my password if secure')
print('7. Log out')
ch = input().lower()
if ch == '1' or ch == 'add':
self.add_password()
elif ch == '2' or ch == 'delete':
self.delete_password()
elif ch == '3' or ch == 'modify':
self.modify_password()
elif ch == '4' or ch == 'show':
self.show_password()
elif ch == '5' or ch == 'generate':
password_generator.main()
elif ch == '6' or ch == 'check':
password_checker.main()
elif ch == '7' or ch == 'exit':
return
else:
print('Invalid Choice!')
en = input("'Y' to Log out, or press Enter....").lower()
if en == 'y':
return
else:
pass
def user_login():
user = Login()
if user.is_login:
user.main()
else:
print('Login Failure, Wrong password or username..')
return
def user_signup():
try:
os.system('clear')
print(color.BOLD + color.RED + 'User Sign Up'.center(30) + color.END)
print('')
username = input('Enter Username : ')
password = input('Enter Password : ')
if password == input('Confirm Password : '):
sql.sign_up(username, password)
else:
print('Wrong Password...\nSign up failure!')
return
except:
print('An Error Occurred!')
return
| 3.5625
| 4
|
scripts/localizer/text_processor.py
|
Theodeus/www.html5rocks.com
| 4
|
12784374
|
# Copyright 2012 Google Inc. All Rights Reserved.
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts a given string from Django template to HTML and back."""
__author__ = ('<EMAIL> (<NAME>)')
import re
UNTRANSLATABLE_BEGIN = r'<!--DO_NOT_TRANSLATE_BLOCK>'
UNTRANSLATABLE_END = r'</DO_NOT_TRANSLATE_BLOCK-->'
CONTENT_BEGIN = """
<!--CONTENT_BLOCK ***********************************************************-->
"""
CONTENT_END = """
<!--/END_CONTENT_BLOCK ******************************************************-->
"""
class TextProcessor(object):
"""Translates text from Django template format to l10nable HTML and back.
Properties:
    django: The Django template representation of the text.
html: The HTML representation of the text.
"""
def __init__(self, django='', html=''):
self._django = django
self._html = html
@property
def django(self):
if not self._django:
self._django = self.__HtmlToDjango(self._html)
return self._django
@property
def html(self):
if not self._html:
self._html = self.__DjangoToHtml(self._django)
return self._html
def __DjangoToHtml(self, text):
"""Given a Django template's content, return HTML suitable for l10n.
Args:
text: The text to convert from Django to HTML.
Returns:
A string containing the newly HTMLized content.
* Django tags like `{% tag %}` will be rendered inside an HTML comment:
`<!--DO_NOT_TRANSLATE_BLOCK>{% tag %}</DO_NOT_TRANSLATE_BLOCK-->`.
* `pre`, `script`, and `style` tags' content will be likewise wrapped:
`<pre><!--DO_NOT_TRANSLATE_BLOCK>Text!</DO_NOT_TRANSLATE_BLOCK-->`.
* The article's content will be wrapped:
<!--CONTENT_BLOCK ***************************************************-->
Content goes here!
<!--END_CONTENT_BLOCK ***********************************************-->
"""
django_tag_before = r'(?P<tag>{%.+?%})'
django_tag_after = r'%s\g<tag>%s' % (UNTRANSLATABLE_BEGIN,
UNTRANSLATABLE_END)
open_notranslate_before = r'(?P<tag><(?:pre|script|style)[^>]*?>)'
open_notranslate_after = r'\g<tag>%s' % UNTRANSLATABLE_BEGIN
close_notranslate_before = r'(?P<tag></(?:pre|script|style)[^>]*?>)'
close_notranslate_after = r'%s\g<tag>' % UNTRANSLATABLE_END
open_content = r'{% block content %}'
close_content = r'{% endblock %}'
# Walk the given text line by line
to_return = []
in_content = False
for line in text.splitlines(True):
# Process Django tags
line = re.sub(django_tag_before, django_tag_after, line)
# Preprocess script/pre/style blocks
line = re.sub(open_notranslate_before, open_notranslate_after, line)
line = re.sub(close_notranslate_before, close_notranslate_after, line)
# Preprocess content block
if re.search(open_content, line):
line = CONTENT_BEGIN
in_content = True
elif re.search(close_content, line) and in_content:
line = CONTENT_END
in_content = False
to_return.append(line)
return ''.join(to_return)
def __HtmlToDjango(self, text):
"""Given localized HTML, return text formatted as a Django template.
Args:
text: The text to convert from HTML to Django.
Returns:
A string containing the newly Djangoized content, stripped of leading
and trailing whitespace.
See the documentation for `django_to_html` and imagine the inverse. :)
"""
# Strip UNTRANSLATABLE_BEGIN and UNTRANSLATABLE_END comments.
text = text.replace(UNTRANSLATABLE_BEGIN, '')
text = text.replace(UNTRANSLATABLE_END, '')
# Replace CONTENT_BEGIN with `{% block content %}` and CONTENT_END with
# `{% endblock %}`.
text = text.replace(CONTENT_BEGIN, '{% block content %}\n')
text = text.replace(CONTENT_END, '{% endblock %}')
    # Return the result, stripped of leading/trailing whitespace.
return text.strip()
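# Minimal usage sketch (illustrative only, not part of the original module): round-trip
# a tiny Django template through the HTML representation and back.
def _text_processor_example():
  django_src = '{% block content %}\n<p>Hello</p>\n{% endblock %}\n'
  html = TextProcessor(django=django_src).html      # content wrapped in CONTENT_BLOCK markers
  return TextProcessor(html=html).django            # back to the Django template, stripped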
| 2.09375
| 2
|
py/lib/utils/util.py
|
zjZSTU/YOLO_v1
| 8
|
12784375
|
# -*- coding: utf-8 -*-
"""
@date: 2020/2/29 7:31 PM
@file: util.py
@author: zj
@description:
"""
import numpy as np
import torch
import sys
def error(msg):
print(msg)
sys.exit(0)
def get_device():
return torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def iou(pred_box, target_box):
"""
    Compute the IoU between a candidate proposal and the annotated bounding boxes
    :param pred_box: shape [4]
    :param target_box: shape [N, 4]
:return: [N]
"""
if len(target_box.shape) == 1:
target_box = target_box[np.newaxis, :]
xA = np.maximum(pred_box[0], target_box[:, 0])
yA = np.maximum(pred_box[1], target_box[:, 1])
xB = np.minimum(pred_box[2], target_box[:, 2])
yB = np.minimum(pred_box[3], target_box[:, 3])
    # Compute the intersection area
intersection = np.maximum(0.0, xB - xA + 1) * np.maximum(0.0, yB - yA + 1)
    # Compute the areas of the two bounding boxes
boxAArea = (pred_box[2] - pred_box[0] + 1) * (pred_box[3] - pred_box[1] + 1)
boxBArea = (target_box[:, 2] - target_box[:, 0] + 1) * (target_box[:, 3] - target_box[:, 1] + 1)
scores = intersection / (boxAArea + boxBArea - intersection)
return scores
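# Minimal usage sketch (illustrative only): IoU of one predicted box against two targets.
def _iou_example():
    pred = np.array([0, 0, 10, 10])
    targets = np.array([[0, 0, 10, 10],   # identical box -> IoU = 1.0
                        [5, 5, 15, 15]])  # partial overlap -> IoU ~ 0.17
    return iou(pred, targets)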
def compute_ious(rects, bndboxs):
iou_list = list()
for rect in rects:
scores = iou(rect, bndboxs)
iou_list.append(max(scores))
return iou_list
def parse_output(outputs, S, B, C):
"""
    For each grid cell, keep the detected bounding box with the highest confidence
:param outputs: (N, S*S, B*5+C)
:return: cates, probs, bboxs
cates: (N, S*S)
probs: (N, S*S)
bboxs: (N, S*S, 4)
"""
N = outputs.shape[0]
# (N*S*S, C)
probs = outputs[:, :, :C].reshape(-1, C)
# (N*S*S, B)
confidences = outputs[:, :, C:(C + B)].reshape(-1, B)
# (N*S*S, 4*B)
bboxs = outputs[:, :, (C + B):].reshape(-1, 4 * B)
    # Compute which class each grid cell belongs to (N*S*S)
    cates = torch.argmax(probs, dim=1)
    # Compute the highest confidence in each grid cell (N*S*S)
    idxs = torch.argmax(confidences, dim=1)
    # Compute the classification probability (N*S*S)
    cate_probs = probs[range(len(cates)), cates] * confidences[range(len(idxs)), idxs]
    # Extract the corresponding bounding box coordinates (N*S*S, 4)
    obj_boxs = bboxs.reshape(-1, B, 4)[range(len(idxs)), idxs]
    return cates.reshape(N, S * S), cate_probs.reshape(N, S * S), obj_boxs.reshape(N, S * S, 4)
def bbox_corner_to_center(bboxs):
"""
[xmin, ymin, xmax, ymax] -> [x_center, y_center, w, h]
:param bboxs: [N, 4]
"""
assert len(bboxs.shape) == 2
tmp = np.zeros(bboxs.shape)
# w
tmp[:, 2] = bboxs[:, 2] - bboxs[:, 0] + 1
# h
tmp[:, 3] = bboxs[:, 3] - bboxs[:, 1] + 1
# x_center
tmp[:, 0] = bboxs[:, 0] + tmp[:, 2] / 2
# y_center
tmp[:, 1] = bboxs[:, 1] + tmp[:, 3] / 2
return tmp
def bbox_center_to_corner(bboxs):
"""
[x_center, y_center, w, h] -> [xmin, ymin, xmax, ymax]
:param bboxs: [N, 4]
"""
assert len(bboxs.shape) == 2
tmp = np.zeros(bboxs.shape)
# xmin
tmp[:, 0] = bboxs[:, 0] - bboxs[:, 2] / 2
# ymin
tmp[:, 1] = bboxs[:, 1] - bboxs[:, 3] / 2
# xmax
tmp[:, 2] = bboxs[:, 0] + bboxs[:, 2] / 2
# ymax
tmp[:, 3] = bboxs[:, 1] + bboxs[:, 3] / 2
return tmp
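# Illustrative round-trip sketch: corner -> center -> corner. The two helpers are not
# exact inverses because bbox_corner_to_center adds +1 to width/height (pixel convention).
def _bbox_conversion_example():
    corners = np.array([[10., 20., 30., 60.]])   # [xmin, ymin, xmax, ymax]
    centers = bbox_corner_to_center(corners)     # [[20.5, 40.5, 21., 41.]]
    return bbox_center_to_corner(centers)        # [[10., 20., 31., 61.]]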
def deform_bboxs(pred_bboxs, data_dict, S):
"""
:param pred_bboxs: [S*S, 4]
:return:
"""
scale_h, scale_w = data_dict['scale_size']
grid_w = scale_w / S
grid_h = scale_h / S
bboxs = np.zeros(pred_bboxs.shape)
for i in range(S * S):
row = int(i / S)
col = int(i % S)
x_center, y_center, box_w, box_h = pred_bboxs[i]
bboxs[i, 0] = (col + x_center) * grid_w
bboxs[i, 1] = (row + y_center) * grid_h
bboxs[i, 2] = box_w * scale_w
bboxs[i, 3] = box_h * scale_h
# (x_center, y_center, w, h) -> (xmin, ymin, xmax, ymax)
bboxs = bbox_center_to_corner(bboxs)
ratio_h, ratio_w = data_dict['ratio']
bboxs[:, 0] /= ratio_w
bboxs[:, 1] /= ratio_h
bboxs[:, 2] /= ratio_w
bboxs[:, 3] /= ratio_h
    # Clamp to the image boundaries
h, w = data_dict['src_size']
bboxs[:, 0] = np.maximum(bboxs[:, 0], 0)
bboxs[:, 1] = np.maximum(bboxs[:, 1], 0)
bboxs[:, 2] = np.minimum(bboxs[:, 2], w)
bboxs[:, 3] = np.minimum(bboxs[:, 3], h)
return bboxs.astype(int)
def nms(rect_list, score_list, cate_list, thresh=0.3):
"""
    Non-maximum suppression
    :param rect_list: list, shape [N, 4]
    :param score_list: list, shape [N]
    :param cate_list: list, shape [N]
"""
nms_rects = list()
nms_scores = list()
nms_cates = list()
rect_array = np.array(rect_list)
score_array = np.array(score_list)
cate_array = np.array(cate_list)
    # A single sort up front is sufficient
    # Sort by classification probability in descending order
idxs = np.argsort(score_array)[::-1]
rect_array = rect_array[idxs]
score_array = score_array[idxs]
cate_array = cate_array[idxs]
while len(score_array) > 0:
        # Add the bounding box with the highest classification probability
nms_rects.append(rect_array[0])
nms_scores.append(score_array[0])
nms_cates.append(cate_array[0])
rect_array = rect_array[1:]
score_array = score_array[1:]
cate_array = cate_array[1:]
length = len(score_array)
if length <= 0:
break
        # Compute IoU against the remaining boxes
iou_scores = iou(np.array(nms_rects[len(nms_rects) - 1]), rect_array)
# print(iou_scores)
        # Remove bounding boxes whose overlap is >= thresh
idxs = np.where(iou_scores < thresh)[0]
rect_array = rect_array[idxs]
score_array = score_array[idxs]
cate_array = cate_array[idxs]
return nms_rects, nms_scores, nms_cates
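# Minimal usage sketch (illustrative only): two heavily overlapping boxes and one distant
# box; nms keeps the higher-scoring box of the overlapping pair plus the distant one.
def _nms_example():
    rects = [[0, 0, 10, 10], [1, 1, 11, 11], [50, 50, 60, 60]]
    scores = [0.9, 0.8, 0.7]
    cates = [0, 0, 1]
    return nms(rects, scores, cates, thresh=0.3)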
| 2.359375
| 2
|
utils/derivative_calculator.py
|
nomagiclab/balancing-ball
| 4
|
12784376
|
from collections import deque
N = 3
# Calculates mean of last N derivatives.
class DerivativeCalculator:
def __init__(self):
# Value doesn't matter as we won't use it.
self.last_x = None
self.last_time = 0
self.dx_list = deque(iterable=[0 for _ in range(N + 1)], maxlen=N + 1)
# curr_dx is always equal to mean of _N last dxs.
self.curr_dx = 0
def update_derivative(self, new_x, new_time):
# Update dx list.
self.dx_list.pop()
if self.last_time == 0:
self.last_time = new_time
if self.last_time != new_time:
self.dx_list.appendleft((new_x - self.last_x) / (new_time - self.last_time))
else:
self.dx_list.appendleft(0)
# Calculate moving average of last _N dx.
self.curr_dx = self.curr_dx + (self.dx_list[0] - self.dx_list[N]) / N
self.last_x = new_x
self.last_time = new_time
def get_current_derivative(self) -> float:
return self.curr_dx
def get_and_update_derivative(self, new_x, new_time) -> float:
self.update_derivative(new_x, new_time)
return self.get_current_derivative()
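# Minimal usage sketch (illustrative only): feeding x(t) = 2*t gives a derivative that
# settles at 2.0 once the N-sample window has filled.
def _derivative_calculator_example():
    calc = DerivativeCalculator()
    dx = 0.0
    for t in range(1, 6):
        dx = calc.get_and_update_derivative(2 * t, t)
    return dx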
| 3.75
| 4
|
pita/urls.py
|
Doktor/saltpita.com
| 0
|
12784377
|
from django.conf import settings
from django.conf.urls import url
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path
from pita import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL)
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
comic_patterns = [
path('<slug:slug>/<int:number>', views.view_comic, name='comic_page'),
path('<slug:slug>', views.view_comic, name='comic'),
path('', views.comic_index, name='comic_index'),
]
urlpatterns += [
path('', views.index, name='index'),
path('contact/', views.ContactView.as_view(), name='contact'),
path('comics/', include(comic_patterns)),
url(r'^(?P<slug>[a-z0-9]+(?:-[a-z0-9]+)*)$', views.page, name='page'),
]
| 1.9375
| 2
|
rsinc/rsinc.py
|
asch99/rsinc
| 48
|
12784378
|
# rsinc : two-way / bi-directional sync for rclone
import argparse
import os
import subprocess
import logging
import re
from datetime import datetime
import ujson
import halo
from pyfiglet import Figlet
from .sync import sync, calc_states
from .rclone import make_dirs, lsl
from .packed import pack, merge, unpack, get_branch, empty
from .classes import Flat
from .colors import grn, ylw, red
from .config import config_cli
from .__init__ import __version__
SPIN = halo.Halo(spinner="dots", placement="right", color="yellow")
CONFIG_FILE = os.path.expanduser("~/.rsinc/config.json") # Default config path
custom_fig = Figlet(font="graffiti")
print(custom_fig.renderText("Rsinc"))
print("Copyright 2019 <NAME> (CHURCHILL COLLEGE)")
print("This is free software with ABSOLUTELY NO WARRANTY")
def qt(string):
return '"' + string + '"'
def read(file):
"""Reads json do dict and returns dict."""
try:
with open(file, "r") as fp:
d = ujson.load(fp)
if not isinstance(d, dict):
raise ValueError("old file format")
except Exception as e:
emsg = "{} is corrupt ({}). ".format(file, e)
if file.endswith("master.json"):
emsg += "Delete it and restart rsinc to rebuild it."
raise TypeError(emsg)
return d
def write(file, d):
"""Writes dict to json"""
with open(file, "w") as fp:
ujson.dump(d, fp, sort_keys=True, indent=2)
def strtobool(string):
return string.lower() in STB
def escape(string):
tmp = []
for char in string:
tmp.append(ESCAPE.get(char, char))
return "".join(tmp)
# ****************************************************************************
# * Set-up/Parse *
# ****************************************************************************
def formatter(prog):
return argparse.HelpFormatter(prog, max_help_position=52)
parser = argparse.ArgumentParser(formatter_class=formatter)
parser.add_argument("folders", help="Folders to sync", nargs="*")
parser.add_argument("-d", "--dry", action="store_true", help="Do a dry run")
parser.add_argument(
"-c", "--clean", action="store_true", help="Clean directories"
)
parser.add_argument(
"-D", "--default", help="Sync defaults", action="store_true"
)
parser.add_argument(
"-r", "--recovery", action="store_true", help="Enter recovery mode"
)
parser.add_argument(
"-a", "--auto", help="Don't ask permissions", action="store_true"
)
parser.add_argument(
"-p", "--purge", help="Reset history for all folders", action="store_true"
)
parser.add_argument(
"-i", "--ignore", help="Find .rignore files", action="store_true"
)
parser.add_argument(
"-v", "--version", action="version", version=f"rsinc version: {__version__}"
)
parser.add_argument(
"--config", action="store_true", help="Enter interactive CLI configurer"
)
parser.add_argument(
"--config_path", help="Path to config file (default ~/.rsinc/config.json)"
)
parser.add_argument(
"args",
nargs=argparse.REMAINDER,
help="Global flags to pass to rclone commands",
)
args = parser.parse_args()
# ****************************************************************************
# * Configuration *
# ****************************************************************************
# Read config and assign variables.
if args.config_path is None:
config_path = CONFIG_FILE
else:
config_path = args.config_path
if not os.path.isfile(config_path) or args.config:
config_cli(config_path)
config = read(config_path)
CASE_INSENSATIVE = config["CASE_INSENSATIVE"]
DEFAULT_DIRS = config["DEFAULT_DIRS"]
LOG_FOLDER = config["LOG_FOLDER"]
HASH_NAME = config["HASH_NAME"]
TEMP_FILE = config["TEMP_FILE"]
MASTER = config["MASTER"]
BASE_R = config["BASE_R"]
BASE_L = config["BASE_L"]
FAST_SAVE = config["FAST_SAVE"]
# Set up logging.
logging.basicConfig(
filename=LOG_FOLDER + datetime.now().strftime("%Y-%m-%d"),
level=logging.DEBUG,
datefmt="%H:%M:%S",
format="%(asctime)s %(levelname)s: %(message)s",
)
# ****************************************************************************
# * Main Program *
# ****************************************************************************
def main():
# Entry point for 'rsinc' as terminal command.
recover = args.recovery
dry_run = args.dry
auto = args.auto
# Decide which folder(s) to sync.
if args.default:
tmp = DEFAULT_DIRS
elif len(args.folders) == 0:
tmp = [os.getcwd()]
else:
tmp = []
for f in args.folders:
if os.path.isabs(f):
tmp.append(os.path.normpath(f))
else:
tmp.append(os.path.abspath(f))
folders = []
for f in tmp:
if BASE_L not in f:
print(ylw("Rejecting:"), f, "not in", BASE_L)
elif not os.path.isdir(f):
if strtobool(
input(
ylw("WARN: ")
+ f"{f} does not exist in local, sync anyway? "
)
):
folders.append(os.path.relpath(f, BASE_L))
else:
folders.append(os.path.relpath(f, BASE_L))
# Get & read master.
if args.purge or not os.path.exists(MASTER):
print(ylw("WARN:"), MASTER, "missing, this must be your first run")
write(MASTER, {'history':[], 'ignores':[], 'nest':empty()})
master = read(MASTER)
history = master['history']
ignores = master['ignores']
nest = master['nest']
history = set(history)
# Find all the ignore files in lcl and save them.
if args.ignore:
ignores = []
for dirpath, dirnames, filenames in os.walk(BASE_L, followlinks=False):
for name in filenames:
if name == '.rignore':
ignores.append(os.path.join(dirpath, name))
print("Found:", ignores)
write(MASTER, {'history':list(history), 'ignores':ignores, 'nest':nest})
# Detect crashes.
if os.path.exists(TEMP_FILE):
corrupt = read(TEMP_FILE)["folder"]
if corrupt in folders:
folders.remove(corrupt)
folders.insert(0, corrupt)
recover = True
print(red("ERROR") + ", detected a crash, recovering", corrupt)
logging.warning("Detected crash, recovering %s", corrupt)
# Main loop.
for folder in folders:
print("")
path_lcl = os.path.join(BASE_L, folder)
path_rmt = os.path.join(BASE_R, folder)
# Determine if first run.
if os.path.join(BASE_L, folder) in history:
print(grn("Have:"), qt(folder) + ", entering sync & merge mode")
else:
print(ylw("Don't have:"), qt(folder) + ", entering first_sync mode")
recover = True
# Build relative regular expressions
rmt_regexs, lcl_regexs, plain = build_regexs(
BASE_L, BASE_R, path_lcl, ignores
)
print("Ignore:", plain)
# Scan directories.
SPIN.start(("Crawling: ") + qt(folder))
lcl = lsl(path_lcl, HASH_NAME)
rmt = lsl(path_rmt, HASH_NAME)
old = Flat(path_lcl)
SPIN.stop_and_persist(symbol="✔")
lcl.tag_ignore(lcl_regexs)
rmt.tag_ignore(rmt_regexs)
# First run & recover mode.
if recover:
print("Running", ylw("recover/first_sync"), "mode")
else:
print("Reading last state")
branch = get_branch(nest, folder)
unpack(branch, old)
calc_states(old, lcl)
calc_states(old, rmt)
print(grn("Dry pass:"))
total, new_dirs, _, _ = sync(
lcl,
rmt,
old,
recover,
dry_run=True,
case=CASE_INSENSATIVE,
flags=args.args,
)
print("Found:", total, "job(s)")
print("With:", len(new_dirs), "folder(s) to make")
if not dry_run and (
auto or total == 0 or strtobool(input("Execute? "))
):
if total != 0 or recover:
print(grn("Live pass:"))
write(TEMP_FILE, {"folder": folder})
make_dirs(new_dirs)
_, _, lcl, rmt, = sync(
lcl,
rmt,
old,
recover,
total=total,
case=CASE_INSENSATIVE,
dry_run=dry_run,
flags=args.args,
)
SPIN.start(grn("Saving: ") + qt(folder))
# Get post sync state
if total == 0:
print("Skipping crawl as no jobs")
now = lcl
elif FAST_SAVE:
print("Skipping crawl as FAST_SAVE")
now = lcl
else:
now = lsl(path_lcl, HASH_NAME)
now.tag_ignore(lcl_regexs)
now.rm_ignore()
# Merge into history.
history.add(os.path.join(BASE_L, folder))
history.update(d for d in now.dirs)
# Merge into nest
merge(nest, folder, pack(now))
write(MASTER, {'history':list(history), 'ignores':ignores, 'nest':nest})
subprocess.run(["rm", TEMP_FILE])
SPIN.stop_and_persist(symbol="✔")
if args.clean:
SPIN.start(grn("Pruning: ") + qt(folder))
subprocess.run(["rclone", "rmdirs", path_rmt])
subprocess.run(["rclone", "rmdirs", path_lcl])
SPIN.stop_and_persist(symbol="✔")
recover = args.recovery
print("")
print(grn("All synced!"))
def build_regexs(BASE_L, BASE_R, path_lcl, files):
lcl_regex = []
rmt_regex = []
plain = []
for file in files:
for f_char, p_char in zip(os.path.dirname(file), path_lcl):
if f_char != p_char:
break
else:
if os.path.exists(file):
with open(file, "r") as fp:
for line in fp:
if line.rstrip() == "":
continue
mid = os.path.dirname(file)
mid = mid[len(BASE_L) + 1:]
mid = os.path.join(escape(mid), line.rstrip())
lcl = os.path.join(escape(BASE_L), mid)
rmt = os.path.join(escape(BASE_R), mid)
plain.append(mid)
lcl_regex.append(re.compile(lcl))
rmt_regex.append(re.compile(rmt))
return rmt_regex, lcl_regex, plain
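# Illustrative sketch only (not called by rsinc): how a single ".rignore" pattern line is
# combined with the escaped base paths into the compiled regexes returned above. The base
# paths and pattern here are made-up examples.
def _rignore_regex_example():
    base_l, base_r = "/home/user/sync", "remote:sync"
    mid = os.path.join(escape("photos"), r".*\.tmp")
    return re.compile(os.path.join(escape(base_l), mid)), re.compile(os.path.join(escape(base_r), mid))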
STB = (
"yes",
"ye",
"y",
"1",
"t",
"true",
"",
"go",
"please",
"fire away",
"punch it",
"sure",
"ok",
"hell yes",
)
ESCAPE = {
"\\": "\\\\",
".": "\\.",
"^": "\\^",
"$": "\\$",
"*": "\\*",
"+": "\\+",
"?": "\\?",
"|": "\\|",
"(": "\\(",
")": "\\)",
"{": "\\{",
"}": "\\}",
"[": "\\[",
"]": "\\]",
}
| 2.234375
| 2
|
tests/configure_bst_feature_api_ct.py
|
r-cc-c/ops-broadview
| 0
|
12784379
|
<reponame>r-cc-c/ops-broadview<gh_stars>0
'''
*
* (C) Copyright Broadcom Corporation 2015
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
*
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
'''
#!/usr/bin/env python
import os
import sys
import ConfigParser
import json
import pprint
from bstUtil import *
from BstRestService import *
import bstRest as rest
class configure_bst_feature_api_ct(object):
def __init__(self,ip,port,params="",debug=False):
self.obj = BstRestService(ip,port)
self.debug = debug
self.params = params
def step1(self,jsonData):
"""Configure BST feature"""
try:
pprint.pprint(jsonData)
resp = self.obj.postResponse(jsonData)
if resp[0] == "INVALID":
return "FAIL","Connection refused/Invalid JSON request... Please check the ip address provided in 'ini' file/BroadViewAgent is running or not/JSON data is valid or not ..."
except Exception,e:
return "FAIL","Unable to perform the rest call with given JSON data, Occured Exception ... "+str(e)
try:
self.obj.debugJsonPrint(self.debug,jsonData,resp)
except:
return "FAIL","Invalid JSON Response data received"
self.nextStepCheckParams=jsonData
if returnStatus(resp[0], 200)[0] == "FAIL": return "FAIL","Obtained {0}".format(resp[0])
return returnStatus(resp[0], 200,"","Unable to get the 200 OK response, got reponse "+str(resp[0]))
def step2(self,jsonData):
"""Get BST Feature Status"""
try:
resp = self.obj.postResponse(jsonData)
if resp[0] == "INVALID":
return "FAIL","Connection refused/Invalid JSON request... Please check the ip address provided in 'ini' file/BroadViewAgent is running or not/JSON data is valid or not ..."
except Exception,e:
return "FAIL","Unable to perform the rest call with given JSON data, Occured Exception ... "+str(e)
try:
self.obj.debugJsonPrint(self.debug,jsonData,resp)
except:
return "FAIL","Invalid JSON Response data received"
if returnStatus(resp[0], 200)[0] == "FAIL": return "FAIL","Obtained {0}".format(resp[0])
if not resp[1]: return "FAIL","Got null response"
resp_ = resp[1].replace('Content-Type: text/json', '')
data_dict = json.loads(resp_)
if not "result" in data_dict: return "FAIL","No Result key in Response JSON Data"
resultDict = data_dict['result']
jsonDict = json.loads(self.nextStepCheckParams)
paramsDict = jsonDict['params']
valsCheck = True if cmp(resultDict, paramsDict) == 0 else False
added,removed,modified,same=dict_compare(resultDict,paramsDict)
if not added and not removed and not modified:
msg = ""
else:
params_list=paramsDict.keys()
diff_list=list(set(params_list) - same)
msg="params "+" ".join(diff_list)+" contains wrong values in response."
return returnStatus(valsCheck,True,"",msg)
step3, step4 = step1, step2
step5, step6 = step1, step2
step7, step8 = step1, step2
step9, step10 = step1, step2
step11, step12 = step1, step2
step13, step14 = step1, step2
step15, step16 = step1, step2
step17, step18 = step1, step2
step19, step20 = step1, step2
step21, step22 = step1, step2
def getSteps(self):
return sorted([ i for i in dir(self) if i.startswith('step') ], key=lambda item: int(item.replace('step','')))
def main(ip_address,port):
jsonText = ConfigParser.ConfigParser()
cwdir, f = os.path.split(__file__)
jsonText.read(cwdir + '/testCaseJsonStrings.ini')
json_dict = dict(jsonText.items('configure_bst_feature_api_ct'))
params=json_dict.get("paramslist","")
tcObj = configure_bst_feature_api_ct(ip_address,port,params,debug=True)
stepResultMap = {}
printStepHeader()
for step in tcObj.getSteps():
if step in json_dict:
resp=getattr(tcObj,step)(json_dict[step])
desc=getattr(tcObj,step).__doc__
stepResultMap[step] = resp
printStepResult(step,desc,resp[0], resp[1])
else:
resp=getattr(tcObj,step)()
desc=""
stepResultMap[step] = resp
printStepResult(step,desc,resp[0], resp[1])
if resp[0] == 'FAIL': break
printStepFooter()
statusMsgTuple = [ s for s in stepResultMap.values() if s[0] == "FAIL" ]
if statusMsgTuple:
return False, statusMsgTuple[0][1]
return True, "Test Case Passed"
if __name__ == '__main__':
main()
| 1.804688
| 2
|
orders/apps.py
|
Marlinekhavele/Ordering-pizza
| 0
|
12784380
|
<gh_stars>0
from django.apps import AppConfig
from django.contrib.auth.models import User
from accounts.models import Customer
from django.db.models.signals import post_save
from django.utils.translation import ugettext_lazy as _
from orders.signals import create_customer_order, save_customer_order
class OrdersConfig(AppConfig):
name = 'orders'
verbose_name = _('Order')
def ready(self):
post_save.connect(create_customer_order, sender=User)
post_save.connect(save_customer_order, sender=User)
| 2
| 2
|
updater_main.py
|
luyu103713/variantDB_updater
| 0
|
12784381
|
<filename>updater_main.py
import optparse
import pymysql
import os
from lib import readFile,featuresRelyOn
from features import feature_process,map_back_to_hg19
from cut_file import count_time
class updater_opt:
def __init__(self):
parser = optparse.OptionParser()
parser.add_option("-i", "--input", dest="input", help="input file(absolute or relative)")
parser.add_option("-o", "--output", dest="output", help="output file path(absolute or relative)")
parser.add_option("-n", "--filename", dest="filename", help="job name or id")
parser.add_option("-v", "--variant_type", dest="variant_type", help="optional: variant_type,1:hg19(default),2.Mutation CDS,3.Protein variant,4.Protein structure mutation,5.hg38")
parser.add_option("-c", "--feature_config", dest="feature_config", help="optional: Custom features calculation")
parser.add_option("-s", "--split_type", dest="split_type", help="optional:how to split input file.\n t:tab(default)\n c:comma")
parser.add_option("-t", "--title", dest="title", help="optional:Has title or not.\n 1:no(default)\n 2:yes")
self.options, self.args = parser.parse_args()
#print(self.options,self.args)
def verification(self):
if not self.options.input or not self.options.output or not self.options.filename:
            exit('ERROR: must supply input, output and file id parameters!\nType "python updater.py -h" for help')
def check_rely(config_dict,rely_dict):
# check rely_on_dict itself, recursive algorithm to fix:
#print(config_dict)
rely_count = 1
while rely_count != 0:
rely_count = 0
for key in rely_dict:
for rely_key in rely_dict[key]:
if rely_key in rely_dict:
if len(list(set(rely_dict[key] + rely_dict[rely_key]))) != len(rely_dict[key]):
rely_count = len(list(set(rely_dict[key] + rely_dict[rely_key]))) - len(rely_dict[key])
rely_dict[key] = list(set(rely_dict[key] + rely_dict[rely_key]))
#match rely on #{'biodbnet': ['transvar'], 'transfic': ['annovar', 'biodbnet', 'transvar'], 'oncokb': ['transvar']}
for k in rely_dict:
if k in config_dict:
if config_dict[k]:
for rely_key in rely_dict[k]:
config_dict[rely_key] = True
return config_dict
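# Illustrative sketch (not part of the pipeline): check_rely() expands transitive
# dependencies, so enabling 'transfic' below also switches on 'annovar', 'biodbnet'
# and, through 'biodbnet', 'transvar'. The dict values are made up for the example.
def _check_rely_example():
    example_config = {'transfic': True, 'annovar': False, 'biodbnet': False, 'transvar': False}
    example_rely = {'transfic': ['annovar', 'biodbnet'], 'biodbnet': ['transvar']}
    return check_rely(example_config, example_rely)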
def match_config(config_dict): # Logic of calculation order!!
normal_methods = ['transvar','annovar','biodbnet','transfic','oncokb','candra','fathmm_cancer','to_pdb_structure']
rely_dict = featuresRelyOn.relyOnDict
del_list = []
for k in config_dict: #first delete key not right in config
if k not in normal_methods:
del_list.append(k)
for k in del_list:
config_dict.pop(k)
for k in config_dict: #2nd , if not False,we do it
if config_dict[k] != 'False':
config_dict[k] = True
else:
config_dict[k] = False
for k in normal_methods: #3rd , the methods not in config, we do it
if k not in config_dict:
config_dict[k] = True
config_dict = check_rely(config_dict,rely_dict)
return config_dict
def index_input(input_file,split_symbol,hasTitle,output_path,jobid):
f = open(input_file,'r')
ls = f.readlines()
fw = open(output_path+ '/' + jobid + '/' + jobid+'_index.input','w')
if hasTitle:
col = ls[0]
nl = 'index' + split_symbol + col
fw.write(nl)
ls = ls[1:]
index = 0
for l in ls:
nl = str(index) + split_symbol + l
fw.write(nl)
index += 1
f.close()
fw.close()
@count_time
def main():
main_opt = updater_opt()
main_opt.verification()
input_file = main_opt.options.input
output_path = main_opt.options.output
jobid = main_opt.options.filename
if not os.path.exists(output_path):
os.makedirs(output_path)
if not os.path.exists(output_path+ '/' + jobid):
os.makedirs(output_path + '/' + jobid)
if main_opt.options.split_type == 'c':
split_symbol = ','
else:
split_symbol = '\t'
if main_opt.options.title == '2':
hasTitle = True
else:
hasTitle = False
if main_opt.options.variant_type == '4':
variant_type = 'protein'
elif main_opt.options.variant_type == '2':
variant_type ='cds'
elif main_opt.options.variant_type == '3':
variant_type = 'aa'
elif main_opt.options.variant_type == '5':
variant_type = 'hg38'
else:
variant_type = 'hg19'
#print(main_opt.options.feature_config)
if main_opt.options.feature_config:
feature_config = main_opt.options.feature_config
f_config = open(feature_config,'r')
ls = f_config.readlines()
config_dict = {}
for l in ls:
l = l.strip()
temp = l.split(':')
config_dict[temp[0]] = temp[1]
config_dict = match_config(config_dict)
else:
config_dict = None
#print(feature_config)
#print('\n'.join([''.join([('VariantDB'[(x-y) % len('VariantDB')] if ((x*0.05)**2+(y*0.1)**2-1)**3-(x*0.05)**2*(y*0.1)**3 <= 0else' ') for x in range(-30, 30)]) for y in range(30, -30, -1)]))
#print('\n')
print('Begin work : ')
if variant_type != 'protein': # begin from hg19 or pdb_structures /protein module is a independent part
real_input_file = map_back_to_hg19(input_file,split_symbol,variant_type,hasTitle,output_path,jobid)
split_symbol = "\t" # use out.tsv for input
index_input(input_file,split_symbol,hasTitle,output_path,jobid)
error_code,error_massage,var_list,file_base_list= readFile.readFileFromInput(real_input_file,'variant',split_symbol,hasTitle) # now only read variant
if error_code:
exit(error_massage)
#print(var_list)
print('Variant number : ' + str(len(var_list)))
print(config_dict)
feature_process(var_list,output_path,jobid,config_dict)
#print()
#result_dict = collect_result(output_path,jobid)
if __name__ == '__main__':
main()
| 2.25
| 2
|
sorting_algorithms/quicksort_Lomuto.py
|
Pinkwjp/Hello-Algorithms
| 0
|
12784382
|
<reponame>Pinkwjp/Hello-Algorithms
"""quicksort with Lomuto partition scheme"""
from random import randint
from typing import List
def partition(A: List[int], low, high) -> int:
"""divide the list of numbers into two partitions
return the index of the pivot number
choosing the rightmost element in the range as pivot
(Lomuto partition scheme)
"""
pivot = A[high]
i = low - 1
for j in range(low, high):
if A[j] <= pivot:
i += 1
A[i], A[j] = A[j], A[i]
A[i+1], A[high] = A[high], A[i+1]
return i+1
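# Worked example (illustrative comment only): partition([3, 7, 1, 5, 4], 0, 4) picks
# pivot 4, moves the elements <= 4 (3 and 1) to the front, then places the pivot at
# index 2, leaving [3, 1, 4, 5, 7] and returning 2.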
def quicksort(A: List[int], low, high) -> None:
"""sort a list of numbers"""
if low < high:
p = partition(A, low, high)
quicksort(A, low, p-1)
quicksort(A, p+1, high)
def test():
for _ in range(1000):
n = randint(2, 40)
numbers = [randint(0, 1000) for _ in range(n)]
result = sorted(list(numbers))
quicksort(numbers, 0, len(numbers)-1)
assert numbers == result
if __name__ == "__main__":
test()
| 4.15625
| 4
|
preprocessing/image.py
|
ahmedbally/Show-And-Tell-Keras
| 0
|
12784383
|
'''
Module to preprocess flickr8k image data
'''
import cv2
import numpy as np
import os
from _pickle import dump, load
from keras.applications.inception_v3 import InceptionV3
from keras.layers import Flatten
from keras.models import load_model
from keras.preprocessing import image
from keras.applications.inception_v3 import preprocess_input
from keras.models import Model
from PIL import Image
def load_images_as_arrays(directory):
img_array_dict = {}
for img_file in os.listdir(directory):
img_path = directory + '/' + img_file
img = Image.open(img_path)
x = np.array(img)
img_array_dict[os.path.splitext(img_file)[0]] = x
return img_array_dict
def extract_features(directory):
# base_model = InceptionV3(weights='imagenet')
# model = Model(inputs=base_model.input, outputs=base_model.get_layer('avg_pool').output)
#model = load_model('./preprocessing/CNN_encoder_100epoch.h5')
#top = Flatten()(model.output)
#model = Model(inputs=model.input, outputs=top)
#print(model.summary())
img_id = []
img_matrices = []
i = 0
for img_file in os.listdir(directory):
print(i, ":", i > 1999 and i < 8000 or i > 8999)
'''if (i > 1999 and i < 8000 or i > 8999):
i += 1
continue'''
img_path = directory + '/' + img_file
resizeDim = (256, 512)
img = cv2.imread(img_path)
img = cv2.resize(img, resizeDim, interpolation=cv2.INTER_AREA)
img = img.astype('float16') / 255
#x = img.reshape(img.shape + (1,))
img_id.append(os.path.splitext(img_file)[0])
img_matrices.append(img)
i += 1
img_matrices = np.array(img_matrices)
#img_features = model.predict(img_matrices, verbose=1)
return {'ids': img_id, 'features': img_matrices}
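# Illustrative sketch (not used by the module): the same resize + rescale that
# extract_features applies to each file, shown on a synthetic image instead of a
# file read from disk.
def _preprocess_example():
    dummy = np.random.randint(0, 256, size=(720, 480, 3), dtype=np.uint8)
    resized = cv2.resize(dummy, (256, 512), interpolation=cv2.INTER_AREA)
    return resized.astype('float16') / 255   # shape (512, 256, 3), values in [0, 1]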
def extract_feature_from_image(file_dir):
img = image.load_img(file_dir, target_size=(299, 299))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
# base_model = InceptionV3(weights='imagenet')
# model = Model(inputs=base_model.input, outputs=base_model.get_layer('avg_pool').output)
model = load_model('CNN_encoder_100epoch.h5')
return model.predict(x)
def load_features(dict_dir, dataset_dir, repeat_times=1):
assert (repeat_times >= 1)
img_ids = []
with open(dataset_dir, 'r') as f:
for line in f.readlines():
img_ids.append(os.path.splitext(line)[0])
features_dict = load(open(dict_dir, 'rb'))
#features_dict = extract_features('./datasets/Flickr8k_Dataset')
dataset_features = []
for img_id in img_ids:
fidx = features_dict['ids'].index(img_id)
dataset_features.append(np.vstack([features_dict['features'][fidx, :]] * repeat_times))
#dataset_features = np.vstack(dataset_features)
return np.array(dataset_features)
if __name__ == "__main__":
# pre-extract image features from Inception Net
image_directory = './datasets/Flickr8k_Dataset'
features_dict = extract_features(image_directory)
dump(features_dict, open('./datasets/features_dict2.pkl', 'wb'),protocol=4)
| 2.53125
| 3
|
voicesBack/api/FileConverter.py
|
wdariasm/voices
| 0
|
12784384
|
import subprocess
import psycopg2
import multiprocessing
import threading
import os
import pathlib
import sendgrid
import datetime
import shutil
import boto3
from botocore.exceptions import ClientError
from boto3.dynamodb.conditions import Key, Attr
from sendgrid.helpers.mail import *
MAX_WORKERS = 1
PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
VOICES_ROOT = os.path.join(PROJECT_ROOT, 'static')
dynamodb = boto3.resource('dynamodb')
def ConvertAudio(fileImput):
    # Validate fileImput: expects (id, original file path, author mail, contest url)
inFile = GetOrginalPath(fileImput[1])
outputFileName = VOICES_ROOT + '/voices/processed/' + GetFileName(inFile) + '.mp3'
if IsMp3(fileImput[1]):
print("--mp3Detected--")
shutil.copy(inFile, outputFileName)
PostProcessFile(fileImput[0], outputFileName, fileImput[2], fileImput[3])
return True
else:
print("--NonMp3Detected--")
result = subprocess.run(['ffmpeg', '-i', inFile, '-acodec', 'libmp3lame', outputFileName])
#command = "ffmpeg -i {0} -acodec libmp3lame {1}".format(fileImput, outputFileName)
#result = os.system(command)
        if result.returncode == 0:
PostProcessFile(fileImput[0], outputFileName, fileImput[2], fileImput[3])
return True
else:
return False
def PostProcessFile(fileId, outFile, mail, url):
UpdateProcessedFile(fileId, outFile)
#SendEmailSendgrid(mail, url)
SendEmailSeS(mail, url)
def GetOrginalPath(relativepath):
file = pathlib.PurePath(relativepath).name
return VOICES_ROOT + '/voices/original/' + file
def IsMp3(filePath):
file_extension = pathlib.PurePath(filePath).suffix
if file_extension == '.mp3':
return True
else:
return False
def GetFileName(filePath):
return pathlib.PurePath(filePath).stem
def UpdateProcessedFile(fileId, filePath):
print("updatefile--" + str(fileId))
conn = None
with threading.Lock():
try:
outputFileDB = 'voices/processed/' + GetFileName(filePath) + '.mp3'
table = dynamodb.Table('grabacion')
table.update_item(
Key={
'Archivo_Original': fileId
},
UpdateExpression='SET Estado_Archivo=:val1, Archivo_Final=:val2',
ExpressionAttributeValues={
':val1': 1,
':val2': outputFileDB
}
)
except(Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
def SendEmailSendgrid(mail, url):
print("usendmail--" + mail)
sg = sendgrid.SendGridAPIClient(
apikey=os.environ.get('SENDGRID_API_KEY')
)
from_email = Email("<EMAIL>")
to_email = Email(mail)
subject = "La voz ya esta disponible"
WS_IP = os.environ.get('IP_HOST')
content = Content(
"text/plain", "La voz ya se encuentra disponible en la página principal del concurso " +
WS_IP + "/concursar/" + url
)
mail = Mail(from_email, subject, to_email, content)
response = sg.client.mail.send.post(request_body=mail.get())
def SendEmailSeS(mail, url):
print("usendmai2--" + mail)
client = boto3.client('ses')
WS_IP = os.environ.get('IP_HOST') + '/concursar/' + url
mensaje = '<html><head></head><body><p>La voz ya se encuentra disponible en la página principal del ' +\
'concurso, visite</p> <a href="' + WS_IP + '">Supervoices</a> ' +\
'<p>para mas informacion</p></body></html>'
# Try to send the email.
try:
# Provide the contents of the email.
response = client.send_email(
Destination={
'ToAddresses': [
mail,
],
},
Message={
'Body': {
'Html': {
'Charset': 'UTF8',
'Data': mensaje,
},
},
'Subject': {
'Charset': 'UTF8',
'Data': 'La voz ya esta disponible',
},
},
Source='<EMAIL>',
#ConfigurationSetName='UTF8',
)
# Display an error if something goes wrong.
except ClientError as e:
print("catchSeS" + mail)
print(e.response['Error']['Message'])
else:
print("Email sent! Message ID:"),
print(response['MessageId'])
def GetPendingFiles():
conn = None
    items = None
try:
table = dynamodb.Table('grabacion')
response = table.query(
KeyConditionExpression=Key('Estado_Archivo').eq(0)
)
items = response['Items']
#cur.execute("""SELECT gr.id, gr."Archivo_Original", gr."Mail_Autor", co."Url" FROM api_grabacion gr, api_concurso co WHERE gr."Estado_Archivo" = 0 and gr."Concurso_id" = co.id""")
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
return items
def UpdateLogTable(startTime, endTime, totalFiles):
conn = None
try:
table = dynamodb.Table('grabacion')
table.put_item(
Item={
'startDate': startTime,
'endDate': endTime,
'totalFiles': totalFiles
}
)
except(Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
def StartJob():
startTime = datetime.datetime.now(datetime.timezone.utc)
pendingFiles = GetPendingFiles()
if pendingFiles is not None:
with multiprocessing.Pool(MAX_WORKERS) as pool:
results = pool.imap(ConvertAudio, pendingFiles)
totalFiles = sum(results)
endTime = datetime.datetime.now(datetime.timezone.utc)
UpdateLogTable(startTime, endTime, totalFiles)
else:
UpdateLogTable(startTime, startTime, 0)
print("No files to Tansform")
if __name__ == '__main__':
StartJob()
| 1.929688
| 2
|
backend/src/features/friend/mappers.py
|
ihsaro/socioworld
| 0
|
12784385
|
from features.friend.entities import Friendship
from features.friend.models import FriendshipOutput
def map_friendship_to_friendship_output(*, friendship: Friendship) -> FriendshipOutput:
return FriendshipOutput(
client_id=friendship.client_id,
friend_id=friendship.friend_id,
requested=friendship.requested,
approved=friendship.approved
)
| 2.421875
| 2
|
mnist_py/models/get_layers_shapes.py
|
vijayagril/mnist_cnn_cuda
| 0
|
12784386
|
from train import models_initializers
model_name = 'basic_nn'
model = models_initializers.get(model_name)()
for l in model.layers:
print(f'{l.name} -- {l.input_shape} -- {l.output_shape}')
| 3.03125
| 3
|
caldera/utils/mp/_decorators.py
|
jvrana/caldera
| 2
|
12784387
|
<filename>caldera/utils/mp/_decorators.py
import inspect
from functools import wraps
from multiprocessing import Pool
from typing import Callable
from typing import List
from typing import Optional
from ._mp_tools import _resolved_sorted_handler
from ._mp_tools import _S
from ._mp_tools import _T
from ._mp_tools import argmap
from ._mp_tools import run_with_pool
from ._mp_tools import valid_varname
# class MultiProcess(object):
#
# def __init__(self, on: str, auto_annotate: bool = True, attach: bool = True, attach_as="pooled"):
# if not isinstance(attach_as, str):
# raise TypeError("attach_as must be a str, not a {}".format(attach_as.__class__))
# if not valid_varname(attach_as):
# raise ValueError("attach_as '{}' is not a valid variable name".format(attach_as))
#
#
# self.on = on
# self.auto_annotate = auto_annotate
# self.attach = attach
# self.attach_as = attach_as
# self.validate()
#
# def validate(self):
# self.validate_on()
# self.validate_attach_as()
#
# def validate_attach_as(self):
# if not isinstance(self.attach_as, str):
# raise TypeError("attach_as must be a str, not a {}".format(self.attach_as.__class__))
# if not valid_varname(self.attach_as):
# raise ValueError("attach_as '{}' is not a valid variable name".format(self.attach_as))
#
# def validate_on(self):
# if not isinstance(self.on, str):
# raise TypeError('argument must be a str, found a {} {}'.format(self.on, self.on.__class__))
# elif not valid_varname(self.on):
# raise ValueError("argument '{}' is not a valid variable name".format(self.on))
#
# def validate_func(self, f):
# argspec = inspect.getfullargspec(f)
# if not self.on in argspec.args:
# raise ValueError("argument '{}' not in signature. Use any of {}".format(self.on, argspec.args))
#
# @staticmethod
# def starstar(f):
# def wrapped(kwargs):
# return f(**kwargs)
# return wrapped
#
# def pooled_inner(self, f, pool_kwargs, override_kwargs_key: str):
# @wraps(f)
# def _pooled_inner(*args, **kwargs) -> List[_S]:
# inner_kwargs = dict(pool_kwargs)
# if override_kwargs_key is not None:
# if override_kwargs_key in kwargs:
# pooled_opts = kwargs[override_kwargs_key]
# inner_kwargs.update(pooled_opts)
# inner_kwargs = dict(pool_kwargs)
# n_cpus = inner_kwargs['n_cpus']
# chunksize = inner_kwargs['chunksize']
# callback = inner_kwargs['callback']
# error_callback = inner_kwargs['error_callback']
#
# amap = argmap(f, args, kwargs)
# pooled_kwargs = []
# if not hasattr(amap[self.on], '__iter__'):
# raise ValueError('argument "{}" is not iterable for pooled function'.format(on))
# for v in amap[self.on]:
# _kwargs = dict(amap)
# _kwargs[self.on] = v
# pooled_kwargs.append(_kwargs)
#
# with Pool(n_cpus) as pool:
# run_with_pool(pool,
# f,
# wrapper=self.starstar,
# chunksize=chunksize,
# args=pooled_kwargs,
# callback=callback,
# error_callback=error_callback,
# resolved_handler=_resolved_sorted_handler
# )
# return _pooled_inner
#
# # @staticmethod
# # def override(f, outer_kwargs: Dict[str, Any], override_kwargs: str):
# # @wraps(f)
# # def wrapped(*args, **kwargs):
# # inner_kwargs = dict(outer_kwargs)
# # if override_kwargs is not None:
# # if override_kwargs in kwargs:
# # pooled_opts = kwargs[override_kwargs]
# # inner_kwargs.update(pooled_opts)
# # f(*args, **inner_kwargs)
# # return wrapped
#
# def do_auto_annotate(self, func, type = List):
# if self.auto_annotate:
# func.__annotations__ = dict(func.__annotations__)
# if self.on in func.__annotations__:
# func.__annotations__[self.on] = type[func.__annotations__[self.on]]
# else:
# func.__annotations__[self.on] = list
#
# def bind(self, func, pooled_outer):
# if '__attach_as' is None or self.attach is False:
# wrapped = pooled_outer
# elif self.attach is True:
# @wraps(func)
# def wrapped(*args, **kwargs) -> _S:
# return func(*args, **kwargs)
# wrapped.__dict__[self.attach_as] = pooled_outer
# return wrapped
#
# def override(self, f, okwargs, key):
# def wrapped(*args, **kwargs):
# def _wrapped(*args, **kwargs):
# pass
# return _wrapped
# return wrapped
#
# def __call__(self, f):
# self.validate_func(f)
#
# def pooled_outer(n_cpus: Optional[int] = None, chunksize: int = 1, callback=None, error_callback=None,
# override_kwargs='pooled_opts'):
# pool_kwargs = {
# 'n_cpus': n_cpus,
# 'chunksize': chunksize,
# 'callback': callback,
# 'error_callback': error_callback
# }
# pooled_inner = self.pooled_inner(f, pool_kwargs, override_kwargs)
# if self.auto_annotate:
# self.do_auto_annotate(pooled_inner)
# return pooled_inner
#
# return self.bind(f, pooled_outer)
def _auto_annotate_on(on: str, func, type=List):
func.__annotations__ = dict(func.__annotations__)
if on in func.__annotations__:
func.__annotations__[on] = type[func.__annotations__[on]]
else:
func.__annotations__[on] = list
def multiprocess(
on: str, auto_annotate: bool = True, attach: bool = True, attach_as="pooled"
):
"""A decorator that attaches a pooled version of the function.
.. code-block::
import time
@mp('y')
def foo(x, y, z):
time.sleep(x/10.)
return x + y + z
# standard call signature
foo(1, 3, 2)
# pooled call signature
foo.pooled(1, range(20), 2)
:param on: Variable to pool. The pooled version will require an iterable to be
        passed at the positional argument or keyword argument indicated.
:param auto_annotate: whether to dynamically change the annotation of the pooled function.
This likely will not appear in the IDE.
:param attach: if False, return only the pooled version of the function
:param attach_as: key to attach to the original function
:return:
"""
if not isinstance(attach_as, str):
raise TypeError("attach_as must be a str, not a {}".format(attach_as.__class__))
if not valid_varname(attach_as):
raise ValueError(
"attach_as '{}' is not a valid variable name".format(attach_as)
)
def _mp(
f: Callable[[_T], _S]
) -> Callable[[Callable[[_T], _S]], Callable[[_T], _S]]:
if not isinstance(on, str):
raise TypeError(
"argument must be a str, found a {} {}".format(on, on.__class__)
)
elif not valid_varname(on):
raise ValueError("argument '{}' is not a valid variable name".format(on))
argspec = inspect.getfullargspec(f)
if on not in argspec.args:
raise ValueError(
"argument '{}' not in signature. Use any of {}".format(on, argspec.args)
)
def pooled_outer(
n_cpus: Optional[int] = None,
chunksize: int = 1,
callback=None,
error_callback=None,
override_kwargs="pooled_opts",
):
outer_kwargs = {
"n_cpus": n_cpus,
"chunksize": chunksize,
"callback": callback,
"error_callback": error_callback,
}
@wraps(f)
def pooled_inner(*args, **kwargs) -> List[_S]:
            # apply per-call overrides (e.g. a 'pooled_opts' dict) on top of pooled_outer's config
inner_kwargs = dict(outer_kwargs)
if override_kwargs is not None:
if override_kwargs in kwargs:
pooled_opts = kwargs[override_kwargs]
inner_kwargs.update(pooled_opts)
n_cpus = inner_kwargs["n_cpus"]
chunksize = inner_kwargs["chunksize"]
callback = inner_kwargs["callback"]
error_callback = inner_kwargs["error_callback"]
amap = argmap(f, args, kwargs)
pooled_kwargs = []
if not hasattr(amap[on], "__iter__"):
raise ValueError(
'argument "{}" is not iterable for pooled function'.format(on)
)
for v in amap[on]:
_kwargs = dict(amap)
_kwargs[on] = v
pooled_kwargs.append(_kwargs)
def starstar(f):
def wrapped(kwargs):
return f(**kwargs)
return wrapped
with Pool(n_cpus) as pool:
run_with_pool(
pool,
f,
wrapper=starstar,
chunksize=chunksize,
args=pooled_kwargs,
callback=callback,
error_callback=error_callback,
resolved_handler=_resolved_sorted_handler,
)
if auto_annotate:
_auto_annotate_on(on, pooled_inner)
return pooled_inner
if attach_as is None or attach is False:
wrapped = pooled_outer()
elif attach is True:
@wraps(f)
def wrapped(*args, **kwargs) -> _S:
return f(*args, **kwargs)
wrapped.__dict__[attach_as] = pooled_outer
return wrapped
return _mp
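# --- Editor's usage sketch (not part of the original module) ---
# Adapted from the docstring above. Note that the attribute attached by the
# decorator is the configuration wrapper `pooled_outer`, so it is called once
# with pool options and the returned callable is then called with the normal
# arguments. Whether results come back depends on `run_with_pool`, which is not
# shown here, so this only demonstrates the call pattern.
import time


@multiprocess(on="y")              # 'y' becomes the argument that is fanned out
def _demo(x, y, z):
    time.sleep(x / 10.0)
    return x + y + z


if __name__ == "__main__":
    _demo(1, 3, 2)                                      # ordinary single-process call
    pooled_demo = _demo.pooled(n_cpus=2, chunksize=1)   # configure the pool
    pooled_demo(1, range(20), 2)                        # 'y' iterated across workers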
| 2.234375
| 2
|
project.py
|
anton-kachurin/item-catalog
| 3
|
12784388
|
<reponame>anton-kachurin/item-catalog<gh_stars>1-10
from flask import Flask, render_template, request, redirect, url_for
from flask import jsonify, session, make_response, g
import os, random, string, json, httplib2, requests
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
from db_scheme import Category, Item, User
from db_scheme import NotAuthorized, NotAuthenticated, NotFound
# constants for Google Plus oAuth2
G_SECRETS_FILE = 'g_client_secrets.json'
g_client_secrets = json.loads(open(G_SECRETS_FILE, 'r').read())
G_CLIENT_ID = g_client_secrets['web']['client_id']
REDIRECT_URI = 'postmessage'
# constants for Facebook oAuth2
FB_SECRETS_FILE = 'fb_client_secrets.json'
fb_client_secrets = json.loads(open(FB_SECRETS_FILE, 'r').read())
FB_CLIENT_ID = fb_client_secrets['web']['app_id']
app = Flask(__name__)
def json_result(message, code=401):
""" Generate JSON response with given message and HTTP code """
response = make_response(json.dumps(message), code)
response.headers['Content-Type'] = 'application/json'
return response
def json_not_found():
return json_result('no results found', 404)
def field_list():
""" List of required fields of an article """
return [
{'name': 'title', 'label': 'Title'},
{'name': 'author', 'label': 'Author'},
{'name': 'source', 'label': 'Source URL'},
{'name': 'image', 'label': 'Illustration URL'},
{'name': 'text', 'label': 'Content', 'textarea': 1}
]
def extend_fields_with_value(fields, title, author, source, image, text):
for field in fields:
if field['name'] == 'title':
field['value'] = title
if field['name'] == 'author':
field['value'] = author
if field['name'] == 'source':
field['value'] = source
if field['name'] == 'image':
field['value'] = image
if field['name'] == 'text':
field['value'] = text
def is_url(url):
""" Check if given string is a valid URL """
url = url.lower()
return url.startswith('http://') or url.startswith('https://')
def check_request_fields(fields):
""" Get parameters from `request` object and check it's validity;
return error message if it's invalid;
otherwise, extend `fields` object with parameter values and return `None`.
"""
title = request.form.get('title')
author = request.form.get('author')
source = request.form.get('source')
image = request.form.get('image')
text = request.form.get('text')
extend_fields_with_value(fields=fields, title=title, author=author,
source=source, image=image, text=text)
error = ''
if not title or not author or not text or not source or not image:
error = 'All fields are required'
if not is_url(image):
error = 'Please provide a valid image URL'
if not is_url(source):
error = 'Please provide a valid link to the original article'
if error:
return error
else:
return None
@app.before_request
def before_request():
""" Set g.current_user property before any view function will run """
if 'email' in session:
# user is logged in, use its email to get user from db
# User.create will make sure not to create duplicate entry in db
g.current_user = User.create(username=session.get('username'),
email=session.get('email'),
picture=session.get('picture'))
else:
g.current_user = None
@app.route('/login')
def show_login():
state = ''.join(random.choice(string.ascii_uppercase + string.digits) for x in xrange(32))
session['state'] = state
return render_template('login.html',
state_str=state,
g_client_id=G_CLIENT_ID,
fb_client_id=FB_CLIENT_ID,
redirect_uri = REDIRECT_URI)
@app.route('/gconnect', methods=["POST"])
def gconnect():
if request.args.get('state') != session.get('state'):
return json_result('Invalid state parameter')
code = request.data
try:
oauth_flow = flow_from_clientsecrets(G_SECRETS_FILE, scope='')
oauth_flow.redirect_uri = REDIRECT_URI
credentials = oauth_flow.step2_exchange(code)
except FlowExchangeError:
return json_result('Failed to upgrade the authorization code')
access_token = credentials.access_token
url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'
% access_token)
h = httplib2.Http()
result = json.loads(h.request(url, 'GET')[1])
access_token_error = result.get('error')
if access_token_error is not None:
return json_result(access_token_error, 500)
gplus_id = credentials.id_token['sub']
if result['user_id'] != gplus_id:
return json_result("Token's user ID doesn't match given user ID")
if result['issued_to'] != G_CLIENT_ID:
return json_result("Token's client ID doesn't match given client ID")
stored_access_token = session.get('access_token')
stored_gplus_id = session.get('gplus_id')
if gplus_id == stored_gplus_id and stored_access_token is not None:
session['access_token'] = access_token
return json_result('User is already connected', 200)
session['provider'] = 'google'
session['access_token'] = access_token
session['gplus_id'] = gplus_id
# Get user info
userinfo_url = "https://www.googleapis.com/oauth2/v1/userinfo"
params = {'access_token': access_token, 'alt': 'json'}
answer = requests.get(userinfo_url, params=params)
data = answer.json()
session['username'] = data['name']
session['picture'] = data['picture']
session['email'] = data['email']
return 'welcome, ' + session.get('username')
def gdisconnect():
# only disconnect a connected user
access_token = session.get('access_token')
if access_token is None:
return json_result('Current user is not connected')
url = ('https://accounts.google.com/o/oauth2/revoke?token=%s'
% access_token)
h = httplib2.Http()
result = h.request(url, 'GET')[0]
if(result['status'] == '200'):
del session['username']
del session['picture']
del session['email']
del session['access_token']
del session['gplus_id']
del session['provider']
return json_result('Successfully disconnected', 200)
else:
return json_result('Failed to revoke token for given user', 400)
@app.route('/fbconnect', methods=['POST'])
def fbconnect():
if request.args.get('state') != session.get('state'):
return json_result('Invalid state parameter')
access_token = request.data
app_secret = fb_client_secrets['web']['app_secret']
url = 'https://graph.facebook.com/oauth/access_token?grant_type=fb_exchange_token&client_id=%s&client_secret=%s&fb_exchange_token=%s' % (
FB_CLIENT_ID, app_secret, access_token)
h = httplib2.Http()
result = h.request(url, 'GET')[1]
userinfo_url = "https://graph.facebook.com/v2.4/me"
# strip expire tag from access token
token = result.split("&")[0]
url = 'https://graph.facebook.com/v2.4/me?%s&fields=name,id,email' % token
h = httplib2.Http()
result = h.request(url, 'GET')[1]
data = json.loads(result)
session['provider'] = 'facebook'
session['username'] = data['name']
session['email'] = data['email']
session['fb_id'] = data['id']
# Strip out the information before the equals sign in our token
stored_token = token.split("=")[1]
session['access_token'] = stored_token
# Get user picture
url = 'https://graph.facebook.com/v2.4/me/picture?%s&redirect=0&height=200&width=200' % token
h = httplib2.Http()
result = h.request(url, 'GET')[1]
data = json.loads(result)
session['picture'] = data["data"]["url"]
return 'welcome, ' + session.get('username')
def fbdisconnect():
# Only disconnect a connected user.
fb_id = session.get('fb_id')
access_token = session.get('access_token')
if fb_id is None:
return json_result('Current user is not connected')
url = 'https://graph.facebook.com/%s/permissions?access_token=%s' % (fb_id, access_token)
h = httplib2.Http()
result = h.request(url, 'DELETE')[1]
data = json.loads(result)
if 'success' in data and data['success'] == True:
del session['username']
del session['picture']
del session['email']
del session['access_token']
del session['fb_id']
del session['provider']
return json_result('Successfully disconnected', 200)
else:
return json_result('Failed to revoke token for given user', 400)
@app.route('/force_logout', methods=["POST"])
def force_logout():
""" Make server to clean session data for current user when
regular disconnect fails. This will allow to get new oAuth credentials
later, i.e to relogin
"""
del session['username']
del session['picture']
del session['email']
del session['access_token']
del session['provider']
if 'fb_id' in session:
del session['fb_id']
if 'gplus_id' in session:
del session['gplus_id']
return json_result('Forced to disconnect', 200)
@app.route('/logout', methods=["POST"])
def disconnect():
""" Recognize what authorization option is currently being used,
and try to revoke authorization via corresponding provider
"""
if 'provider' in session:
provider = session.get('provider')
if provider == 'facebook':
return fbdisconnect()
elif provider == 'google':
return gdisconnect()
else:
return json_result('Internal error', 500)
else:
return json_result('Current user is not connected')
@app.route('/')
def redirect_to_main():
return redirect(url_for('show_catalog'))
@app.route('/catalog')
def show_catalog():
categories = Category.get_all()
return render_template('catalog.html', categories=categories)
@app.route('/catalog/<string:category_path>')
def show_category(category_path):
category = Category.get_one(category_path)
items = Item.get_all(category)
return render_template('items.html', category=category, items=items)
@app.route('/catalog/<string:category_path>/<string:item_label>')
def show_article(category_path, item_label):
category = Category.get_one(category_path)
item = Item.get_one(category, item_label)
return render_template('article.html', category=category, item=item)
@app.route('/catalog/<string:category_path>/add',
methods=['GET', 'POST'])
def add_item(category_path):
category = Category.get_one(category_path)
fields = field_list()
if request.method == 'GET':
return render_template('add.html', fields=fields, category=category)
else:
error = check_request_fields(fields)
if error:
return render_template('add.html', fields=fields,
category=category,
error=error)
else:
obj = {}
for field in fields:
obj[field['name']] = field['value']
error = Item.add(g.current_user, category, Item(**obj))
if error:
return render_template('add.html', fields=fields,
category=category,
error=error)
else:
return redirect(url_for('show_category',
category_path=category.path))
@app.route('/catalog/<string:category_path>/<string:item_label>/edit',
methods=['GET', 'POST'])
def edit_item(category_path, item_label):
category = Category.get_one(category_path)
item = Item.get_one(category, item_label)
fields = field_list()
if request.method == 'GET':
title = item.title
author = item.author
source = item.source
image = item.image
text = item.text
extend_fields_with_value(fields=fields, title=title, author=author,
source=source, image=image, text=text)
return render_template('add.html', fields=fields, category=category)
else:
error = check_request_fields(fields)
if error:
return render_template('add.html', fields=fields,
category=category,
error=error)
else:
obj = {}
for field in fields:
obj[field['name']] = field['value']
error = item.edit(g.current_user, obj)
if error:
return render_template('add.html', fields=fields,
category=category,
error=error)
else:
return redirect(url_for('show_category',
category_path=category.path))
@app.route('/catalog/<string:category_path>/<string:item_label>/delete',
methods=['POST'])
def delete_item(category_path, item_label):
category = Category.get_one(category_path)
item = Item.get_one(category, item_label)
item.delete(g.current_user)
return json_result('deleted successfully', 200)
# JSON endpoints
@app.route('/JSON/catalog')
def all_categories_JSON():
categories = Category.get_all()
return jsonify(categoryList=[category.serialized
for category in categories])
@app.route('/JSON/catalog/<string:category_path>')
def items_of_category_JSON(category_path):
try:
category = Category.get_one(category_path)
items = Item.get_all(category)
return jsonify(itemList=[item.serialized for item in items])
except NotFound:
return json_not_found()
@app.route('/JSON/catalog/<string:category_path>/<string:item_label>')
def item_JSON(category_path, item_label):
try:
category = Category.get_one(category_path)
item = Item.get_one(category, item_label)
return jsonify(item.serialized)
except NotFound:
return json_not_found()
@app.errorhandler(NotFound)
def not_found(e):
error = "404. Nothing is found for this URL"
return render_template('403-404.html', error=error), 404
@app.errorhandler(NotAuthorized)
def not_authorized(e):
error = "403. You can't perform this action"
return render_template('403-404.html', error=error), 403
@app.errorhandler(NotAuthenticated)
def not_authenticated(e):
return render_template('401.html'), 401
APP_CONFIG_FILE = 'config' #.py
if os.path.isfile(APP_CONFIG_FILE + '.py'):
app.config.from_object(APP_CONFIG_FILE)
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000)
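# --- Editor's smoke-test sketch (not part of the original application) ---
# Exercises the read-only JSON endpoints with Flask's built-in test client.
# Assumptions: the database layer behind Category/Item is already initialised,
# and 'some-category' is only a placeholder path.
def _smoke_test_json_endpoints():
    client = app.test_client()
    print(client.get('/JSON/catalog').status_code)                 # list of categories
    print(client.get('/JSON/catalog/some-category').status_code)   # 200 or 404 (JSON)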
| 2.546875
| 3
|
python/dynamic_graph/sot/torque_control/main.py
|
jviereck/sot-torque-control
| 0
|
12784389
|
<reponame>jviereck/sot-torque-control
# -*- coding: utf-8 -*-
"""
2014, LAAS/CNRS
@author: <NAME>
"""
from dynamic_graph import plug
from dynamic_graph.sot.torque_control.se3_trajectory_generator import SE3TrajectoryGenerator
from dynamic_graph.sot.torque_control.create_entities_utils import create_trajectory_switch, connect_synchronous_trajectories, create_force_traj_gen
from dynamic_graph.sot.torque_control.create_entities_utils import create_trajectory_generator, create_com_traj_gen, create_encoders
from dynamic_graph.sot.torque_control.create_entities_utils import create_imu_offset_compensation, create_estimators, create_imu_filter
from dynamic_graph.sot.torque_control.create_entities_utils import create_base_estimator, create_position_controller, create_torque_controller
from dynamic_graph.sot.torque_control.create_entities_utils import create_balance_controller, create_ctrl_manager, create_ros_topics
from dynamic_graph.sot.torque_control.create_entities_utils import create_free_flyer_locator, create_flex_estimator, create_floatingBase
from dynamic_graph.sot.torque_control.create_entities_utils import create_current_controller, connect_ctrl_manager
from dynamic_graph.sot.torque_control.create_entities_utils import create_tracer, create_topic, create_admittance_ctrl
from dynamic_graph.ros import RosPublish
from dynamic_graph.sot.torque_control.utils.sot_utils import start_sot, stop_sot, go_to_position, Bunch
from dynamic_graph.sot.torque_control.utils.filter_utils import create_chebi2_lp_filter_Wn_03_N_4
from time import sleep
def get_default_conf():
import dynamic_graph.sot.torque_control.hrp2.balance_ctrl_conf as balance_ctrl_conf
import dynamic_graph.sot.torque_control.hrp2.admittance_ctrl_conf as admittance_ctrl_conf
import dynamic_graph.sot.torque_control.hrp2.base_estimator_conf as base_estimator_conf
import dynamic_graph.sot.torque_control.hrp2.control_manager_conf as control_manager_conf
import dynamic_graph.sot.torque_control.hrp2.current_controller_conf as current_controller_conf
import dynamic_graph.sot.torque_control.hrp2.force_torque_estimator_conf as force_torque_estimator_conf
import dynamic_graph.sot.torque_control.hrp2.joint_torque_controller_conf as joint_torque_controller_conf
import dynamic_graph.sot.torque_control.hrp2.joint_pos_ctrl_gains as pos_ctrl_gains
import dynamic_graph.sot.torque_control.hrp2.motors_parameters as motor_params
conf = Bunch();
conf.balance_ctrl = balance_ctrl_conf;
conf.adm_ctrl = admittance_ctrl_conf;
conf.base_estimator = base_estimator_conf;
conf.control_manager = control_manager_conf;
conf.current_ctrl = current_controller_conf;
conf.force_torque_estimator = force_torque_estimator_conf;
conf.joint_torque_controller = joint_torque_controller_conf;
conf.pos_ctrl_gains = pos_ctrl_gains;
conf.motor_params = motor_params;
return conf;
''' Main function to call before starting the graph. '''
def main_v3(robot, startSoT=True, go_half_sitting=True, conf=None):
if(conf is None):
conf = get_default_conf();
dt = robot.timeStep;
# TMP: overwrite halfSitting configuration to use SoT joint order
robot.halfSitting = (
# Free flyer
0., 0., 0.648702, 0., 0. , 0.,
# Legs
0., 0., -0.453786, 0.872665, -0.418879, 0.,
0., 0., -0.453786, 0.872665, -0.418879, 0.,
# Chest and head
0., 0., 0., 0.,
# Arms
0.261799, -0.17453, 0., -0.523599, 0., 0., 0.1,
0.261799, 0.17453, 0., -0.523599, 0., 0., 0.1);
robot.device.setControlInputType('noInteg');
robot.ctrl_manager = create_ctrl_manager(conf.control_manager, conf.motor_params, dt);
robot.traj_gen = create_trajectory_generator(robot.device, dt);
robot.com_traj_gen = create_com_traj_gen(conf.balance_ctrl, dt);
robot.rf_force_traj_gen = create_force_traj_gen("rf_force_ref", conf.balance_ctrl.RF_FORCE_DES, dt);
robot.lf_force_traj_gen = create_force_traj_gen("lf_force_ref", conf.balance_ctrl.LF_FORCE_DES, dt);
robot.traj_sync = create_trajectory_switch();
robot.rf_traj_gen = SE3TrajectoryGenerator("tg_rf");
robot.lf_traj_gen = SE3TrajectoryGenerator("tg_lf");
robot.rf_traj_gen.init(dt);
robot.lf_traj_gen.init(dt);
robot.encoders = create_encoders(robot);
robot.imu_offset_compensation = create_imu_offset_compensation(robot, dt);
(robot.estimator_ft, robot.filters) = create_estimators(robot, conf.force_torque_estimator, conf.motor_params, dt);
robot.imu_filter = create_imu_filter(robot, dt);
robot.base_estimator = create_base_estimator(robot, dt, conf.base_estimator);
connect_synchronous_trajectories(robot.traj_sync,
[robot.com_traj_gen,
robot.rf_force_traj_gen, robot.lf_force_traj_gen,
robot.rf_traj_gen, robot.lf_traj_gen])
#robot.rf_traj_gen, robot.lf_traj_gen])
robot.pos_ctrl = create_position_controller(robot, conf.pos_ctrl_gains, dt);
robot.torque_ctrl = create_torque_controller(robot, conf.joint_torque_controller, conf.motor_params, dt);
robot.inv_dyn = create_balance_controller(robot, conf.balance_ctrl,conf.motor_params, dt);
robot.adm_ctrl = create_admittance_ctrl(robot, conf.adm_ctrl, dt);
robot.current_ctrl = create_current_controller(robot, conf.current_ctrl, conf.motor_params, dt);
connect_ctrl_manager(robot);
# create low-pass filter for computing joint velocities
robot.encoder_filter = create_chebi2_lp_filter_Wn_03_N_4('encoder_filter', dt, conf.motor_params.NJ)
plug(robot.encoders.sout, robot.encoder_filter.x)
plug(robot.encoder_filter.dx, robot.current_ctrl.dq);
plug(robot.encoder_filter.dx, robot.torque_ctrl.jointsVelocities);
#plug(robot.encoder_filter.x_filtered, robot.base_estimator.joint_positions);
#plug(robot.encoder_filter.dx, robot.base_estimator.joint_velocities);
robot.ros = RosPublish('rosPublish');
robot.device.after.addDownsampledSignal('rosPublish.trigger',1);
robot.estimator_ft.dgyro.value = (0.0, 0.0, 0.0);
robot.estimator_ft.gyro.value = (0.0, 0.0, 0.0);
# estimator.accelerometer.value = (0.0, 0.0, 9.81);
if(startSoT):
print "Gonna start SoT";
sleep(1.0);
start_sot();
if(go_half_sitting):
print "Gonna go to half sitting in 1 sec";
sleep(1.0);
go_to_position(robot.traj_gen, robot.halfSitting[6:], 10.0);
return robot;
''' Main function to call after having started the graph. '''
def main_post_start(robot):
ros = create_ros_topics(robot);
return ros;
''' Main function to call before starting the graph. '''
def main_v2(robot, delay=0.01, startSoT=True, go_half_sitting=True, urdfFileName='/opt/openrobots/share/hrp2_14_description/urdf/hrp2_14.urdf'):
dt = robot.timeStep;
robot.device.setControlInputType('position');
robot.traj_gen = create_trajectory_generator(robot.device, dt);
robot.com_traj_gen = create_com_traj_gen(dt);
robot.rf_traj_gen = SE3TrajectoryGenerator("tg_rf");
robot.lf_traj_gen = SE3TrajectoryGenerator("tg_lf");
robot.rf_traj_gen.init(dt);
robot.lf_traj_gen.init(dt);
(robot.estimator_ft, robot.filters) = create_estimators(robot, dt, delay);
robot.ff_locator = create_free_flyer_locator(robot, urdfFileName);
robot.flex_est = create_flex_estimator(robot, dt);
robot.floatingBase = create_floatingBase(robot);
robot.pos_ctrl = create_position_controller(robot, dt);
robot.torque_ctrl = create_torque_controller(robot);
# inv_dyn = create_inverse_dynamics(robot, dt);
robot.inv_dyn = create_balance_controller(robot, urdfFileName, dt);
robot.ctrl_manager = create_ctrl_manager(robot, dt);
robot.estimator_ft.gyro.value = (0.0, 0.0, 0.0);
# estimator.accelerometer.value = (0.0, 0.0, 9.81);
if(startSoT):
print "Gonna start SoT";
sleep(1.0);
start_sot();
if(go_half_sitting):
print "Gonna go to half sitting";
sleep(1.0);
go_to_position(robot.traj_gen, robot.halfSitting[6:], 10.0);
return robot;
| 1.515625
| 2
|
click_man/__main__.py
|
smarlowucf/click-man
| 1
|
12784390
|
<gh_stars>1-10
"""
click-man - Generate man pages for click application
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module provides a click CLI command to
generate man pages from a click application.
:copyright: (c) 2016 by <NAME>.
:license: MIT, see LICENSE for more details.
"""
import os
import click
from pkg_resources import iter_entry_points, get_distribution
from click_man.core import write_man_pages
@click.command(context_settings={'help_option_names': ['-h', '--help']})
@click.option('--target', '-t', default=os.path.join(os.getcwd(), 'man'),
type=click.Path(file_okay=False, dir_okay=True, resolve_path=True),
help='Target location for the man pages')
@click.version_option(get_distribution('click-man').version, '-V', '--version')
@click.argument('name')
def cli(target, name):
"""
    Generate man pages for the scripts defined in the ``console_scripts`` entry point.
The cli application is gathered from entry points of installed packages.
The generated man pages are written to files in the directory given
by ``--target``.
"""
console_scripts = [ep for ep in iter_entry_points('console_scripts', name=name)]
if len(console_scripts) < 1:
raise click.ClickException('"{0}" is not an installed console script.'.format(name))
# Only generate man pages for first console script
entry_point = console_scripts[0]
# create target directory if it does not exist yet
try:
os.makedirs(target)
except OSError:
pass
click.echo('Load entry point {0}'.format(name))
cli = entry_point.resolve()
click.echo('Generate man pages for {0} in {1}'.format(name, target))
write_man_pages(name, cli, version=entry_point.dist.version, target_dir=target)
if __name__ == '__main__':
cli()
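# --- Editor's usage sketch (not part of the original module) ---
# Invokes the command above programmatically via click's test runner.
# Assumption: "flask" is only an example of an installed console script;
# substitute any entry point available in your environment.
def _example_generate_man_pages():
    from click.testing import CliRunner
    runner = CliRunner()
    result = runner.invoke(cli, ["--target", "./man", "flask"])
    print(result.exit_code, result.output)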
| 2.640625
| 3
|
goutdotcom/treatment/models.py
|
Spiewart/goutdotcom
| 0
|
12784391
|
<reponame>Spiewart/goutdotcom
from datetime import datetime, timezone
from decimal import *
from django.conf import settings
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.db.models.fields.related import ForeignKey
from django.urls import reverse
from django.utils import timezone
from django_extensions.db.models import TimeStampedModel
from ..flareaid.models import FlareAid
from ..ppxaid.models import PPxAid
from ..ultplan.models import ULTPlan
from .choices import *
# Create your models here.
class Treatment(TimeStampedModel):
user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, null=True, blank=True)
generic_name = models.CharField(max_length=60, choices=MEDICATION_CHOICES)
brand_names = [""]
dose = models.IntegerField(null=True, blank=True)
freq = models.CharField(max_length=50, choices=FREQ_CHOICES, default=QDAY, null=True, blank=True)
side_effects = models.CharField(max_length=100, null=True, blank=True, help_text="Have you had any side effects?")
drug_class = models.CharField(max_length=50, choices=DRUG_CLASS_CHOICES)
date_started = models.DateField(null=True, blank=True)
date_ended = models.DateField(null=True, blank=True, default=None)
class Meta:
abstract = True
def __str__(self):
if self.dose:
return f"{str(self.generic_name)} {str(self.dose)} mg {str(self.freq)}"
else:
return f'{str(self.generic_name) + " (dose not recorded)"}'
def get_absolute_url(self):
return reverse("treatment:detail", kwargs={"pk": self.pk, "treatment": self.generic_name})
def __unicode__(self):
return self.generic_name
class FlareTreatment(Treatment):
flareaid = models.OneToOneField(FlareAid, on_delete=models.CASCADE, null=True, blank=True, default=None)
ppxaid = models.OneToOneField(PPxAid, on_delete=models.CASCADE, null=True, blank=True, default=None)
ultplan = models.OneToOneField(ULTPlan, on_delete=models.CASCADE, null=True, blank=True, default=None)
prn = models.BooleanField(
choices=BOOL_CHOICES,
default=True,
null=True,
blank=True,
help_text="Do you take this medication only as needed (PRN)?",
)
as_prophylaxis = models.BooleanField(
choices=BOOL_CHOICES,
verbose_name="Flare prophylaxis?",
help_text="Is this for flare prophylaxis while initiating ULT?",
default=False,
blank=True,
null=True,
)
duration = models.IntegerField(
null=True, blank=True, default=7, validators=[MaxValueValidator(14), MinValueValidator(1)]
)
def flareclaimer(self):
        natural_history = "Most flares last between 5-7 days. Flare treatments are designed to improve (not eliminate) symptoms over that duration. If your symptoms improve more quickly, it is OK to discontinue your flare treatment early. If your symptoms last longer, you should consult your provider."
return natural_history
class Meta:
abstract = True
class ULTTreatment(Treatment):
ultplan = models.OneToOneField(ULTPlan, on_delete=models.CASCADE, null=True, blank=True, default=None)
date_started = models.DateField(default=timezone.now, null=True, blank=True)
class Meta:
abstract = True
class Allopurinol(ULTTreatment):
generic_name = models.CharField(max_length=60, choices=MEDICATION_CHOICES, default=ALLOPURINOL)
brand_names = ["Xyloprim", "Aloprim"]
dose = models.IntegerField(choices=ALLOPURINOL_DOSE_CHOICES, default=100)
freq = models.CharField(max_length=50, choices=FREQ_CHOICES, default=QDAY)
side_effects = models.CharField(
max_length=100,
choices=ALLOPURINOL_SIDE_EFFECT_CHOICES,
null=True,
blank=True,
help_text="Have you had any side effects?",
)
de_sensitized = models.BooleanField(null=True, blank=True, help_text="Have you been de-sensitized to allopurinol?")
drug_class = models.CharField(max_length=50, choices=DRUG_CLASS_CHOICES, default=ULT)
class Febuxostat(ULTTreatment):
generic_name = models.CharField(max_length=60, choices=MEDICATION_CHOICES, default=FEBUXOSTAT)
brand_names = ["Uloric"]
dose = models.IntegerField(choices=FEBUXOSTAT_DOSE_CHOICES, default=40)
freq = models.CharField(max_length=50, choices=FREQ_CHOICES, default=QDAY)
side_effects = models.CharField(
max_length=100,
choices=FEBUXOSTAT_SIDE_EFFECT_CHOICES,
null=True,
blank=True,
help_text="Have you had any side effects?",
)
drug_class = models.CharField(max_length=50, choices=DRUG_CLASS_CHOICES, default=ULT)
class Colchicine(FlareTreatment):
generic_name = models.CharField(max_length=60, choices=MEDICATION_CHOICES, default=COLCHICINE)
brand_names = ["Colcrys"]
dose = models.FloatField(choices=COLCHICINE_DOSE_CHOICES, null=True, blank=True, default=1.2)
freq = models.CharField(max_length=50, choices=FREQ_CHOICES, null=True, blank=True, default=ONCE)
dose2 = models.FloatField(choices=COLCHICINE_DOSE_CHOICES, null=True, blank=True, default=0.6)
freq2 = models.CharField(max_length=50, choices=FREQ_CHOICES, null=True, blank=True, default=ONCE)
dose3 = models.FloatField(choices=COLCHICINE_DOSE_CHOICES, null=True, blank=True, default=0.6)
freq3 = models.CharField(max_length=50, choices=FREQ_CHOICES, null=True, blank=True, default=BID)
duration = models.IntegerField(
null=True, blank=True, default=7, validators=[MaxValueValidator(14), MinValueValidator(1)]
)
side_effects = models.CharField(
max_length=100, choices=COLCHICINE_SIDE_EFFECT_CHOICES, blank=True, help_text="Have you had any side effects?"
)
drug_class = models.CharField(max_length=50, choices=DRUG_CLASS_CHOICES, default=ANTIINFLAMMATORY)
def __str__(self):
if self.dose and self.dose2 and self.dose3:
return f"{str(self.generic_name)} {str(self.dose)} mg (2 tabs) {str(self.freq)} then {str(self.dose2)} mg (1 tab) {str(self.freq2)} an hour later then {str(self.dose3)} mg {str(self.freq3)} for {str(self.duration)} days or until flare resolves"
elif self.dose:
return f"{str(self.generic_name)} {str(self.dose)} mg (1 tab) {str(self.freq)}"
else:
return f'{str(self.generic_name) + " (dose not recorded)"}'
class Ibuprofen(FlareTreatment):
generic_name = models.CharField(max_length=60, choices=MEDICATION_CHOICES, default=IBUPROFEN)
brand_names = ["Advil"]
dose = models.IntegerField(choices=IBUPROFEN_DOSE_CHOICES, null=True, blank=True, default=800)
freq = models.CharField(max_length=50, choices=FREQ_CHOICES, null=True, blank=True, default=TID)
side_effects = models.CharField(
max_length=100,
choices=NSAID_SIDE_EFFECT_CHOICES,
null=True,
blank=True,
help_text="Have you had any side effects?",
)
drug_class = models.CharField(max_length=50, choices=DRUG_CLASS_CHOICES, default=NSAID)
def __str__(self):
if self.dose == 200:
return f"{str(self.generic_name)} {str(self.dose)} mg (one 200 mg tab) {str(self.freq)} for {str(self.duration)} days or until flare resolves"
elif self.dose == 400:
return f"{str(self.generic_name)} {str(self.dose)} mg (two 200 mg tabs) {str(self.freq)} for {str(self.duration)} days or until flare resolves"
elif self.dose == 600:
return f"{str(self.generic_name)} {str(self.dose)} mg (three 200 mg tabs) {str(self.freq)} for {str(self.duration)} days or until flare resolves"
elif self.dose == 800:
return f"{str(self.generic_name)} {str(self.dose)} mg (four 200 mg tabs) {str(self.freq)} for {str(self.duration)} days or until flare resolves"
else:
return f'{str(self.generic_name) + " (dose not recorded)"}'
class Naproxen(FlareTreatment):
generic_name = models.CharField(max_length=60, choices=MEDICATION_CHOICES, default=NAPROXEN)
brand_names = ["Aleve"]
dose = models.IntegerField(choices=NAPROXEN_DOSE_CHOICES, null=True, blank=True, default=440)
freq = models.CharField(max_length=50, choices=FREQ_CHOICES, null=True, blank=True, default=BID)
side_effects = models.CharField(
max_length=100, choices=NSAID_SIDE_EFFECT_CHOICES, blank=True, help_text="Have you had any side effects?"
)
drug_class = models.CharField(max_length=50, choices=DRUG_CLASS_CHOICES, default=NSAID)
def __str__(self):
if self.dose == 220 or self.dose == 250:
if self.prn == True:
return f"{str(self.generic_name)} {str(self.dose)} mg (1 tab) {str(self.freq)} (twice daily) for {str(self.duration)} days"
else:
return f"{str(self.generic_name)} {str(self.dose)} mg (1 tab) {str(self.freq)} (twice daily)"
if self.dose == 440 or self.dose == 500:
return f"{str(self.generic_name)} {str(self.dose)} mg (2 tabs) {str(self.freq)} (twice daily) for {str(self.duration)} days"
else:
return f'{str(self.generic_name) + " (dose not recorded)"}'
class Meloxicam(FlareTreatment):
generic_name = models.CharField(max_length=60, choices=MEDICATION_CHOICES, default=MELOXICAM)
brand_names = ["Mobic"]
dose = models.DecimalField(decimal_places=1, max_digits=3, choices=MELOXICAM_DOSE_CHOICES, null=True, blank=True)
freq = models.CharField(max_length=50, choices=FREQ_CHOICES, default=QDAY, blank=True)
side_effects = models.CharField(
max_length=100, choices=NSAID_SIDE_EFFECT_CHOICES, blank=True, help_text="Have you had any side effects?"
)
drug_class = models.CharField(max_length=50, choices=DRUG_CLASS_CHOICES, default=NSAID)
class Celecoxib(FlareTreatment):
generic_name = models.CharField(max_length=60, choices=MEDICATION_CHOICES, default=CELECOXIB)
brand_names = ["Aleve"]
dose = models.IntegerField(choices=CELECOXIB_DOSE_CHOICES, null=True, blank=True)
freq = models.CharField(max_length=50, choices=FREQ_CHOICES, default=QDAY, blank=True)
side_effects = models.CharField(
max_length=100, choices=NSAID_SIDE_EFFECT_CHOICES, blank=True, help_text="Have you had any side effects?"
)
drug_class = models.CharField(max_length=50, choices=DRUG_CLASS_CHOICES, default=NSAID)
class Indomethacin(FlareTreatment):
generic_name = models.CharField(max_length=60, choices=MEDICATION_CHOICES, default=INDOMETHACIN)
brand_names = ["Indocin"]
dose = models.IntegerField(choices=INDOMETHACIN_DOSE_CHOICES, null=True, blank=True)
freq = models.CharField(max_length=50, choices=FREQ_CHOICES, default=QDAY, blank=True)
side_effects = models.CharField(
max_length=100, choices=NSAID_SIDE_EFFECT_CHOICES, blank=True, help_text="Have you had any side effects?"
)
drug_class = models.CharField(max_length=50, choices=DRUG_CLASS_CHOICES, default=NSAID)
class Prednisone(FlareTreatment):
generic_name = models.CharField(max_length=60, choices=MEDICATION_CHOICES, default=PREDNISONE)
brand_names = ["Prednisone"]
dose = models.IntegerField(choices=PREDNISONE_DOSE_CHOICES, null=True, blank=True, default=40)
freq = models.CharField(max_length=50, choices=FREQ_CHOICES, null=True, blank=True, default=QDAY)
duration = models.IntegerField(
null=True, blank=True, default=4, validators=[MaxValueValidator(14), MinValueValidator(1)]
)
dose2 = models.IntegerField(choices=PREDNISONE_DOSE_CHOICES, null=True, blank=True, default=20)
freq2 = models.CharField(max_length=50, choices=FREQ_CHOICES, null=True, blank=True, default=QDAY)
duration2 = models.IntegerField(
null=True, blank=True, default=4, validators=[MaxValueValidator(14), MinValueValidator(1)]
)
side_effects = models.CharField(
max_length=100,
choices=PREDNISONE_SIDE_EFFECT_CHOICES,
null=True,
blank=True,
help_text="Have you had any side effects?",
)
drug_class = models.CharField(max_length=50, choices=DRUG_CLASS_CHOICES, default=SYSSTEROID)
def __str__(self):
if self.dose and self.dose2:
return f"{str(self.generic_name)} {str(self.dose)} mg {str(self.freq)} then {str(self.dose2)} {str(self.freq2)} for {str(self.duration)} days or until flare resolves"
elif self.dose:
return f"{str(self.generic_name)} {str(self.dose)} mg {str(self.freq)}"
else:
return f'{str(self.generic_name) + " (dose not recorded)"}'
class Methylprednisolone(FlareTreatment):
generic_name = models.CharField(max_length=60, choices=MEDICATION_CHOICES, default=METHYLPREDNISOLONE)
brand_names = ["Depomedrol"]
dose = models.IntegerField(choices=METHYLPREDNISOLONE_DOSE_CHOICES, null=True, blank=True)
freq = models.CharField(max_length=50, choices=FREQ_CHOICES, default=QDAY, blank=True)
side_effects = models.CharField(
max_length=100, choices=INJECTION_SIDE_EFFECT_CHOICES, blank=True, help_text="Have you had any side effects?"
)
drug_class = models.CharField(max_length=50, choices=DRUG_CLASS_CHOICES, default=LOCSTEROID)
as_injection = models.BooleanField(
choices=BOOL_CHOICES,
verbose_name="Given by joint injection?",
help_text="Was this given by an injection into your joint?",
default=False,
blank=True,
null=True,
)
def __str__(self):
if self.as_injection == True:
return f'{str(self.generic_name) + " " + str(self.dose) + " mg injection"}'
elif self.dose:
return f'{str(self.generic_name) + " " + str(self.dose) + " mg " + str(self.freq)}'
else:
return f'{str(self.generic_name) + " dose not recorded"}'
class Probenecid(ULTTreatment):
generic_name = models.CharField(max_length=60, choices=MEDICATION_CHOICES, default=PROBENECID)
brand_names = ["Probalan"]
dose = models.IntegerField(choices=PROBENECID_DOSE_CHOICES)
freq = models.CharField(max_length=50, choices=FREQ_CHOICES, default=BID)
side_effects = models.CharField(
max_length=100,
choices=PROBENECID_SIDE_EFFECT_CHOICES,
null=True,
blank=True,
help_text="Have you had any side effects?",
)
drug_class = models.CharField(max_length=50, choices=DRUG_CLASS_CHOICES, default=URATEEXCRETAGOGUE)
class Tinctureoftime(FlareTreatment):
generic_name = models.CharField(max_length=60, choices=MEDICATION_CHOICES, default=TINCTUREOFTIME)
brand_names = ["Tincture of time"]
duration = models.IntegerField(help_text="How long did it take to get better?", null=True, blank=True)
dose = models.IntegerField(blank=True, null=True, help_text="Any optional information on your dose?")
freq = models.CharField(
max_length=50,
choices=FREQ_CHOICES,
default=QDAY,
null=True,
blank=True,
help_text="Any optional information on your frequency?",
)
side_effects = models.CharField(
max_length=400, null=True, blank=True, help_text="Have you had any side effects? Please list"
)
drug_class = models.CharField(max_length=50, choices=DRUG_CLASS_CHOICES, default=TINCTUREOFTIME)
def __str__(self):
return f'{"Tincture of time for: " + str(self.duration) + " days"}'
class Othertreat(FlareTreatment):
generic_name = models.CharField(max_length=60, choices=MEDICATION_CHOICES, default=OTHER)
brand_names = ["Other"]
name = models.CharField(max_length=100, null=True, blank=True)
description = models.CharField(max_length=300, null=True, blank=True)
dose = models.IntegerField(blank=True, null=True, help_text="Any optional information on your dose?")
freq = models.CharField(
max_length=50,
choices=FREQ_CHOICES,
default=QDAY,
null=True,
blank=True,
help_text="Any optional information on your frequency?",
)
side_effects = models.CharField(max_length=400, blank=True, help_text="Have you had any side effects? Please list")
drug_class = models.CharField(max_length=50, choices=DRUG_CLASS_CHOICES, default=OTHER)
def __str__(self):
return self.name
| 2.265625
| 2
|
WEB(BE)/drf/detection_status/urls.py
|
dev-taewon-kim/ai_web_RISKOUT_BTS
| 1
|
12784392
|
from django.urls import path
from .views import *
urlpatterns = [
path('analyze/', AnalyzedDataView.as_view(), name='analyze'),
path('trends/', TrendsDataView.as_view(), name='trends'),
path('wordcloud/', WordcloudDataView.as_view(), name='wordcloud'),
path('article/volume/', ArticleVolumeDataView.as_view(), name='article_volume'),
path('sentiment/bar/', SentimentBarDataView.as_view(), name='sentiment_bar'),
path('sentiment/pie/', SentimentPieDataView.as_view(), name='sentiment_pie'),
path('report/', ReportDataView.as_view(), name='report'),
]
| 1.757813
| 2
|
src/netatmo/__init__.py
|
elcombato/netatmo
| 33
|
12784393
|
<filename>src/netatmo/__init__.py<gh_stars>10-100
from .netatmo import *
| 1.109375
| 1
|
jaraco/windows/privilege.py
|
jaraco/jaraco.windows
| 21
|
12784394
|
<gh_stars>10-100
import ctypes
from ctypes import wintypes
from .api import security
from .api import privilege
from .api import process
def get_process_token():
"""
Get the current process token
"""
token = wintypes.HANDLE()
res = process.OpenProcessToken(
process.GetCurrentProcess(), process.TOKEN_ALL_ACCESS, token
)
if not res > 0:
raise RuntimeError("Couldn't get process token")
return token
def get_symlink_luid():
"""
Get the LUID for the SeCreateSymbolicLinkPrivilege
"""
symlink_luid = privilege.LUID()
res = privilege.LookupPrivilegeValue(
None, "SeCreateSymbolicLinkPrivilege", symlink_luid
)
if not res > 0:
raise RuntimeError("Couldn't lookup privilege value")
return symlink_luid
def get_privilege_information():
"""
Get all privileges associated with the current process.
"""
# first call with zero length to determine what size buffer we need
return_length = wintypes.DWORD()
params = [
get_process_token(),
privilege.TOKEN_INFORMATION_CLASS.TokenPrivileges,
None,
0,
return_length,
]
res = privilege.GetTokenInformation(*params)
# assume we now have the necessary length in return_length
buffer = ctypes.create_string_buffer(return_length.value)
params[2] = buffer
params[3] = return_length.value
res = privilege.GetTokenInformation(*params)
assert res > 0, "Error in second GetTokenInformation (%d)" % res
privileges = ctypes.cast(
buffer, ctypes.POINTER(privilege.TOKEN_PRIVILEGES)
).contents
return privileges
def report_privilege_information():
"""
Report all privilege information assigned to the current process.
"""
privileges = get_privilege_information()
print("found {0} privileges".format(privileges.count))
tuple(map(print, privileges))
def enable_symlink_privilege():
"""
Try to assign the symlink privilege to the current process token.
Return True if the assignment is successful.
"""
# create a space in memory for a TOKEN_PRIVILEGES structure
# with one element
size = ctypes.sizeof(privilege.TOKEN_PRIVILEGES)
size += ctypes.sizeof(privilege.LUID_AND_ATTRIBUTES)
buffer = ctypes.create_string_buffer(size)
tp = ctypes.cast(buffer, ctypes.POINTER(privilege.TOKEN_PRIVILEGES)).contents
tp.count = 1
tp.get_array()[0].enable()
tp.get_array()[0].LUID = get_symlink_luid()
token = get_process_token()
res = privilege.AdjustTokenPrivileges(token, False, tp, 0, None, None)
if res == 0:
raise RuntimeError("Error in AdjustTokenPrivileges")
ERROR_NOT_ALL_ASSIGNED = 1300
return ctypes.windll.kernel32.GetLastError() != ERROR_NOT_ALL_ASSIGNED
class PolicyHandle(wintypes.HANDLE):
pass
class LSA_UNICODE_STRING(ctypes.Structure):
_fields_ = [
('length', ctypes.c_ushort),
('max_length', ctypes.c_ushort),
('buffer', ctypes.wintypes.LPWSTR),
]
def OpenPolicy(system_name, object_attributes, access_mask):
policy = PolicyHandle()
raise NotImplementedError(
"Need to construct structures for parameters "
"(see http://msdn.microsoft.com/en-us/library/windows"
"/desktop/aa378299%28v=vs.85%29.aspx)"
)
res = ctypes.windll.advapi32.LsaOpenPolicy(
system_name, object_attributes, access_mask, ctypes.byref(policy)
)
assert res == 0, "Error status {res}".format(**vars())
return policy
def grant_symlink_privilege(who, machine=''):
"""
Grant the 'create symlink' privilege to who.
Based on http://support.microsoft.com/kb/132958
"""
flags = security.POLICY_CREATE_ACCOUNT | security.POLICY_LOOKUP_NAMES
policy = OpenPolicy(machine, flags)
return policy
def main():
assigned = enable_symlink_privilege()
msg = ['failure', 'success'][assigned]
print("Symlink privilege assignment completed with {0}".format(msg))
if __name__ == '__main__':
main()
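# --- Editor's usage sketch (not part of the original module) ---
# Combines the helpers above before creating a symlink. Assumptions: a Windows
# host, and an account that is allowed to hold SeCreateSymbolicLinkPrivilege.
def _example_symlink_with_privilege(src="target.txt", dst="link.txt"):
    import os
    report_privilege_information()
    if enable_symlink_privilege():
        os.symlink(src, dst)
    else:
        print("SeCreateSymbolicLinkPrivilege could not be enabled")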
| 2.625
| 3
|
unstrip.py
|
pzread/unstrip
| 103
|
12784395
|
<reponame>pzread/unstrip
import sys
import sqlite3
import msgpack
from scan import *
from mark import *
if __name__ == '__main__':
conn = sqlite3.connect('fin.db')
try:
conn.execute('CREATE TABLE flowfin (label text primary key,len int,fin blob,hash text);')
conn.execute('CREATE INDEX index_flowfin_len ON flowfin (len);')
conn.execute('CREATE INDEX index_flowfin_hash ON flowfin (hash);')
except sqlite3.OperationalError:
pass
if 'gendb' in sys.argv:
gen_db(conn)
else:
filepath = sys.argv[-1]
exe = EXE(filepath,filepath)
mark_list = []
call_loc = set()
start_pc = exe.elf.header['e_entry']
call_loc = exe.ScanBlock(exe.GetSection(start_pc))
main_pc = None
cur = conn.cursor()
cur.execute('SELECT * FROM flowfin WHERE label=?;',
('libc-start.o # __libc_start_main',))
finent = cur.fetchone()
if finent != None:
finb = msgpack.unpackb(finent[2])
for pos,loc in call_loc:
fina = exe.FuncFin(loc,set())
if CmpFin(fina,finb) == 0:
ins,_ = Disasm(pos[0],pos[1] - 7)
main_pc = ins.operands[1].value.imm
break
if main_pc != None:
mark_list.append((exe.GetSection(main_pc),'main'))
call_loc.update(exe.ScanBlock(exe.GetSection(main_pc)))
for pos,loc in call_loc:
fina = exe.FuncFin(loc,set())
find_name = None
for row in conn.execute('SELECT * FROM flowfin WHERE len<=?;',
(len(fina),)):
finb = msgpack.unpackb(row[2])
dis = CmpFin(fina,finb)
if dis == 0:
find_name = row[0]
break
if find_name == None:
find_name = '<unknown>'
else:
mark_list.append((loc,find_name.split(' # ')[1]))
print('%016lx - %s'%(loc[0].base + loc[1],find_name))
mark(exe,mark_list)
| 2.203125
| 2
|
configs/bead/cascade_mask_rcnn_r50_fpn_1x_coco_type_6.py
|
anley1/Swin-Transformer-Object-Detection
| 0
|
12784396
|
<reponame>anley1/Swin-Transformer-Object-Detection
_base_ = [
'./cascade_mask_rcnn_r50_fpn.py',
'../_base_/datasets/bead_cropped_type_6_mask.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
#checkpoint_config = dict(max_keep_ckpts=4)
| 0.996094
| 1
|
unimodal_irl/bv_maxlikelihood_irl.py
|
aaronsnoswell/UniModal-IRL
| 2
|
12784397
|
<filename>unimodal_irl/bv_maxlikelihood_irl.py
import copy
from pprint import pprint
import numpy as np
import warnings
import itertools as it
from numba import jit
from mdp_extras import (
Linear,
vi,
q_grad_fpi,
BoltzmannExplorationPolicy,
OptimalPolicy,
)
@jit(nopython=True)
def nb_smq_value_iteration(
t_mat, gamma, rs, rsa, rsas, beta=0.5, eps=1e-6, verbose=False, max_iter=None
):
"""Value iteration to find the SoftMax-optimal state-action value function
This bellman recursion is defined in Section 3 of Apprenticeship Learning about
Multiple Intentions by Babes-Vroman et al. 2011
(http://www.icml-2011.org/papers/478_icmlpaper.pdf).
Essentially, the max over actions from the regular Q-function is replaced with
an operator that averages over all possible actions, where the weight of each
Q(s, a) is given by e^{βQ(s, a)} / Σ_{a'} e^{βQ(s, a')}.
Args:
t_mat (numpy array): |S|x|A|x|S| transition matrix
gamma (float): Discount factor
rs (numpy array): |S| State reward vector
rsa (numpy array): |S|x|A| State-action reward vector
rsas (numpy array): |S|x|A|x|S| State-action-state reward vector
beta (float): Boltzmann exploration policy scale parameter
eps (float): Value convergence tolerance
verbose (bool): Extra logging
max_iter (int): If provided, iteration will terminate regardless of convergence
after this many iterations.
Returns:
(numpy array): |S|x|A| matrix of state-action values
"""
q_value_fn = np.zeros((t_mat.shape[0], t_mat.shape[1]))
_iter = 0
while True:
delta = 0
for s1 in range(t_mat.shape[0]):
for a in range(t_mat.shape[1]):
# q_weights defines the weight of each Q(s, a) term in the
# SoftMax operator
q_weights = np.exp(q_value_fn.copy() * beta)
norm = np.sum(q_weights, axis=1)
# Normalize weights to proper probabilities
for _a in range(q_weights.shape[1]):
q_weights[:, _a] = q_weights[:, _a] / norm
q = q_value_fn[s1, a]
state_values = np.zeros(t_mat.shape[0])
for s2 in range(t_mat.shape[2]):
state_values[s2] += t_mat[s1, a, s2] * (
rs[s1]
+ rsa[s1, a]
+ rsas[s1, a, s2]
+ gamma
* (q_value_fn[s2, :].flatten() @ q_weights[s2, :].flatten())
)
q_value_fn[s1, a] = np.sum(state_values)
delta = max(delta, np.abs(q - q_value_fn[s1, a]))
if max_iter is not None and _iter >= max_iter:
if verbose:
print("Terminating before convergence, # iterations = ", _iter)
break
# Check value function convergence
if delta < eps:
break
else:
if verbose:
print("Value Iteration #", _iter, " delta=", delta)
_iter += 1
return q_value_fn
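# --- Editor's illustration (not part of the original module) ---
# The SoftMax averaging used above, isolated for a single state: instead of a
# hard max over actions, Q(s, .) is averaged with Boltzmann weights
# e^{beta * Q(s, a)} / sum_a' e^{beta * Q(s, a')}. Numbers are arbitrary.
def _soft_value_example(beta=0.5):
    q_s = np.array([1.0, 2.0, 0.5])      # Q(s, a) for three actions
    w = np.exp(beta * q_s)
    w = w / w.sum()                      # Boltzmann weights over actions
    return float(q_s @ w)                # lies between mean(Q(s, .)) and max(Q(s, .))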
def bv_maxlikelihood_irl(
x,
xtr,
phi,
rollouts,
weights=None,
boltzmann_scale=0.5,
qge_tol=1e-3,
nll_only=False,
):
"""Compute the average rollout Negative Log Likelihood (and gradient) for ML-IRL
This method is biased to prefer shorter paths through any MDP.
TODO ajs 29/Oct/2020 Support SoftMax Q function from Babes-Vroman 2011 paper via
nb_smq_value_iteration()
Args:
x (numpy array): Current reward function parameter vector estimate
xtr (mdp_extras.DiscreteExplicitExtras): Extras object for the MDP being
optimized
phi (mdp_extras.FeatureFunction): Feature function to use with linear reward
parameters. We require len(phi) == len(x).
rollouts (list): List of (s, a) rollouts.
weights (numpy array): Optional path weights for weighted IRL problems
boltzmann_scale (float): Optimality parameter for Boltzmann policy. Babes-Vroman
use 0.5. Values closer to 1.0 cause slower convergence, but values closer to
0 model the demonstrations as being non-expert. Empirically I find 0.2 works
well.
qge_tol (float): Tolerance for q-gradient estimation.
nll_only (bool): If true, only return NLL
"""
if weights is None:
weights = np.ones(len(rollouts)) / len(rollouts)
# Compute Q*, pi* for current reward guess
reward = Linear(x)
_, q_star = vi(xtr, phi, reward)
# To use the soft Q function from Babes-Vroman's paper, uncomment below
# q_star = nb_smq_value_iteration(
# xtr.t_mat, xtr.gamma, *reward.structured(xtr, phi), boltzmann_scale
# )
pi = BoltzmannExplorationPolicy(q_star, scale=boltzmann_scale)
if not nll_only:
# Get Q* gradient for current reward parameters
dq_dtheta = q_grad_fpi(reward.theta, xtr, phi, tol=qge_tol)
# Sweep demonstrated state-action pairs
nll = 0
nll_grad = np.zeros_like(x)
num_sa_samples = 0
for path, weight in zip(rollouts, weights):
for s, a in path[:-1]:
num_sa_samples += 1
ell_theta = pi.prob_for_state_action(s, a)
# Accumulate negative log likelihood of demonstration data
nll += -1 * weight * np.log(ell_theta)
if not nll_only:
expected_action_grad = np.sum(
[
pi.prob_for_state_action(s, b) * dq_dtheta[s, b, :]
for b in xtr.actions
],
axis=0,
)
dl_dtheta = boltzmann_scale * (
expected_action_grad - dq_dtheta[s, a, :]
)
nll_grad += weight * dl_dtheta
# Convert NLL and gradient to average, not sum
# This makes for consistent magnitude values regardless of dataset size
nll /= len(rollouts)
nll_grad /= len(rollouts)
if nll_only:
return nll
else:
return nll, nll_grad
def maxlikelihood_ml_path(
xtr, phi, reward, start, goal, max_path_length, boltzmann_scale=0.5
):
"""Find the ML path from s1 to sg under a MaxLikelihood model
If transitions can inccur +ve rewards the returned paths may contain loops
NB ajs 14/Jan/2020 The log likelihood of the path that we compute internally
is fine for doing viterbi ML path inference, but it's not the actual path
log likelihood - it's not normalized, and the gamma time offset
is incorrect (depending on what start time the Viterbi alg picks).
Args:
xtr (DiscreteExplicitExtras): MDP Extras object
phi (FeatureFunction): MDP Featrure function
reward (Linear): Linear reward function
start (int): Starting state
goal (int): End state
max_path_length (int): Maximum allowable path length to search
boltzmann_scale (float): Boltzmann scale parameter
Returns:
(list): Maximum Likelihood path from start to goal under the given MaxEnt reward
function, or None if no path is possible
"""
_, q_star = vi(xtr, phi, reward)
# Initialize an SxA LL Viterbi trellis
sa_lls = np.zeros((len(xtr.states), len(xtr.actions), max_path_length)) - np.inf
for a in xtr.actions:
        sa_lls[goal, a, :] = boltzmann_scale * q_star[goal, a]
# Suppress divide by zero - we take logs of many zeroes here
with np.errstate(divide="ignore"):
# Walk backward to propagate the maximum LL
for t in range(max_path_length - 2, -1, -1):
# Max-Reduce over actions to compute state LLs
# (it's a max because we get to choose our actions)
s_lls = np.max(sa_lls, axis=1)
# Sweep end states
for s2 in xtr.states:
if np.isneginf(s_lls[s2, t + 1]):
# Skip this state - it hasn't been reached by probability messages yet
continue
# Sweep actions
for a in xtr.actions:
# Sweep starting states
for s1 in xtr.states:
if xtr.terminal_state_mask[s1]:
# We can't step forward from terminal states - skip this one
continue
transition_ll = boltzmann_scale * q_star[s1, a] + np.log(
xtr.t_mat[s1, a, s2]
)
if np.isneginf(transition_ll):
# This transition is impossible - skip
continue
# Store the max because we're after the maximum likelihood path
sa_lls[s1, a, t] = max(
sa_lls[s1, a, t], transition_ll + s_lls[s2, t + 1]
)
    # Max-reduce to get state/action ML trellises for convenience
s_lls = np.max(sa_lls, axis=1)
# Identify our starting time
if np.isneginf(np.max(s_lls[start])):
# There is no feasible path from s1 to sg less or equal to than max_path_length
return None
start_time = np.argmax(s_lls[start, :])
# Walk forward from start state, start time to re-construct path
state = start
time = start_time
ml_path = []
while state != goal:
action = np.argmax(sa_lls[state, :, time])
ml_path.append((state, action))
successor_states = [s for (a, s) in xtr.children[state] if a == action]
# Choose successor state with highest log likelihood at time + 1
ml = -np.inf
next_state = None
for s2 in successor_states:
s2_ll = s_lls[s2, time + 1]
if s2_ll >= ml:
next_state = s2
ml = s2_ll
state = next_state
time = time + 1
# Add final (goal) state
ml_path.append((state, None))
return ml_path
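# --- Editor's illustration (not part of the original module) ---
# The backward max-propagation used above, on a toy 3-state chain so it can be
# followed without MDP objects. Edge scores are log-probabilities; state 2 is
# the goal and the trellis is filled from the last time step backwards.
def _viterbi_max_example(horizon=4, goal=2):
    with np.errstate(divide="ignore"):
        ll = np.log(np.array([
            [0.1, 0.6, 0.3],             # scores for transitions out of state 0
            [0.0, 0.2, 0.8],             # out of state 1
            [0.0, 0.0, 1.0],             # out of state 2 (absorbing goal)
        ]))
    best = np.full((3, horizon), -np.inf)
    best[goal, :] = 0.0                  # reaching the goal adds no further cost
    for t in range(horizon - 2, -1, -1): # walk backward, as in the function above
        for s in range(3):
            best[s, t] = np.max(ll[s] + best[:, t + 1])
    return best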
def main():
"""Main function"""
# Test BV on PuddleWorld
from scipy.optimize import minimize
# from multimodal_irl.envs import CanonicalPuddleWorldEnv, puddle_world_extras
from multimodal_irl.envs import ElementWorldEnv, element_world_extras
# env = CanonicalPuddleWorldEnv(wind=0.0)
# xtr, phi, gt_rewards = puddle_world_extras(env)
# reward = gt_rewards["dry"]
env = ElementWorldEnv(wind=0.1, num_elements=3)
xtr, phi, gt_rewards = element_world_extras(env)
reward = gt_rewards[0]
print(reward.theta)
scale = 2.0
_, q_star = vi(xtr, phi, reward)
pi_star = BoltzmannExplorationPolicy(q_star, scale=scale)
demo_star = pi_star.get_rollouts(env, 10)
phi_bar_star = phi.demo_average(demo_star, xtr.gamma)
print(phi_bar_star)
x0 = np.zeros(len(phi))
bounds = np.array([(-10.0, 0.0) for _ in range(len(phi))])
res = minimize(
bv_maxlikelihood_irl,
x0,
args=(xtr, phi, demo_star, None, scale),
jac=True,
bounds=bounds,
options=dict(disp=True),
)
print(res)
pass
if __name__ == "__main__":
main()
| 2.359375
| 2
|
routes/case.py
|
SimoneCff/TW6-PJ-PAPC
| 2
|
12784398
|
from bson.json_util import dumps
from flask import request, render_template
from app import Carrello, app
from complements.db import SearchIntoDb, SearchviaAttributesCASE
from complements.forms import Searchfor, CaseSelect
@app.route('/case', methods=['POST', 'GET'])
def case():
form1 = Searchfor()
form2 = CaseSelect()
qir = list()
if request.method == 'POST':
if request.form.get('submit'):
query = SearchIntoDb("CASE", request.form.get('search')).findquery()
for x in query:
qir.insert(1, [dumps(x['name']), dumps(x['marca']), dumps(x['COSTO']), dumps(x['_id'])])
return render_template("case.html", form=form1, form2=form2, queri=qir)
if request.form.get('val'):
x = str(request.form.get('val'))
x = x.split('"$oid": "', 1)[1]
x = x.split('"', 1)[0]
Carrello.Insert(x, 6, "CASE")
if request.form.get("submitf"):
marche = list()
model = list()
if request.form.get("Col"):
marche.append("Cooler Master")
if request.form.get("Shark"):
marche.append("Sharkoon")
if request.form.get("Therm"):
marche.append("Thermaltake")
if request.form.get("ATX"):
model.append("ATX")
if request.form.get("mATX"):
model.append("mATX")
if request.form.get('minmonet'):
min = request.form.get('minmonet')
else:
min = "0"
if request.form.get('maxmonet'):
max = request.form.get('maxmonet')
else:
max = "10000"
query = SearchviaAttributesCASE("CASE", " ".join(marche), min, max, " ".join(model)
).findqueryattr()
for x in query:
qir.insert(1, [dumps(x['name']), dumps(x['marca']), dumps(x['COSTO']), dumps(x['_id'])])
return render_template("case.html", form=form1, form2=form2, queri=qir)
return render_template("case.html", form=form1, form2=form2)
| 2.21875
| 2
|
Part 2/Chapter 02/Exercises/excercise_13.py
|
phuycke/Practice-of-computing-using-Python
| 1
|
12784399
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: <NAME>
email: <EMAIL>
GitHub: phuycke
"""
#%%
asked = str(input('Input an integer: '))
while not asked.isdigit():
asked = str(input('Error: try again. Input an integer: '))
else:
print('The integer is : {}'.format(asked))
| 3.765625
| 4
|
pbc_utils.py
|
DrSuiunbek/pbc
| 0
|
12784400
|
<reponame>DrSuiunbek/pbc
import json
import rlp
from Crypto.Hash import keccak
def generate():
data = {
'Producers': ['P1', 'P2', 'P3'],
'Freighters': ['F1', 'F2'],
'Shops': ['S1', 'S2'],
'Miners': ['M1'],
'NetworkID': '2003',
'Password': '<PASSWORD>'
}
jstr = json.dumps(data, indent=4)
print(jstr)
def read_config(filename):
with open(filename) as inputfile:
data = json.load(inputfile)
    for key in ('Producers', 'Freighters', 'Shops', 'Miners', 'NetworkID', 'Password'):
        if key not in data:
            print('{} input is missing.'.format(key))
            return None
    return data
def normalize_address(x, allow_blank=False):
if allow_blank and x == '':
return ''
if len(x) in (42, 50) and x[:2] == '0x':
x = x[2:]
if len(x) in (40, 48):
x = rlp.utils.decode_hex(x)
if len(x) != 20:
raise Exception("Invalid address format: %r" % x)
return x
def mk_contract_address(sender, nonce):
keccak_hash = keccak.new(digest_bits=256)
keccak_hash.update(rlp.encode([normalize_address(sender), nonce]))
return keccak_hash.hexdigest()[24:]
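# Editor's sketch (illustrative): deriving a contract address for a made-up
# sender and nonce.  It relies only on the helpers above (and therefore on the
# same `rlp`/`pycryptodome` versions this module was written against); the
# address is a dummy value, not a real account.
if __name__ == '__main__':
    dummy_sender = '0x' + 'ab' * 20  # dummy 20-byte address in hex
    print(mk_contract_address(dummy_sender, 1))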
| 2.796875
| 3
|
datazen/targets.py
|
vkottler/datazen
| 2
|
12784401
|
<filename>datazen/targets.py
"""
datazen - An interface for parsing and matching targets.
"""
# built-in
from collections import defaultdict
from copy import deepcopy
import re
from typing import Dict, Tuple, List, NamedTuple
# internal
from datazen.paths import (
advance_dict_by_path,
unflatten_dict,
format_resolve_delims,
)
from datazen.parsing import merge
KW_OPEN = "{"
KW_CLOSE = "}"
KW_PATTERN = "[a-zA-Z0-9-_.]+"
def target_is_literal(name: str) -> bool:
"""
Determine if a named target has keywords or not (is otherwise literal).
"""
return name.count(KW_OPEN) == name.count(KW_CLOSE) == 0
class ParseResult(NamedTuple):
"""
An encapsulation for a regular expression and the in-order keywords that
can be mapped to 'group' indices.
"""
pattern: re.Pattern
keys: List[str]
def parse_target(name: str) -> ParseResult:
"""
From a target name, provide a compiled pattern and an in-order list of
the names of the keyword arguments that will appear (group order).
"""
open_len = name.count(KW_OPEN)
assert open_len == name.count(KW_CLOSE)
pattern = "^"
keys = []
for _ in range(open_len):
start = name.index(KW_OPEN) + 1
end = name.index(KW_CLOSE)
pattern += name[: start - 1]
pattern += f"({KW_PATTERN})"
keys.append(name[start:end])
name = name[end + 1 :]
pattern += name + "$"
assert len(keys) == open_len
return ParseResult(re.compile(pattern), keys)
def parse_targets(
targets: List[dict],
) -> Tuple[Dict[str, dict], Dict[str, dict]]:
"""
From a list of target structures, parse them into a dictionary with keys
as target names and data initialization to support future interaction.
"""
literals: Dict[str, dict] = {}
patterns: Dict[str, dict] = {}
for target in targets:
data: dict = {}
assert "name" in target
data["literal"] = target_is_literal(target["name"])
dest_set = literals if data["literal"] else patterns
data["data"] = target
parsed = parse_target(target["name"])
data["pattern"] = parsed.pattern
data["keys"] = parsed.keys
dest_set[target["name"]] = data
return literals, patterns
MatchData = Dict[str, str]
class TargetMatch(NamedTuple):
"""
    An encapsulation of results when attempting to match a target name to a
pattern. If a target was matched and had keyword substitutions, the actual
values used will be set as match data.
"""
found: bool
substitutions: MatchData
def match_target(
name: str, pattern: re.Pattern, keys: List[str]
) -> TargetMatch:
"""
From a target name, attempt to match against a pattern and resolve a set
of key names.
"""
data: MatchData = defaultdict(str)
result = pattern.fullmatch(name)
if result is None:
return TargetMatch(False, data)
for idx, key in enumerate(keys):
data[key] = result.group(1 + idx)
return TargetMatch(True, data)
def resolve_target_list(target_list: list, match_data: MatchData) -> list:
"""
    Resolve matched-target data into a list form of target data from a
manifest.
"""
result: list = []
for value in target_list:
if isinstance(value, dict):
result.append(resolve_target_data(value, match_data))
elif isinstance(value, list):
result.append(resolve_target_list(value, match_data))
elif isinstance(value, str):
result.append(format_resolve_delims(value, match_data))
else:
result.append(value)
assert len(result) == len(target_list)
return result
def resolve_dep_data(entry: dict, data: dict) -> dict:
"""
Implements the business logic for applying match data to manifest entries.
"""
if "overrides" in entry and "override_path" in entry:
data = deepcopy(data)
to_update = advance_dict_by_path(
entry["override_path"].split("."), data
)
if isinstance(to_update, dict):
merge(
to_update,
unflatten_dict(entry["overrides"]),
expect_overwrite=True,
)
return data
def resolve_target_data(target_data: dict, match_data: MatchData) -> dict:
"""Resolve matched-target data into a target's data from a manifest."""
result: dict = {}
for key, value in target_data.items():
if isinstance(value, dict):
result[key] = resolve_target_data(value, match_data)
elif isinstance(value, list):
result[key] = resolve_target_list(value, match_data)
elif isinstance(value, str):
result[key] = format_resolve_delims(value, match_data)
else:
result[key] = value
assert len(result.keys()) == len(target_data.keys())
return result
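# Editor's sketch (illustrative, not part of datazen's documented examples):
# how parse_target() and match_target() fit together for a keyword-style
# target name.  The target name and values below are made up.
if __name__ == "__main__":
    parsed = parse_target("configs-{group}-{flavor}")
    match = match_target("configs-web-debug", parsed.pattern, parsed.keys)
    assert match.found
    assert match.substitutions == {"group": "web", "flavor": "debug"}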
| 2.734375
| 3
|
test.py
|
chensteven/sec-edgar
| 0
|
12784402
|
from secedgar.filings import Filing, FilingType
# Download 10-K filings for every CIK listed in the csv below.
import pandas as pd
path = '/Users/schen/sec-scraper/data/cik_ticker.csv'
df = pd.read_csv(path, sep='|')
def run(df):
cik = list(df['CIK'])
names = list(df['Name'])
for c, n in zip(cik, names):
if len(str(c)) < 10:
missing = 10 - len(str(c))
temp = ("0" * missing) + str(c)
print("SCRAPING {} ...".format(temp))
            my_filings = Filing(cik=temp, filing_type=FilingType.FILING_10K)  # 10-K filings for this CIK
try:
                my_filings.save('./filings/')  # Save this company's filings under ./filings/
except ValueError:
print("No {}".format(n))
run(df)
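# Editor's note (illustrative): the manual zero padding inside run() is
# equivalent to str.zfill, which pads a CIK to the 10 digits EDGAR expects.
def _pad_cik(cik) -> str:
    return str(cik).zfill(10)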
| 3.109375
| 3
|
taskbooks/custom1.py
|
no-such-anthony/net-run
| 1
|
12784403
|
<filename>taskbooks/custom1.py
#from connections import get_connection, close_connection
def custom1(device, **kwargs):
# pop out any additional kwargs you may have passed
#tasks = kwargs.pop('tasks', [])
#connection_type = kwargs.pop('connection_type', '')
#connection_key = kwargs.pop('connection_key', '')
# connect to device
#device['nc'] = get_connection(device, connection_type, connection_key)
print('device = ', device['name'])
#close_connection(device['nc'], connection_type)
# return either a dictionary with at least a 'result' key/value pair, or simply a string/integer
output = {}
output['result'] = "it's evolving!"
return output
taskbook = {}
# need at least a primary_task pointing to a callable function
taskbook['name'] = "Custom, example 1!"
taskbook['primary_task'] = custom1
taskbook['kwargs'] = {}
#taskbook['kwargs']['connection_type'] = None
#taskbook['kwargs']['connection_key'] = None
#taskbook['kwargs']['tasks'] = []
| 2.8125
| 3
|
src/trainers/__init__.py
|
pomelyu/ML_HW
| 0
|
12784404
|
from .classification_trainer import ClassificationTrainer
from .gan_trainer import GANTrainer
from .regression_trainer import RegressionTrainer
from .trainer import Trainer
from .hw3_trainer import HW3Trainer
| 1.054688
| 1
|
adafruit_picam/camera/__init__.py
|
avirshup/adafruit-pi-cam
| 0
|
12784405
|
import typing as _t
from .base import Camera
from .mock import MockCamera
try:
from .pi import RaspberryPiCamera
except ImportError as exc:
assert exc.name == "picamera"
_rpi_exc = exc
RaspberryPiCamera = None
else:
_rpi_exc = None
def get_cls(fake: bool = False) -> _t.Type[Camera]:
if fake:
return MockCamera
elif _rpi_exc is not None:
raise _rpi_exc
else:
return RaspberryPiCamera
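# Editor's note (illustrative usage, not part of the package):
#
#     camera_cls = get_cls(fake=True)   # always MockCamera; no picamera needed
#     camera_cls = get_cls()            # RaspberryPiCamera, or re-raises the
#                                       # ImportError captured at import time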
| 2.34375
| 2
|
kata/07/string_end_with.py
|
vyahello/upgrade-python-kata
| 0
|
12784406
|
"""
Complete the solution so that it returns true if the first argument (a string) ends with the second argument (also a string).
"""
def solution(string: str, ending: str) -> bool:
"""Checks if given string ends with specific string.
Examples:
>>> assert solution("abcb", "cb")
>>> assert not solution("abcb", "d")
"""
return string.endswith(ending)
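# Editor's sketch (illustrative): an equivalent hand-rolled check without
# str.endswith.  Slicing the tail of the string covers non-empty endings, and
# an empty ending matches any string.
def solution_manual(string: str, ending: str) -> bool:
    return ending == "" or string[-len(ending):] == ending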
if __name__ == "__main__":
print(solution("abcb", "cb"))
| 4.25
| 4
|
codesmith/CodeCommit/PipelineTrigger/test_pipline_trigger.py
|
codesmith-gmbh/forge
| 0
|
12784407
|
import unittest
import codesmith.CodeCommit.PipelineTrigger.pipline_trigger as pt
import json
import io
import zipfile
COMMIT_REFERENCE = {
"commit": "5<PASSWORD>",
"ref": "refs/heads/master"
}
TAG_REFERENCE = {
"commit": "<PASSWORD>",
"ref": "refs/tags/v1.1.0"
}
class PipelineTriggerTest(unittest.TestCase):
def test_extract_repository_name(self):
self.assertEqual('my-repo', pt.extract_repository_name('arn:aws:codecommit:eu-west-1:123456789012:my-repo'))
self.assertEqual('', pt.extract_repository_name(''))
self.assertEqual('anything', pt.extract_repository_name('anything'))
def test_is_commit(self):
self.assertTrue(pt.is_commit(COMMIT_REFERENCE))
self.assertFalse(pt.is_commit(TAG_REFERENCE))
def test_is_tag(self):
self.assertTrue(pt.is_tag(TAG_REFERENCE))
self.assertFalse(pt.is_tag(COMMIT_REFERENCE))
def test_extract_tag(self):
self.assertEqual('v1.1.0', pt.extract_tag(TAG_REFERENCE))
def test_event(self):
with open('code_commit_event.json') as f:
event = json.load(f)
pipeline_trigger = pt.derive_trigger(event['Records'][0])
self.assertEqual('eu-west-1', pipeline_trigger.aws_region)
self.assertEqual('my-repo', pipeline_trigger.repository)
self.assertEqual('git checkout 5<PASSWORD>0<PASSWORD>', pipeline_trigger.checkout_command)
buf = io.BytesIO(pipeline_trigger.generate_zip_file())
with zipfile.ZipFile(buf) as zf:
given_files = [file.filename for file in zf.filelist]
expected_files = ['buildspec.yaml', 'chechkout.sh']
self.assertEqual(expected_files, given_files)
given_checkout_text = pipeline_trigger.generate_files()['chechkout.sh']
expected_checkout_text = '''#!/bin/bash
git config --global credential.helper '!aws codecommit credential-helper $@'
git config --global credential.UseHttpPath true
git clone --shallow-submodules https://git-codecommit.eu-west-1.amazonaws.com/v1/repos/my-repo repo
cd repo
git checkout 5<PASSWORD>
cd
'''
self.assertEqual(expected_checkout_text, given_checkout_text)
if __name__ == '__main__':
unittest.main()
| 2.3125
| 2
|
diediedie/brick_lvm.py
|
WaltHP/hpe-openstack-tools
| 1
|
12784408
|
<reponame>WaltHP/hpe-openstack-tools<gh_stars>1-10
#!/usr/bin/python
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Sample script to dump out LVM volume group and logical volume information
on the current system.
"""
import sys
from oslo_config import cfg
from oslo_log import log
from diediedie import auth_args
from diediedie import utils
from os_brick.local_dev import lvm
parser = auth_args.parser
parser.add_argument("-g", "--group",
metavar="<lvm-volume-group>",
default=None,
help='LVM volume group')
CONF = cfg.CONF
log.register_options(CONF)
CONF([], project='brick', version='1.0')
log.setup(CONF, 'brick')
LOG = log.getLogger(__name__)
def main():
"""The main."""
args = parser.parse_args()
vg_name = args.group
#vg = lvm.LVM(vg_name, 'sudo')
root_helper = 'sudo'
groups = lvm.LVM.get_all_volume_groups(root_helper, vg_name=vg_name)
for group in groups:
utils.print_dict(group, value_align='l', disable_unicode=True)
vols = lvm.LVM.get_lv_info(root_helper, vg_name=group['name'])
for vol in vols:
utils.print_dict(vol)
if __name__ == "__main__":
main()
| 1.90625
| 2
|
handroll/date.py
|
iter8ve/handroll
| 17
|
12784409
|
# Copyright (c) 2017, <NAME>
from datetime import datetime
import time
def convert(date):
"""Convert a date string into a datetime instance. Assumes date string
    is in RFC 3339 format (without a timezone offset)."""
time_s = time.strptime(date, '%Y-%m-%dT%H:%M:%S')
return datetime.fromtimestamp(time.mktime(time_s))
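# Editor's sketch (illustrative, not part of handroll): convert() on a sample
# timestamp string.  The value is made up; because mktime/fromtimestamp round
# trip through local time, the wall-clock fields are preserved.
if __name__ == "__main__":
    print(convert("2017-03-04T10:30:00"))  # -> 2017-03-04 10:30:00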
| 3.84375
| 4
|
src/network_utils_test.py
|
omid55/dynamic_sparse_balance_theory
| 0
|
12784410
|
# Omid55
# Test module for network_utils.
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import networkx as nx
import pandas as pd
import numpy as np
import unittest
import datetime
import re
from parameterized import parameterized
import utils
import network_utils
class MyTestClass(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.triad_map, cls.triad_list = (
network_utils.generate_all_possible_sparse_triads())
@classmethod
def tearDownClass(cls):
del cls.triad_map
del cls.triad_list
# =========================================================================
# ==================== extract_graph ======================================
# =========================================================================
@parameterized.expand([
["latest_multiple_edge_weight", False],
["sum_of_multiple_edge_weights", True]])
def test_extract_graph(self, name, sum_multiple_edge):
matrix_edges = [
[1, 2, +1, datetime.datetime(2017, 1, 1)],
[1, 2, +5, datetime.datetime(2017, 1, 2)],
[2, 3, +3, datetime.datetime(2017, 1, 4)],
[3, 1, +1, datetime.datetime(2017, 2, 5)],
[2, 3, -2, datetime.datetime(2017, 1, 6)],
[1, 4, -1, datetime.datetime(2017, 2, 13)],
[4, 3, -5, datetime.datetime(2017, 2, 22)],
[4, 3, -5, datetime.datetime(2017, 2, 24)]]
sample_edge_list = pd.DataFrame(
matrix_edges, columns=['source', 'target', 'weight', 'edge_date'])
expected = nx.DiGraph()
expected.add_nodes_from([1, 2, 3, 4])
if sum_multiple_edge:
expected.add_edge(1, 2, weight=6)
else:
expected.add_edge(1, 2, weight=5)
if sum_multiple_edge:
expected.add_edge(2, 3, weight=1)
else:
expected.add_edge(2, 3, weight=-2)
expected.add_edge(3, 1, weight=1)
expected.add_edge(1, 4, weight=-1)
if sum_multiple_edge:
expected.add_edge(4, 3, weight=-10)
else:
expected.add_edge(4, 3, weight=-5)
computed = network_utils.extract_graph(
sample_edge_list, sum_multiple_edge=sum_multiple_edge)
self.assertTrue(
utils.graph_equals(
expected,
computed,
weight_column_name='weight'))
# =========================================================================
# ==================== extract_graphs =====================================
# =========================================================================
def test_extract_graphs_raises_with_missing_columns(self):
sample_edge_list = pd.DataFrame({'source': [1, 2], 'target': [5, 6]})
with self.assertRaises(ValueError):
network_utils.extract_graphs(edge_list=sample_edge_list)
@parameterized.expand(
[["seperated graphs", False],
["accumulative graphs", True]])
def test_extract_graphs(self, name, accumulative):
# source, target, weight, edge_date
matrix_edges = [[1, 2, +1, datetime.datetime(2017, 1, 1)],
[2, 3, +3, datetime.datetime(2017, 1, 4)],
[3, 1, +1, datetime.datetime(2017, 2, 5)],
[1, 4, -1, datetime.datetime(2017, 2, 13)],
[4, 3, -5, datetime.datetime(2017, 2, 24)],
[-1, -1, -1, datetime.datetime(2017, 2, 28)]]
        # The last one is going to be ignored because it falls into another period
# which is neglected.
sample_edge_list = pd.DataFrame(
matrix_edges, columns=['source', 'target', 'weight', 'edge_date'])
g1 = nx.DiGraph()
g1.add_nodes_from([1, 2, 3])
g1.add_edge(1, 2, weight=1)
g1.add_edge(2, 3, weight=3)
g2 = nx.DiGraph()
g2.add_nodes_from([1, 3, 4])
g2.add_edge(3, 1, weight=1)
g2.add_edge(1, 4, weight=-1)
g2.add_edge(4, 3, weight=-5)
g3 = nx.DiGraph()
g3.add_nodes_from([1, 2, 3, 4])
g3.add_edge(1, 2, weight=1)
g3.add_edge(2, 3, weight=3)
g3.add_edge(3, 1, weight=1)
g3.add_edge(1, 4, weight=-1)
g3.add_edge(4, 3, weight=-5)
if not accumulative:
expected = [g1, g2]
else:
expected = [g1, g3]
computed = network_utils.extract_graphs(
edge_list=sample_edge_list, weeks=4, accumulative=accumulative)
for expected_graph, computed_graph in zip(expected, computed):
self.assertTrue(
utils.graph_equals(
expected_graph,
computed_graph,
weight_column_name='weight'))
# =========================================================================
# ====================== get_all_degrees ==================================
# =========================================================================
def test_get_all_degrees(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4, 5])
dg.add_edge(1, 1, weight=6)
dg.add_edge(1, 2, weight=1)
dg.add_edge(1, 4, weight=-5)
dg.add_edge(2, 2, weight=-1)
dg.add_edge(2, 3, weight=1)
dg.add_edge(3, 1, weight=-4)
dg.add_edge(3, 2, weight=4)
dg.add_edge(4, 4, weight=-10)
computed = network_utils.get_all_degrees(dg)
expected = (
{1: {'self': 6, 'out': -4, 'in': -4},
2: {'self': -1, 'out': 1, 'in': 5},
3: {'self': 0, 'out': 0, 'in': 1},
4: {'self': -10, 'out': 0, 'in': -5},
5: {'self': 0, 'out': 0, 'in': 0}})
self.assertDictEqual(computed, expected)
# =========================================================================
# ===================== get_just_periods ==================================
# =========================================================================
def test_get_just_periods(self):
matrix_edges = [[1, 2, +1, datetime.datetime(2017, 1, 1)],
[2, 3, +3, datetime.datetime(2017, 1, 4)],
[3, 1, +1, datetime.datetime(2017, 2, 5)],
[1, 4, -1, datetime.datetime(2017, 2, 13)],
[4, 3, -5, datetime.datetime(2017, 2, 24)],
[-1, -1, -1, datetime.datetime(2017, 2, 28)]]
sample_edge_list = pd.DataFrame(
matrix_edges, columns=['source', 'target', 'weight', 'edge_date'])
expected = [['2017-01-01', '2017-01-29'], ['2017-01-29', '2017-02-26']]
computed = network_utils.get_just_periods(
sample_edge_list, weeks=4, accumulative=False)
self.assertEqual(expected, computed)
# =========================================================================
# ==================== get_metrics_for_network ============================
# =========================================================================
def test_get_metrics_for_network(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 3, weight=1)
dg.add_edge(3, 1, weight=-1)
dg.add_edge(1, 3, weight=-1)
computed = network_utils.get_metrics_for_network(dg)
expected = {
'#edges': 4,
'#edges/#nodes': 1,
'#gcc edges': 3,
'#gcc neg edges': 1,
'#gcc nodes': 3,
'#gcc pos edges': 2,
'#neg edges': 2,
'#nodes': 4,
'#pos edges': 2,
'algebraic connectivity': 0,
'average (und) clustering coefficient': 0.75,
'average betweenness': 0.0833,
'average closeness': 0.3888,
'average degree': 2,
'average eigenvector': 0.4222,
'average harmonic': 1.25,
'average in degree': 1,
'average w in degree': 0,
'average w out degree': 0,
'average load': 0.0833,
'average out degree': 1,
'gcc algebraic connectivity': 2.9999,
'gcc diameter': 1,
'unbalanced cycles 3 ratio': 1,
'weights max': 1,
'weights average': 0,
'weights min': -1,
'weights std': 1
}
# utils.print_dict_pretty(computed)
# self.assertDictEqual(computed, expected)
for key, value in expected.items():
self.assertAlmostEqual(value, computed[key], places=3)
# =========================================================================
# ====================== cartwright_harary_balance_ratio ==================
# =========================================================================
def test_cartwright_harary_balance_ratio_notbalanced_graph1(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 3, weight=1)
dg.add_edge(3, 1, weight=-1)
self.assertEqual(network_utils.cartwright_harary_balance_ratio(dg), 0)
def test_cartwright_harary_balance_ratio_notbalanced_graph2(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3])
dg.add_edge(1, 2, weight=-1)
dg.add_edge(2, 3, weight=-1)
dg.add_edge(3, 1, weight=-1)
self.assertEqual(network_utils.cartwright_harary_balance_ratio(dg), 0)
def test_cartwright_harary_balance_ratio_balanced_graph(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 3, weight=-1)
dg.add_edge(3, 1, weight=-1)
self.assertEqual(network_utils.cartwright_harary_balance_ratio(dg), 1)
def test_cartwright_harary_balance_ratio_halfbalanced_graph(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4, 5])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 3, weight=-1)
dg.add_edge(3, 1, weight=-1)
dg.add_edge(3, 4, weight=-1)
dg.add_edge(4, 1, weight=-1)
dg.add_edge(1, 5, weight=1)
dg.add_edge(5, 1, weight=-1)
dg.add_edge(2, 1, weight=1)
self.assertEqual(
network_utils.cartwright_harary_balance_ratio(dg), 0.5)
# =========================================================================
# ========================= sprase_balance_ratio ==========================
# =========================================================================
def test_sparse_balance_ratio_raises_when_incorrect_balance_type(self):
with self.assertRaises(ValueError):
network_utils.sprase_balance_ratio(
dgraph=nx.DiGraph(),
balance_type=0)
@parameterized.expand([
['CartwrightHarary', 1, [0.3, 3, 7]],
['Clustering', 2, [0.5, 5, 5]],
['Transitivity', 3, [0.9, 9, 1]]])
def test_sprase_balance_ratio(
self,
name,
balance_type,
expected_values):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4, 5])
dg.add_edge(1, 2, weight=5)
dg.add_edge(2, 3, weight=-4)
dg.add_edge(3, 1, weight=-7)
dg.add_edge(3, 4, weight=-1)
dg.add_edge(4, 1, weight=-2)
dg.add_edge(1, 5, weight=9)
dg.add_edge(5, 1, weight=-11)
dg.add_edge(2, 1, weight=100)
computed = network_utils.sprase_balance_ratio(
dgraph=dg,
balance_type=balance_type)
np.testing.assert_array_almost_equal(
computed, expected_values, decimal=2)
# =========================================================================
# ======================= fullyconnected_balance_ratio ====================
# =========================================================================
def test_fullyconnected_balance_ratio_raises_when_incorrect_balance_type(
self):
with self.assertRaises(ValueError):
network_utils.fullyconnected_balance_ratio(
dgraph=nx.DiGraph(),
balance_type=0)
def test_fullyconnected_balance_ratio_raises_when_negative_in_dgraph(self):
with self.assertRaises(ValueError):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 1, weight=-1)
network_utils.fullyconnected_balance_ratio(
dgraph=dg,
balance_type=1)
@parameterized.expand([
['Classical', 1, [0.4, 4, 6]],
['Clustering', 2, [0.7, 7, 3]],
['Transitivity', 3, [0.8, 8, 2]]])
def test_fullyconnected_balance_ratio(
self,
name,
balance_type,
expected_values):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4, 5])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 1, weight=1)
dg.add_edge(5, 1, weight=1)
dg.add_edge(1, 5, weight=1)
dg.add_edge(5, 2, weight=1)
dg.add_edge(2, 5, weight=1)
dg.add_edge(5, 3, weight=1)
dg.add_edge(2, 3, weight=1)
computed = network_utils.fullyconnected_balance_ratio(
dgraph=dg,
balance_type=balance_type)
np.testing.assert_array_almost_equal(
computed, expected_values, decimal=2)
# =========================================================================
# ====================== count_different_signed_edges =====================
# =========================================================================
def test_count_different_signed_edges(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 1, weight=1)
dg.add_edge(3, 1, weight=-5)
dg.add_edge(1, 3, weight=-2)
self.assertEqual(network_utils.count_different_signed_edges(dg), 0)
def test_count_different_signed_edges1(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3])
dg.add_edge(1, 2, weight=3)
dg.add_edge(2, 1, weight=4)
dg.add_edge(3, 1, weight=1)
dg.add_edge(1, 3, weight=-1)
self.assertEqual(network_utils.count_different_signed_edges(dg), 1)
def test_count_different_signed_edges2(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3])
dg.add_edge(1, 2, weight=-1)
dg.add_edge(2, 1, weight=1)
dg.add_edge(3, 1, weight=9)
dg.add_edge(1, 3, weight=-2)
self.assertEqual(network_utils.count_different_signed_edges(dg), 2)
# =========================================================================
# ==================== terzi_sprase_balance_ratio =========================
# =========================================================================
def test_terzi_sprase_balance_ratio_notbalanced_graph1(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 3, weight=1)
dg.add_edge(3, 1, weight=-1)
expected = 0
computed = network_utils.terzi_sprase_balance_ratio(
dg, undirected=True)
np.testing.assert_almost_equal(computed, expected)
def test_terzi_sprase_balance_ratio_notbalanced_graph2(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3])
dg.add_edge(1, 2, weight=-1)
dg.add_edge(2, 3, weight=-1)
dg.add_edge(3, 1, weight=-1)
expected = 0
computed = network_utils.terzi_sprase_balance_ratio(
dg, undirected=True)
np.testing.assert_almost_equal(computed, expected)
def test_terzi_sprase_balance_ratio_balanced_graph(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 3, weight=-1)
dg.add_edge(3, 1, weight=-1)
expected = 1
computed = network_utils.terzi_sprase_balance_ratio(
dg, undirected=True)
np.testing.assert_almost_equal(computed, expected)
def test_terzi_sprase_balance_ratio_halfbalanced_graph(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4, 5])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 3, weight=-1)
dg.add_edge(3, 1, weight=-1)
dg.add_edge(3, 4, weight=-1)
dg.add_edge(4, 1, weight=-1)
dg.add_edge(1, 5, weight=1)
dg.add_edge(5, 1, weight=-1)
dg.add_edge(2, 1, weight=1)
expected = 0.5
computed = network_utils.terzi_sprase_balance_ratio(
dg, undirected=True)
np.testing.assert_almost_equal(computed, expected)
# =========================================================================
# ================= kunegis_sprase_balance_ratio ==========================
# =========================================================================
def test_kunegis_sprase_balance_ratio_notbalanced_graph1(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 3, weight=1)
dg.add_edge(3, 1, weight=-1)
expected = 0
computed = network_utils.kunegis_sprase_balance_ratio(
dg, undirected=True)
np.testing.assert_almost_equal(computed, expected)
def test_kunegis_sprase_balance_ratio_notbalanced_graph2(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3])
dg.add_edge(1, 2, weight=-1)
dg.add_edge(2, 3, weight=-1)
dg.add_edge(3, 1, weight=-1)
expected = 0
computed = network_utils.kunegis_sprase_balance_ratio(
dg, undirected=True)
np.testing.assert_almost_equal(computed, expected)
def test_kunegis_sprase_balance_ratio_balanced_graph(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 3, weight=-1)
dg.add_edge(3, 1, weight=-1)
expected = 1
computed = network_utils.kunegis_sprase_balance_ratio(
dg, undirected=True)
np.testing.assert_almost_equal(computed, expected)
def test_kunegis_sprase_balance_ratio_halfbalanced_graph(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4, 5])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 3, weight=-1)
dg.add_edge(3, 1, weight=-1)
dg.add_edge(3, 4, weight=-1)
dg.add_edge(4, 1, weight=-1)
dg.add_edge(1, 5, weight=1)
dg.add_edge(5, 1, weight=-1)
dg.add_edge(2, 1, weight=1)
expected = 0.6
computed = network_utils.kunegis_sprase_balance_ratio(
dg, undirected=True)
np.testing.assert_almost_equal(computed, expected, decimal=1)
# =========================================================================
# ====================== compute_vanderijt_edge_balance ===================
# =========================================================================
def test_compute_vanderijt_edge_balance_small_graph(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 1, weight=1)
dg.add_edge(2, 3, weight=-5)
dg.add_edge(3, 1, weight=-2)
expected = {(2, 1): {'#nodes3': 1, '#balanced_node3': 1}}
computed = network_utils.compute_vanderijt_edge_balance(dg)
self.assertDictEqual(computed, expected)
def test_compute_vanderijt_edge_balance_allnegative_graph(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4])
dg.add_edge(1, 2, weight=-1)
dg.add_edge(2, 3, weight=-1)
dg.add_edge(1, 3, weight=-1)
dg.add_edge(2, 4, weight=-1)
dg.add_edge(4, 2, weight=-1)
dg.add_edge(2, 1, weight=1)
dg.add_edge(3, 2, weight=1)
dg.add_edge(3, 1, weight=-1)
dg.add_edge(4, 1, weight=-5)
dg.add_edge(1, 4, weight=-5)
dg.add_edge(4, 3, weight=-2)
dg.add_edge(3, 4, weight=1)
expected = {
(1, 2): {'#balanced_node3': 1, '#nodes3': 2},
(3, 2): {'#balanced_node3': 1, '#nodes3': 2},
(1, 3): {'#balanced_node3': 0, '#nodes3': 2},
(3, 4): {'#balanced_node3': 1, '#nodes3': 2},
(3, 1): {'#balanced_node3': 1, '#nodes3': 2},
(1, 4): {'#balanced_node3': 1, '#nodes3': 2},
(2, 3): {'#balanced_node3': 1, '#nodes3': 2},
(2, 1): {'#balanced_node3': 2, '#nodes3': 2},
(4, 3): {'#balanced_node3': 0, '#nodes3': 2},
(4, 2): {'#balanced_node3': 1, '#nodes3': 2},
(4, 1): {'#balanced_node3': 1, '#nodes3': 2},
(2, 4): {'#balanced_node3': 2, '#nodes3': 2}}
computed = network_utils.compute_vanderijt_edge_balance(dg)
self.assertDictEqual(computed, expected)
# @parameterized.expand(
# [["no_isomorph_cycles", False], ["no_isomorph_cycles", True]])
# def test_compute_vanderijt_edge_balance_small_graph(
# self, name, no_isomorph_cycles):
# dg = nx.DiGraph()
# dg.add_nodes_from([1, 2, 3])
# dg.add_edge(1, 2, weight=1)
# dg.add_edge(2, 1, weight=1)
# dg.add_edge(2, 3, weight=-5)
# dg.add_edge(3, 1, weight=-2)
# if no_isomorph_cycles:
# expected = {
# (1, 2): {
# '#balanced': 1,
# '#cycle3': 1,
# 'weight_distance': 9,
# 'as_expected_sign': True}}
# else:
# expected = {
# (1, 2): {
# '#balanced': 1,
# '#cycle3': 1,
# 'weight_distance': 9,
# 'as_expected_sign': True},
# (3, 1): {
# '#balanced': 1,
# '#cycle3': 1,
# 'weight_distance': 3,
# 'as_expected_sign': True},
# (2, 3): {
# '#balanced': 1,
# '#cycle3': 1,
# 'weight_distance': 3,
# 'as_expected_sign': True}}
# computed = network_utils.compute_vanderijt_edge_balance(
# dg, no_isomorph_cycles=no_isomorph_cycles)
# self.assertDictEqual(computed, expected)
# @parameterized.expand(
# [["no_isomorph_cycles", False],
# ["no_isomorph_cycles", True]])
# def test_compute_vanderijt_edge_balance_allnegative_graph(
# self, name, no_isomorph_cycles):
# dg = nx.DiGraph()
# dg.add_nodes_from([1, 2, 3, 4])
# dg.add_edge(1, 2, weight=-1)
# dg.add_edge(2, 3, weight=-1)
# dg.add_edge(3, 1, weight=-1)
# dg.add_edge(1, 4, weight=-5)
# dg.add_edge(4, 3, weight=-2)
# if no_isomorph_cycles:
# expected = {
# (1, 2): {
# '#balanced': 0,
# '#cycle3': 1,
# 'weight_distance': 2,
# 'as_expected_sign': False},
# (1, 4): {
# '#balanced': 0,
# '#cycle3': 1,
# 'weight_distance': 7,
# 'as_expected_sign': False}}
# else:
# expected = {
# (1, 2): {
# '#balanced': 0,
# '#cycle3': 1,
# 'weight_distance': 2,
# 'as_expected_sign': False},
# (1, 4): {
# '#balanced': 0,
# '#cycle3': 1,
# 'weight_distance': 7,
# 'as_expected_sign': False},
# (2, 3): {
# '#balanced': 0,
# '#cycle3': 1,
# 'weight_distance': 2,
# 'as_expected_sign': False},
# (3, 1): {
# '#balanced': 0,
# '#cycle3': 2,
# 'weight_distance': 13,
# 'as_expected_sign': False},
# (4, 3): {
# '#balanced': 0,
# '#cycle3': 1,
# 'weight_distance': 7,
# 'as_expected_sign': False}}
# computed = network_utils.compute_vanderijt_edge_balance(
# dg, no_isomorph_cycles=no_isomorph_cycles)
# self.assertDictEqual(computed, expected)
# =========================================================================
# ====================== compute_fairness_goodness ========================
# =========================================================================
def test_compute_fairness_goodness(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4])
dg.add_edge(1, 2, weight=1.0)
dg.add_edge(2, 3, weight=1.0)
dg.add_edge(3, 1, weight=1.0)
dg.add_edge(1, 4, weight=2.0)
dg.add_edge(4, 3, weight=-1.0)
expected = {'fairness': {1: 1.0, 2: 0.95, 3: 1.0, 4: 0.95},
'goodness': {1: 1.0, 2: 1.0, 3: 0.0, 4: 2.0}}
computed = network_utils.compute_fairness_goodness(dg, verbose=False)
self.assertDictEqual(computed, expected)
# =========================================================================
# ====================== is_sparsely_transitive_balanced ==================
# =========================================================================
def test_is_sparsely_transitive_balanced_raises_when_self_loops(self):
with self.assertRaises(ValueError):
triad_with_self_loop = np.array(
[[0, 1, 0],
[0, 1, 1],
[0, 0, 0]])
network_utils.is_sparsely_transitive_balanced(triad_with_self_loop)
@parameterized.expand([
["120U", np.array(
[[0, 1, 1],
[1, 0, 1],
[-1, -1, 0]]), True],
["120D", np.array(
[[0, 1, -1],
[1, 0, -1],
[1, 1, 0]]), True],
["0122Z", np.array(
[[0, 0, -1],
[-1, 0, 0],
[1, -1, 0]]), True],
["030TZ", np.array(
[[0, 1, 1],
[0, 0, 1],
[0, 0, 0]]), True],
["003", np.array(
[[0, -1, -1],
[-1, 0, -1],
[-1, -1, 0]]), True],
["0032Z", np.array(
[[0, 0, -1],
[-1, 0, 0],
[-1, -1, 0]]), True],
["030T", np.array(
[[0, 1, 1],
[-1, 0, 1],
[-1, -1, 0]]), True],
["021C", np.array(
[[0, 1, -1],
[-1, 0, 1],
[-1, -1, 0]]), False],
["030T2negZ", np.array(
[[0, 1, -1],
[0, 0, -1],
[0, 0, 0]]), True],
["021UnegZ", np.array(
[[0, 1, 0],
[0, 0, 0],
[0, -1, 0]]), True],
["021DZ", np.array(
[[0, 0, 0],
[1, 0, 1],
[0, 0, 0]]), True],
["210", np.array(
[[0, 1, -1],
[1, 0, 1],
[1, 1, 0]]), False],
["210Z", np.array(
[[0, 1, 0],
[1, 0, 1],
[1, 1, 0]]), False],
["003Z", np.array(
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]), True],
["102Z", np.array(
[[0, 1, 0],
[1, 0, 0],
[0, 0, 0]]), True],
["102negZ", np.array(
[[0, -1, 0],
[-1, 0, 0],
[0, 0, 0]]), True],
["102posnegZ", np.array(
[[0, 1, 0],
[-1, 0, 0],
[0, 0, 0]]), True],
["012Z", np.array(
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]]), True],
["012", np.array(
[[0, 1, -1],
[-1, 0, -1],
[-1, -1, 0]]), True]]
)
def test_is_sparsely_transitive_balanced(
self, name, triad, expected_balance):
self.assertEqual(
network_utils.is_sparsely_transitive_balanced(triad),
expected_balance)
# =========================================================================
# ====================== is_sparsely_cartwright_harary_balanced ===========
# =========================================================================
def test_is_sparsely_cartwright_harary_balanced_raises_when_self_loops(
self):
with self.assertRaises(ValueError):
triad_with_self_loop = np.array(
[[0, 1, 0],
[0, 1, 1],
[0, 0, 0]])
network_utils.is_sparsely_cartwright_harary_balanced(
triad_with_self_loop)
@parameterized.expand([
["120U", np.array(
[[0, 1, 1],
[1, 0, 1],
[-1, -1, 0]]), False],
["120D", np.array(
[[0, 1, -1],
[1, 0, -1],
[1, 1, 0]]), False],
["0122Z", np.array(
[[0, 0, -1],
[-1, 0, 0],
[1, -1, 0]]), False],
["030TZ", np.array(
[[0, 1, 1],
[0, 0, 1],
[0, 0, 0]]), True],
["003", np.array(
[[0, -1, -1],
[-1, 0, -1],
[-1, -1, 0]]), False],
["0032Z", np.array(
[[0, 0, -1],
[-1, 0, 0],
[-1, -1, 0]]), False],
["030T", np.array(
[[0, 1, 1],
[-1, 0, 1],
[-1, -1, 0]]), False],
["021C", np.array(
[[0, 1, -1],
[-1, 0, 1],
[-1, -1, 0]]), False],
["030T2negZ", np.array(
[[0, 1, -1],
[0, 0, -1],
[0, 0, 0]]), True],
["021UnegZ", np.array(
[[0, 1, 0],
[0, 0, 0],
[0, -1, 0]]), True],
["021DZ", np.array(
[[0, 0, 0],
[1, 0, 1],
[0, 0, 0]]), True],
["210", np.array(
[[0, 1, -1],
[1, 0, 1],
[1, 1, 0]]), False],
["210Z", np.array(
[[0, 1, 0],
[1, 0, 1],
[1, 1, 0]]), False],
["003Z", np.array(
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]), True],
["102Z", np.array(
[[0, 1, 0],
[1, 0, 0],
[0, 0, 0]]), True],
["102negZ", np.array(
[[0, -1, 0],
[-1, 0, 0],
[0, 0, 0]]), True],
["102posnegZ", np.array(
[[0, 1, 0],
[-1, 0, 0],
[0, 0, 0]]), True],
["012Z", np.array(
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]]), True],
["012", np.array(
[[0, 1, -1],
[-1, 0, -1],
[-1, -1, 0]]), False]]
)
def test_is_sparsely_cartwright_harary_balanced(
self, name, triad, expected_balance):
self.assertEqual(
network_utils.is_sparsely_cartwright_harary_balanced(triad),
expected_balance)
# =========================================================================
# ====================== is_sparsely_clustering_balanced ==================
# =========================================================================
def test_is_sparsely_clustering_balanced_raises_when_self_loops(self):
with self.assertRaises(ValueError):
triad_with_self_loop = np.array(
[[0, 1, 0],
[0, 1, 1],
[0, 0, 0]])
network_utils.is_sparsely_clustering_balanced(
triad_with_self_loop)
@parameterized.expand([
["120U", np.array(
[[0, 1, 1],
[1, 0, 1],
[-1, -1, 0]]), False],
["120D", np.array(
[[0, 1, -1],
[1, 0, -1],
[1, 1, 0]]), False],
["0122Z", np.array(
[[0, 0, -1],
[-1, 0, 0],
[1, -1, 0]]), True],
["030TZ", np.array(
[[0, 1, 1],
[0, 0, 1],
[0, 0, 0]]), True],
["003", np.array(
[[0, -1, -1],
[-1, 0, -1],
[-1, -1, 0]]), True],
["0032Z", np.array(
[[0, 0, -1],
[-1, 0, 0],
[-1, -1, 0]]), True],
["030T", np.array(
[[0, 1, 1],
[-1, 0, 1],
[-1, -1, 0]]), False],
["021C", np.array(
[[0, 1, -1],
[-1, 0, 1],
[-1, -1, 0]]), False],
["030T2negZ", np.array(
[[0, 1, -1],
[0, 0, -1],
[0, 0, 0]]), True],
["021UnegZ", np.array(
[[0, 1, 0],
[0, 0, 0],
[0, -1, 0]]), True],
["021DZ", np.array(
[[0, 0, 0],
[1, 0, 1],
[0, 0, 0]]), True],
["210", np.array(
[[0, 1, -1],
[1, 0, 1],
[1, 1, 0]]), False],
["210Z", np.array(
[[0, 1, 0],
[1, 0, 1],
[1, 1, 0]]), False],
["003Z", np.array(
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]), True],
["102Z", np.array(
[[0, 1, 0],
[1, 0, 0],
[0, 0, 0]]), True],
["102negZ", np.array(
[[0, -1, 0],
[-1, 0, 0],
[0, 0, 0]]), True],
["102posnegZ", np.array(
[[0, 1, 0],
[-1, 0, 0],
[0, 0, 0]]), True],
["012Z", np.array(
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]]), True],
["012", np.array(
[[0, 1, -1],
[-1, 0, -1],
[-1, -1, 0]]), True]]
)
def test_is_sparsely_clustering_balanced(
self, name, triad, expected_balance):
self.assertEqual(
network_utils.is_sparsely_clustering_balanced(triad),
expected_balance)
# =========================================================================
# ========= is_fullyconnected_cartwright_harary_balance ===================
# =========================================================================
def test_is_fullyconnected_cartwright_harary_balance_raises_when_selfloops(
self):
with self.assertRaises(ValueError):
triad_with_self_loop = np.array(
[[0, 1, 0],
[0, 1, 1],
[0, 0, 0]])
network_utils.is_fullyconnected_cartwright_harary_balance(
triad_with_self_loop)
def test_is_fullyconnected_cartwright_harary_balance_raises_when_negative(
self):
with self.assertRaises(ValueError):
triad_with_self_loop = np.array(
[[0, 1, 0],
[0, 1, 1],
[-1, 0, 0]])
network_utils.is_fullyconnected_cartwright_harary_balance(
triad_with_self_loop)
@parameterized.expand([
["300", np.array(
[[0, 1, 1],
[1, 0, 1],
[1, 1, 0]]), True],
["102", np.array(
[[0, 1, 0],
[1, 0, 0],
[0, 0, 0]]), True],
["003", np.array(
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]), False],
["120D", np.array(
[[0, 0, 1],
[1, 0, 1],
[1, 0, 0]]), False],
["120U", np.array(
[[0, 1, 1],
[0, 0, 0],
[1, 1, 0]]), False],
["030T", np.array(
[[0, 1, 1],
[0, 0, 1],
[0, 0, 0]]), False],
["021D", np.array(
[[0, 0, 0],
[1, 0, 1],
[0, 0, 0]]), False],
["021U", np.array(
[[0, 1, 0],
[0, 0, 0],
[0, 1, 0]]), False],
["012", np.array(
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]]), False],
["021C", np.array(
[[0, 1, 0],
[0, 0, 1],
[0, 0, 0]]), False],
["111U", np.array(
[[0, 1, 0],
[1, 0, 1],
[0, 0, 0]]), False],
["111D", np.array(
[[0, 1, 0],
[1, 0, 0],
[0, 1, 0]]), False],
["030C", np.array(
[[0, 1, 0],
[0, 0, 1],
[1, 0, 0]]), False],
["201", np.array(
[[0, 1, 1],
[1, 0, 0],
[1, 0, 0]]), False],
["120C", np.array(
[[0, 1, 0],
[1, 0, 1],
[1, 0, 0]]), False],
["210", np.array(
[[0, 1, 0],
[1, 0, 1],
[1, 1, 0]]), False]]
)
def test_is_fullyconnected_cartwright_harary_balance(
self, name, triad, expected_balance):
self.assertEqual(
network_utils.is_fullyconnected_cartwright_harary_balance(triad),
expected_balance)
# =========================================================================
# =============== is_fullyconnected_clustering_balanced ===================
# =========================================================================
def test_is_fullyconnected_clustering_balanced_raises_when_self_loops(
self):
with self.assertRaises(ValueError):
triad_with_self_loop = np.array(
[[0, 1, 0],
[0, 1, 1],
[0, 0, 0]])
network_utils.is_fullyconnected_clustering_balanced(
triad_with_self_loop)
def test_is_fullyconnected_clustering_balanced_raises_when_negative(
self):
with self.assertRaises(ValueError):
triad_with_self_loop = np.array(
[[0, 1, 0],
[0, 1, 1],
[-1, 0, 0]])
network_utils.is_fullyconnected_clustering_balanced(
triad_with_self_loop)
@parameterized.expand([
["300", np.array(
[[0, 1, 1],
[1, 0, 1],
[1, 1, 0]]), True],
["102", np.array(
[[0, 1, 0],
[1, 0, 0],
[0, 0, 0]]), True],
["003", np.array(
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]), True],
["120D", np.array(
[[0, 0, 1],
[1, 0, 1],
[1, 0, 0]]), False],
["120U", np.array(
[[0, 1, 1],
[0, 0, 0],
[1, 1, 0]]), False],
["030T", np.array(
[[0, 1, 1],
[0, 0, 1],
[0, 0, 0]]), False],
["021D", np.array(
[[0, 0, 0],
[1, 0, 1],
[0, 0, 0]]), False],
["021U", np.array(
[[0, 1, 0],
[0, 0, 0],
[0, 1, 0]]), False],
["012", np.array(
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]]), True],
["021C", np.array(
[[0, 1, 0],
[0, 0, 1],
[0, 0, 0]]), False],
["111U", np.array(
[[0, 1, 0],
[1, 0, 1],
[0, 0, 0]]), False],
["111D", np.array(
[[0, 1, 0],
[1, 0, 0],
[0, 1, 0]]), False],
["030C", np.array(
[[0, 1, 0],
[0, 0, 1],
[1, 0, 0]]), False],
["201", np.array(
[[0, 1, 1],
[1, 0, 0],
[1, 0, 0]]), False],
["120C", np.array(
[[0, 1, 0],
[1, 0, 1],
[1, 0, 0]]), False],
["210", np.array(
[[0, 1, 0],
[1, 0, 1],
[1, 1, 0]]), False]]
)
def test_is_fullyconnected_clustering_balanced(
self, name, triad, expected_balance):
self.assertEqual(
network_utils.is_fullyconnected_clustering_balanced(triad),
expected_balance)
# =========================================================================
# ============= is_fullyconnected_transitivity_balanced ===================
# =========================================================================
def test_is_fullyconnected_transitivity_balanced_raises_when_self_loops(
self):
with self.assertRaises(ValueError):
triad_with_self_loop = np.array(
[[0, 1, 0],
[0, 1, 1],
[0, 0, 0]])
network_utils.is_fullyconnected_transitivity_balanced(
triad_with_self_loop)
def test_is_fullyconnected_transitivity_balanced_raises_when_negative(
self):
with self.assertRaises(ValueError):
triad_with_self_loop = np.array(
[[0, 1, 0],
[0, 1, 1],
[-1, 0, 0]])
network_utils.is_fullyconnected_transitivity_balanced(
triad_with_self_loop)
@parameterized.expand([
["300", np.array(
[[0, 1, 1],
[1, 0, 1],
[1, 1, 0]]), True],
["102", np.array(
[[0, 1, 0],
[1, 0, 0],
[0, 0, 0]]), True],
["003", np.array(
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]), True],
["120D", np.array(
[[0, 0, 1],
[1, 0, 1],
[1, 0, 0]]), True],
["120U", np.array(
[[0, 1, 1],
[0, 0, 0],
[1, 1, 0]]), True],
["030T", np.array(
[[0, 1, 1],
[0, 0, 1],
[0, 0, 0]]), True],
["021D", np.array(
[[0, 0, 0],
[1, 0, 1],
[0, 0, 0]]), True],
["021U", np.array(
[[0, 1, 0],
[0, 0, 0],
[0, 1, 0]]), True],
["012", np.array(
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]]), True],
["021C", np.array(
[[0, 1, 0],
[0, 0, 1],
[0, 0, 0]]), False],
["111U", np.array(
[[0, 1, 0],
[1, 0, 1],
[0, 0, 0]]), False],
["111D", np.array(
[[0, 1, 0],
[1, 0, 0],
[0, 1, 0]]), False],
["030C", np.array(
[[0, 1, 0],
[0, 0, 1],
[1, 0, 0]]), False],
["201", np.array(
[[0, 1, 1],
[1, 0, 0],
[1, 0, 0]]), False],
["120C", np.array(
[[0, 1, 0],
[1, 0, 1],
[1, 0, 0]]), False],
["210", np.array(
[[0, 1, 0],
[1, 0, 1],
[1, 1, 0]]), False]]
)
def test_is_fullyconnected_transitivity_balanced(
self, name, triad, expected_balance):
self.assertEqual(
network_utils.is_fullyconnected_transitivity_balanced(triad),
expected_balance)
# # =======================================================================
# # =================== is_sparsely_ranked_clustering_balanced ============
# # =======================================================================
# def test_is_sparsely_ranked_clustering_balanced_raises_when_self_loops(
# self):
# with self.assertRaises(ValueError):
# triad_with_self_loop = np.array(
# [[0, 1, 0],
# [0, 1, 1],
# [0, 0, 0]])
# network_utils.is_sparsely_ranked_clustering_balanced(
# triad_with_self_loop)
# @parameterized.expand([
# ["120U", np.array(
# [[0, 1, 1],
# [1, 0, 1],
# [-1, -1, 0]]), True],
# ["120D", np.array(
# [[0, 1, -1],
# [1, 0, -1],
# [1, 1, 0]]), True],
# ["0122Z", np.array(
# [[0, 0, -1],
# [-1, 0, 0],
# [1, -1, 0]]), True],
# ["030TZ", np.array(
# [[0, 1, 1],
# [0, 0, 1],
# [0, 0, 0]]), True],
# ["003", np.array(
# [[0, -1, -1],
# [-1, 0, -1],
# [-1, -1, 0]]), True],
# ["0032Z", np.array(
# [[0, 0, -1],
# [-1, 0, 0],
# [-1, -1, 0]]), True],
# ["030T", np.array(
# [[0, 1, 1],
# [-1, 0, 1],
# [-1, -1, 0]]), False],
# ["021C", np.array(
# [[0, 1, -1],
# [-1, 0, 1],
# [-1, -1, 0]]), False],
# ["030T2negZ", np.array(
# [[0, 1, -1],
# [0, 0, -1],
# [0, 0, 0]]), True],
# ["021UnegZ", np.array(
# [[0, 1, 0],
# [0, 0, 0],
# [0, -1, 0]]), True],
# ["021DZ", np.array(
# [[0, 0, 0],
# [1, 0, 1],
# [0, 0, 0]]), True],
# ["210", np.array(
# [[0, 1, -1],
# [1, 0, 1],
# [1, 1, 0]]), False],
# ["210Z", np.array(
# [[0, 1, 0],
# [1, 0, 1],
# [1, 1, 0]]), False],
# ["003Z", np.array(
# [[0, 0, 0],
# [0, 0, 0],
# [0, 0, 0]]), True],
# ["102Z", np.array(
# [[0, 1, 0],
# [1, 0, 0],
# [0, 0, 0]]), True],
# ["102negZ", np.array(
# [[0, -1, 0],
# [-1, 0, 0],
# [0, 0, 0]]), True],
# ["102posnegZ", np.array(
# [[0, 1, 0],
# [-1, 0, 0],
# [0, 0, 0]]), True],
# ["012Z", np.array(
# [[0, 1, 0],
# [0, 0, 0],
# [0, 0, 0]]), True],
# ["012", np.array(
# [[0, 1, -1],
# [-1, 0, -1],
# [-1, -1, 0]]), True]]
# )
# def test_is_sparsely_ranked_clustering_balanced(
# self, name, triad, expected_balance):
# self.assertEqual(
# network_utils.is_sparsely_ranked_clustering_balanced(triad),
# expected_balance)
# =========================================================================
# ====================== get_all_triad_permutations =======================
# =========================================================================
def test_get_all_triad_permutations(self):
triad_adj_matrix = np.array(
[[0, 1, 0],
[1, 0, 1],
[1, 1, 0]])
expected = set([
'[[0 1 1]\n [1 0 1]\n [1 0 0]]',
'[[0 0 1]\n [1 0 1]\n [1 1 0]]',
'[[0 1 1]\n [0 0 1]\n [1 1 0]]',
'[[0 1 1]\n [1 0 1]\n [0 1 0]]',
'[[0 1 1]\n [1 0 0]\n [1 1 0]]',
'[[0 1 0]\n [1 0 1]\n [1 1 0]]'])
computed = network_utils._get_all_triad_permutations(triad_adj_matrix)
self.assertEqual(expected, computed)
# =========================================================================
# ====================== generate_all_possible_sparse_triads ==============
# =========================================================================
def test_generate_all_possible_sparse_triads(self):
computed_triad_map, computed_triad_list = (
network_utils.generate_all_possible_sparse_triads())
# Testing triad_list
self.assertTrue(
len(computed_triad_list) == 138,
'Length of triad_list is not correct.')
np.testing.assert_array_equal(
computed_triad_list[0], np.array(
[[0, 0, 0],
[0, 0, 0],
[0, 0, 0]]), 'First triad_list is incorrect.')
np.testing.assert_array_equal(
computed_triad_list[-1], np.array(
[[0, -1, -1],
[-1, 0, -1],
[-1, -1, 0]]), 'Last triad_list is incorrect.')
np.testing.assert_array_equal(
computed_triad_list[69], np.array(
[[0, 0, 1],
[1, 0, -1],
[1, 0, 0]]), 'Middle triad_list is incorrect.')
# Testing triad_map.
expected_key1 = '[[0 0 0]\n [1 0 0]\n [0 0 0]]'
expected_value1 = 1
expected_key2 = '[[ 0 1 1]\n [-1 0 1]\n [-1 -1 0]]'
expected_value2 = 129
self.assertTrue(
expected_key1 in computed_triad_map,
'First key was not found in computed_triad_map.')
self.assertTrue(
expected_key2 in computed_triad_map,
'Second key was not found in computed_triad_map.')
self.assertEqual(
computed_triad_map[expected_key1], expected_value1,
'First value was not found in computed_triad_map.')
self.assertEqual(
computed_triad_map[expected_key2], expected_value2,
'Second value was not found in computed_triad_map.')
# =========================================================================
# ====================== detect_triad_type_for_all_subgraph3 ==============
# =========================================================================
def test_detect_triad_type_for_all_subgraph3(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 3, weight=1)
dg.add_edge(3, 1, weight=1)
dg.add_edge(1, 4, weight=2)
dg.add_edge(4, 3, weight=-5)
expected = {
'(1, 2, 3)': 55,
# [[0, 0, 1],
# [1, 0, 0],
# [0, 1, 0]]
'(1, 2, 4)': 3,
# [[0, 0, 0],
# [0, 0, 0],
# [1, 1, 0]]
'(1, 3, 4)': 56,
# [[0, 0, 1],
# [1, 0, 0],
# [0,-1, 0]]
'(2, 3, 4)': 24
# [[0, 0, 0],
# [1, 0, 0],
# [-1, 0, 0]]
}
computed = network_utils._detect_triad_type_for_all_subgraph3(
dgraph=dg, triad_map=self.triad_map)
self.assertDictEqual(expected, computed)
def test_detect_triad_type_for_all_subgraph3_nodes_with_str_name(self):
dg = nx.DiGraph()
dg.add_nodes_from(['b', 'c', 'a', 'd'])
dg.add_edge('b', 'c', weight=1)
dg.add_edge('c', 'a', weight=1)
dg.add_edge('a', 'b', weight=1)
dg.add_edge('b', 'd', weight=2)
dg.add_edge('d', 'a', weight=-5)
expected = {
"('a', 'b', 'c')": 55,
"('a', 'b', 'd')": 56,
"('a', 'c', 'd')": 24,
"('b', 'c', 'd')": 3
}
computed = network_utils._detect_triad_type_for_all_subgraph3(
dgraph=dg, triad_map=self.triad_map)
self.assertDictEqual(expected, computed)
def test_detect_triad_type_for_all_subgraph3_has_unique_keys(self):
dg = nx.DiGraph()
dg.add_nodes_from(['b', 'c', 'a', 'd'])
dg.add_edge('b', 'c', weight=1)
dg.add_edge('c', 'a', weight=1)
dg.add_edge('a', 'b', weight=1)
dg.add_edge('b', 'd', weight=2)
dg.add_edge('d', 'a', weight=-5)
computed = network_utils._detect_triad_type_for_all_subgraph3(
dgraph=dg, triad_map=self.triad_map)
truncated_keys = []
for key in list(computed.keys()):
key = re.sub(r'[^\w]', ' ', key)
key = key.replace(" ", "")
truncated_keys.append(''.join(sorted(key)))
self.assertEqual(len(truncated_keys), len(np.unique(truncated_keys)))
# =========================================================================
# ====================== compute_transition_matrix ========================
# =========================================================================
def test_compute_transition_matrix(self):
dg1 = nx.DiGraph()
dg1.add_nodes_from([1, 2, 3, 4])
dg1.add_edge(1, 2, weight=1)
dg1.add_edge(2, 1, weight=1)
dg1.add_edge(2, 3, weight=1)
dg1.add_edge(3, 1, weight=-1)
dg1.add_edge(3, 4, weight=1)
dg2 = nx.DiGraph()
dg2.add_nodes_from([1, 2, 3, 4])
dg2.add_edge(1, 2, weight=1)
dg2.add_edge(1, 3, weight=1)
dg2.add_edge(2, 1, weight=1)
dg2.add_edge(2, 3, weight=1)
dg2.add_edge(2, 4, weight=1)
dg2.add_edge(3, 1, weight=1)
dg2.add_edge(3, 4, weight=1)
dg2.add_edge(4, 1, weight=1)
dgraphs = [dg1, dg2]
triads_types = [
{'(1, 2, 3)': 76,
'(1, 2, 4)': 6,
'(1, 3, 4)': 4,
'(2, 3, 4)': 8},
{'(1, 2, 3)': 63,
'(1, 2, 4)': 57,
'(1, 3, 4)': 57,
'(2, 3, 4)': 22}]
n = len(self.triad_list)
transition_matrix = np.zeros((n, n))
transition_matrix[76, 63] = 1
transition_matrix[6, 57] = 1
transition_matrix[4, 57] = 1
transition_matrix[8, 22] = 1
computed = network_utils.compute_transition_matrix(
dgraphs=dgraphs,
unique_triad_num=n,
triad_map=self.triad_map)
# self.assertDictEqual(expected, computed)
self.assertTrue(
'triads_types' in computed,
'triads_types was not found in computed transition matrix.')
self.assertTrue(
'transition_matrices' in computed,
'transition_matrices was not found in computed transition matrix.')
self.assertEqual(
triads_types,
computed['triads_types'],
'Triad types were different.')
np.testing.assert_array_equal(
transition_matrix,
computed['transition_matrices'][0],
'Transition matrices were different.')
# =========================================================================
# ====================== get_stationary_distribution ======================
# =========================================================================
def test_get_stationary_distribution_simple(self):
transition_matrix = np.array(
[[0, 0, 1],
[0, 0, 1],
[0, 0, 1]], dtype=float)
expected = np.array([0, 0, 1])
computed = network_utils.get_stationary_distribution(
transition_matrix, aperiodic_irreducible_eps=0.0)
np.testing.assert_array_almost_equal(expected, computed, decimal=4)
def test_get_stationary_distribution_full_matrix(self):
transition_matrix = np.array(
[[0.6, 0.1, 0.3],
[0.1, 0.7, 0.2],
[0.2, 0.2, 0.6]], dtype=float)
expected = np.array([0.2759, 0.3448, 0.3793])
computed = network_utils.get_stationary_distribution(
transition_matrix, aperiodic_irreducible_eps=0.0)
np.testing.assert_array_almost_equal(expected, computed, decimal=4)
def test_get_stationary_distribution_not_row_stochastic(self):
transition_matrix = np.array(
[[0, 0, 0],
[9, 0, 1],
[1, 0, 3]], dtype=float)
expected = np.array([0.3571, 0.1191, 0.5238])
computed = network_utils.get_stationary_distribution(
transition_matrix, aperiodic_irreducible_eps=0.0001)
np.testing.assert_array_almost_equal(expected, computed, decimal=4)
def test_get_stationary_distribution(self):
transition_matrix = np.array(
[[0, 0, 0],
[0.9, 0, 0.1],
[0.25, 0, 0.75]], dtype=float)
expected = np.array([0.3571, 0.1191, 0.5238])
computed = network_utils.get_stationary_distribution(
transition_matrix, aperiodic_irreducible_eps=0.0001)
np.testing.assert_array_almost_equal(expected, computed, decimal=4)
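    def test_get_stationary_distribution_power_iteration_sketch(self):
        # Editor's addition (illustrative cross-check, not an original test):
        # a stationary distribution can also be reached by repeatedly
        # multiplying a distribution by the transition matrix.  This mirrors
        # test_get_stationary_distribution_full_matrix above and does not
        # touch network_utils itself.
        transition_matrix = np.array(
            [[0.6, 0.1, 0.3],
             [0.1, 0.7, 0.2],
             [0.2, 0.2, 0.6]], dtype=float)
        pi = np.ones(3) / 3
        for _ in range(1000):
            pi = pi.dot(transition_matrix)
        np.testing.assert_array_almost_equal(
            pi, [0.2759, 0.3448, 0.3793], decimal=4)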
# =========================================================================
# ====================== get_mixing_time_range ============================
# =========================================================================
def test_get_mixing_time_range(self):
transition_matrix = np.array(
[[0, 0, 0],
[0.9, 0, 0.1],
[0.25, 0, 0.75]], dtype=float)
expected = 13.7081
computed = network_utils.get_mixing_time_range(
transition_matrix,
aperiodic_irreducible_eps=0.0001,
distance_from_stationary_eps=0.01)
self.assertEqual(np.round(expected, 4), np.round(computed, 4))
# =========================================================================
# ====================== _randomize_network ===============================
# =========================================================================
def test_randomize_network_with_unweighted_graph(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4, 5, 6])
dg.add_edge(1, 2)
dg.add_edge(2, 1)
dg.add_edge(2, 3)
dg.add_edge(3, 1)
dg.add_edge(3, 4)
dg.add_edge(4, 5)
dg.add_edge(5, 4)
dg.add_edge(1, 6)
dg.add_edge(6, 1)
dg.add_edge(6, 5)
computed = network_utils._randomize_network(dg, switching_count_coef=2)
self.assertEqual(
sorted(dict(dg.degree()).values()),
sorted(dict(computed.degree()).values()))
self.assertEqual(
sorted(dg.nodes()),
sorted(computed.nodes()))
def test_randomize_network_with_all_positive_weighted_graph(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4, 5, 6])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 1, weight=1)
dg.add_edge(2, 3, weight=1)
dg.add_edge(3, 1, weight=2)
dg.add_edge(3, 4, weight=5)
dg.add_edge(4, 5, weight=9)
dg.add_edge(5, 4, weight=6)
dg.add_edge(1, 6, weight=9)
dg.add_edge(6, 1, weight=1)
dg.add_edge(6, 5, weight=16)
computed = network_utils._randomize_network(dg, switching_count_coef=2)
self.assertEqual(
sorted(dict(dg.degree()).values()),
sorted(dict(computed.degree()).values()))
self.assertEqual(
sorted(dg.nodes()),
sorted(computed.nodes()))
def test_randomize_network_with_signed_weighted_graph(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4, 5, 6])
dg.add_edge(1, 2, weight=1)
dg.add_edge(2, 1, weight=1)
dg.add_edge(2, 3, weight=1)
dg.add_edge(3, 1, weight=-2)
dg.add_edge(3, 4, weight=5)
dg.add_edge(4, 5, weight=9)
dg.add_edge(5, 4, weight=-6)
dg.add_edge(1, 6, weight=-9)
dg.add_edge(6, 1, weight=1)
dg.add_edge(6, 5, weight=-16)
computed = network_utils._randomize_network(dg, switching_count_coef=2)
self.assertEqual(
sorted(dict(dg.degree()).values()),
sorted(dict(computed.degree()).values()))
self.assertEqual(
sorted(dg.nodes()),
sorted(computed.nodes()))
# =========================================================================
# ================== get_robustness_of_transitions ========================
# =========================================================================
def test_get_robustness_of_transitions(self):
transition_matrices = [
np.array(
[[0.9, 0.1, 0],
[0.6, 0.2, 0.2],
[0.7, 0.1, 0.2]]),
np.array(
[[0.1, 0.8, 0.1],
[0, 0.9, 0.1],
[0.1, 0.1, 0.8]])
]
# Expected dataframe.
columns = [
'Transitions',
'Matrix L2-Norm Dist. from Average',
'Matrix Pearson r-value',
'Matrix Pearson p-value',
'Stationary Dist. L2-Norm Dist. from Average',
'Stationary Dist. Pearson r-value',
'Stationary Dist. Pearson p-value']
expected_df = pd.DataFrame({
columns[0]: ['Period 1 to Period 2', 'Period 2 to Period 3'],
columns[1]: [0.8444, 0.8083],
columns[2]: [0.4256, 0.6522],
columns[3]: [0.2534, 0.0569],
columns[4]: [0.5833, 0.4404],
columns[5]: [0.4637, 0.1319],
columns[6]: [0.6930, 0.9156],
},
columns=columns)
expected_df = pd.DataFrame(
expected_df, columns=columns)
# Computed dataframe.
computed_df = network_utils.get_robustness_of_transitions(
transition_matrices, lnorm=2)
# Comparing computed with expected.
pd.testing.assert_frame_equal(
expected_df, computed_df, check_less_precise=2)
# =========================================================================
# ================== generate_converted_graphs ============================
# =========================================================================
def test_generate_converted_graphs_raises_when_wrong_percentage(self):
with self.assertRaises(ValueError):
network_utils.generate_converted_graphs(
dgraph=nx.DiGraph(),
percentage=-1)
def test_generate_converted_graphs_when_it_adds_edges(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4])
dg.add_edge(1, 2, weight=1)
dg.add_edge(1, 3, weight=2)
dg.add_edge(2, 3, weight=5)
dg.add_edge(3, 1, weight=1)
percentage = 25
computed_graphs = network_utils.generate_converted_graphs(
dgraph=dg,
convert_from=0,
convert_to=1,
percentage=percentage,
how_many_to_generate=5)
for computed in computed_graphs:
# It should contain all nodes.
self.assertEqual(dg.nodes(), computed.nodes())
# It should contain all dg's edges.
self.assertEqual(len(nx.difference(dg, computed).edges()), 0)
# It should contain percentage% more edges.
remaining_edges_count = 4 * 3 - 4
self.assertEqual(
len(nx.difference(computed, dg).edges()),
int(percentage*remaining_edges_count/100))
def test_generate_converted_graphs_when_all_edges_exist(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4])
dg.add_edge(1, 2, weight=2)
dg.add_edge(1, 3, weight=-5)
dg.add_edge(2, 3, weight=-2)
dg.add_edge(3, 1, weight=2)
dg.add_edge(4, 1, weight=2)
dg.add_edge(4, 3, weight=2)
percentage = 25
computed_graphs = network_utils.generate_converted_graphs(
dgraph=dg,
convert_from=2,
convert_to=3,
percentage=percentage,
how_many_to_generate=2)
for computed in computed_graphs:
converted_cnt = 0
# It should contain all nodes.
self.assertEqual(dg.nodes(), computed.nodes())
# It should contain all dg's edges.
self.assertEqual(dg.edges(), computed.edges())
# Checking every edge weight.
for edge in dg.edges():
w1 = dg.get_edge_data(edge[0], edge[1])['weight']
w2 = computed.get_edge_data(edge[0], edge[1])['weight']
if w1 == w2:
continue
if w1 != w2 and w1 == 2 and w2 == 3 and converted_cnt == 0:
converted_cnt += 1
else:
self.assertTrue(
False, 'Found more converted edges than expected.')
def test_generate_converted_graphs(self):
dg = nx.DiGraph()
dg.add_nodes_from([1, 2, 3, 4])
percentage = 10
computed_graphs = network_utils.generate_converted_graphs(
dgraph=dg,
convert_from=0,
convert_to=1,
percentage=percentage,
how_many_to_generate=2)
for computed in computed_graphs:
# It should contain all nodes.
self.assertEqual(dg.nodes(), computed.nodes())
# It should contain percentage extra edges.
self.assertEqual(
len(computed.edges()), int(4 * 3 * percentage / 100))
def test_generate_converted_graphs_for_large_networks(self):
n = 100
m = 300
dgraph = nx.gnm_random_graph(n=n, m=m, directed=True)
percentage = 5
computed_graphs = network_utils.generate_converted_graphs(
dgraph=dgraph,
convert_from=0,
convert_to=1,
percentage=percentage,
how_many_to_generate=6)
for computed in computed_graphs:
# It should contain all nodes.
self.assertEqual(dgraph.nodes(), computed.nodes())
# It should contain percentage extra edges.
self.assertEqual(
len(computed.edges()), m + int(
(n * (n-1) - m) * percentage / 100))
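# Hedged sketch, not part of the original test suite and never called by it: one
# way to cross-check the stationary distributions asserted in the tests above is
# to take the left eigenvector of the row-stochastic transition matrix for
# eigenvalue 1 and normalize it. network_utils may use a different method
# internally; this is only an illustration.
def _stationary_distribution_sketch(transition_matrix):
    """Return the stationary distribution via the leading left eigenvector."""
    eigenvalues, eigenvectors = np.linalg.eig(transition_matrix.T)
    index = np.argmin(np.abs(eigenvalues - 1.0))  # eigenvalue closest to 1
    vector = np.real(eigenvectors[:, index])
    return vector / vector.sum()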
if __name__ == '__main__':
unittest.main()
| 2.09375
| 2
|
domain-finder.py
|
luisrowley/domain-finder
| 0
|
12784411
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Check for all required python dependencies first
try:
import sys
import argparse
import subprocess
import shlex
except ImportError, e:
python_install = raw_input("\nMissing required python dependencies: sys argparse subprocess shlex. Install them now? [y/n]")
if python_install == 'y' or python_install == 'yes':
install_process = subprocess.Popen(['pip install sys argparse subprocess shlex'], stdout=subprocess.PIPE, shell=True)
(out, err) = install_process.communicate()
print(out);
else:
print('Error: unmet dependencies, leaving...')
sys.exit(0)
# Check for system specific dependencies
nslookup = subprocess.Popen(['command -v nslookup'], stdout=subprocess.PIPE, shell=True)
(out, err) = nslookup.communicate()
if len(out) == 0:
nslookup_install = raw_input("\n'dnsutils' package is a required dependency but it is not installed in your system. Install now? [y/n]")
if nslookup_install == 'y' or nslookup_install == 'yes':
install_process = subprocess.Popen(['sudo apt-get install -y dnsutils'], stdout=subprocess.PIPE, shell=True)
(out, err) = install_process.communicate()
print(out);
else:
print('Error: unmet dependency, leaving...')
sys.exit(0)
print('\n---------------- MIXFLARE v0.12 ----------------\n')
#Create options for single string input or list of strings
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', action="store_true", dest='input', help='Simple Lookup. Searches for availability of the specified domain name. (.com and .net top-level domains supported)')
parser.add_argument('-l', '--list-domains', action="store_true", dest='list', help='Advanced search. This option takes in a list of space separated strings, generates all possible (and best) combinations between them, and then checks their availability as domain names via DNS lookup.')
parser.add_argument('-c', '--com', action="store_true", dest='com_domains', help='Filter results by .com domains only.')
parser.add_argument('-n', '--net', action="store_true", dest='net_domains', help='Filter results by .net domains only.')
parser.add_argument('--version', action='version', version='Mixflare v0.12')
args = parser.parse_args()
# If not -c or -n option, don't even bother
if not args.com_domains and not args.net_domains and not args.list:
print('Please specify a TLD option: -c for .com, -n for .net domains or both. e.g.: domain-finder -c')
sys.exit(0)
############### main functions ###############
def perform_simple_lookup():
try:
args.input = raw_input('\nPlease enter a single word to search for (i.e. with no TLD extension): \n')
# sanitize input string
invalid = set(';&(){}!¡?¿=.,_$@^*¨%"<>')
if any((c in invalid) for c in args.input):
print("Error: ';&(){}!¡?¿=.,_$@^*¨%\"<>' characters not permitted")
sys.exit(0)
else:
domInput = args.input
except ValueError:
print("Wrong input type. Please try again.")
except KeyboardInterrupt:
print "\nUser interrupted. Bye!"
sys.exit()
available = []
if args.com_domains:
xcom = domInput + ".com"
response_com = subprocess.Popen(["nslookup " + xcom], stdout=subprocess.PIPE, shell=True)
(out, err) = response_com.communicate()
if "NXDOMAIN" in out:
available.append(xcom)
if args.net_domains:
xnet = domInput + ".net"
response_net = subprocess.Popen(["nslookup " + xnet], stdout=subprocess.PIPE, shell=True)
(out, err) = response_net.communicate()
if "NXDOMAIN" in out:
available.append(xnet)
if len(available) < 1:
print('\nSorry, there are no unregistered domain names matching your criteria.\nYou should try again with different words.')
perform_simple_lookup() # Recursive call to prompt again.
else:
print('\n' + str(len(available)) + ' available domain(s):\n' + '\n'.join(available))
def perform_advanced_search():
try:
# Create new list from the words given.
args.list = raw_input('\nPlease enter a list of space-separated words (5 max): \n')
# sanitize input string
invalid = set(';&(){}!¡?¿=.,_$@^*¨%"<>')
if any((c in invalid) for c in args.list):
print("Error: ';&(){}!¡?¿=.,_$@^*¨%\"<>' characters not permitted")
sys.exit(0)
else:
domList = args.list
domList = shlex.split(domList)
if len(domList) > 5:
print('\n ----> Maximum number of elements reached! Please enter 5 or fewer.')
perform_advanced_search() # Recursive call to prompt again.
except ValueError:
print("Wrong input type. Please try again.")
except KeyboardInterrupt:
print "\nUser interrupted. Bye!"
sys.exit()
# Generate all possible two-word combinations between the elements of the list.
# Avoid combinations of same words. Total amount generated: (x^2 - x) where x = --list-domains
combos = []
for x in domList:
for y in domList:
if not x == y:
combos.append(x + y)
combosLen = len(combos)
# Check for available domains within the word list. Total amount of requests (x^2 - x)*2
available = []
for x in combos:
if args.com_domains:
xcom = x + ".com"
response_com = subprocess.Popen(["nslookup " + xcom], stdout=subprocess.PIPE, shell=True)
(out, err) = response_com.communicate()
if "NXDOMAIN" in out:
available.append(xcom)
if args.net_domains:
xnet = x + ".net"
response_net = subprocess.Popen(["nslookup " + xnet], stdout=subprocess.PIPE, shell=True)
(out, err) = response_net.communicate()
if "NXDOMAIN" in out:
available.append(xnet)
# Display progress bar on tty
xVal = combos.index(x) + 1
percent = int(xVal/float(combosLen) * 100)
lines = int(xVal/float(combosLen) * 30)
sys.stdout.write('\r')
sys.stdout.write("[%-30s] %d%%" % ('='* lines, percent))
sys.stdout.flush()
if len(available) < 1:
print('\n\nSorry, there are no unregistered domain names matching your criteria.\nYou should try again with different words.')
perform_advanced_search() # Recursive call to prompt again.
else:
print('\n\n' + str(len(available)) + ' available domain(s):\n' + '\n'.join(available))
############### /main functions ###############
if not args.list:
# Always default to Simple Lookup.
perform_simple_lookup()
else:
perform_advanced_search()
# EOF
| 2.703125
| 3
|
XeroPoster.py
|
6enno/FarmXero
| 0
|
12784412
|
# Goal: Push a journal to Xero
import json
import requests
import Secrets
from oauthlib.oauth2 import BackendApplicationClient
from requests_oauthlib import OAuth2Session
url = 'https://api.xero.com/api.xro/2.0/ManualJournals'
refreshUrl = "https://identity.xero.com/connect/token"
connectUrl ='https://api.xero.com/connections'
scope = ['offline_access', 'accounting.transactions', 'openid', 'profile', 'email', 'accounting.contacts', 'accounting.settings']
tenant = Secrets.tenant
auth = Secrets.auth
clientId = Secrets.clientId
clientSecret = Secrets.clientSecret
auth = requests.auth.HTTPBasicAuth(clientId, clientSecret)
client = BackendApplicationClient(client_id=clientId, scope=scope)
oauth = OAuth2Session(client=client)
token = oauth.fetch_token(token_url=refreshUrl, auth=auth)  # token endpoint, not the connections endpoint
print token
payload='''{ "Narration": "Test Journal 5 - from PYTHON",
"JournalLines": [
{
"LineAmount": 20000,
"AccountCode": "200"
},
{
"LineAmount": -20000,
"AccountCode": "210"
}
]
}'''
headers = {
'xero-tenant-id': tenant,
'Authorization': 'Bearer ' + token['access_token'],
'Accept': 'application/json',
'Content-Type': 'application/json'
}
refreshPayload={'grant_type': 'refresh_token',
'refresh_token': '<KEY>',
'client_id': '<KEY>',
'client_secret': '<KEY>'
}
files=[]
refreshHeaders = {
'grant_type': 'refresh_token',
'Accept': '*/*'#,
#'Content-Type': 'application/json'
}
# Do the stuff______________________________________________________
#refreshResponse = requests.request('POST', refreshUrl, headers=refreshHeaders, data=refreshPayload, files=files)
#response = requests.request('POST', url, headers=headers, data=payload)
#print(refreshResponse.text)
#print(response.text)
| 2.484375
| 2
|
gfyrslf/commands/hi.py
|
mstroud/python-matrix-gfyrslf
| 0
|
12784413
|
<gh_stars>0
import logging
from gfyrslf.command import GfyrslfCommand
class HelloCommand(GfyrslfCommand):
def __init__(self, cmdname, cfg):
super().__init__(cmdname, cfg)
self.description = "Replies \"Hi\" to a variety of greetings"
def event_handler(self, bot, room, event):
room.send_text("Hi, " + event['sender'])
| 2.390625
| 2
|
miRmedon_src/counter.py
|
Ally-s-Lab/miRmedon
| 2
|
12784414
|
<gh_stars>1-10
import pysam
from collections import Counter
def counter(bam):
bam_input = pysam.AlignmentFile(bam, 'rb')
haplotypes_counter = Counter()
alignments_list = []
for alignment in bam_input.fetch(until_eof=True):
if len(alignments_list) == 0:
alignments_list.append(alignment)
else:
if alignment.query_name == alignments_list[-1].query_name:
alignments_list.append(alignment)
else:
tmp_counter = Counter([x.reference_name for x in alignments_list])
tmp_counter = {k: v/len(tmp_counter) for k, v in tmp_counter.items()}
haplotypes_counter += Counter(tmp_counter)
alignments_list = [alignment]
tmp_counter = Counter([x.reference_name for x in alignments_list])
tmp_counter = {k: v / len(tmp_counter) for k, v in tmp_counter.items()}
haplotypes_counter += Counter(tmp_counter)
haplotypes_counter = dict(haplotypes_counter)
with open('counts.txt', 'w') as counts_file:
for haplotype, count in list(haplotypes_counter.items()):
counts_file.write(haplotype + '\t' + str(count) + '\n')
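# Hedged usage sketch (not part of the original module): 'example.bam' is a
# hypothetical path. The grouping logic above assumes that all alignments of a
# read share the same query_name and appear consecutively in the BAM (e.g. a
# name-grouped file); otherwise a read's fractional counts would be split.
if __name__ == '__main__':
    counter('example.bam')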
| 2.4375
| 2
|
main.py
|
RyanMatheusRamello/ppt
| 0
|
12784415
|
<reponame>RyanMatheusRamello/ppt<gh_stars>0
import discord
import os
import time
import discord.ext
from discord.utils import get
from discord.ext import commands, tasks
from discord.ext.commands import has_permissions, CheckFailure, check
from random import randint
client = discord.Client()
client = commands.Bot(command_prefix = '>')
@client.event
async def on_ready():
print("bot online") #will print "bot online" in the console when the bot is online
@client.command()
async def ping(ctx):
await ctx.send("pong!") #simple command so that when you type "!ping" the bot will respond with "pong!"
@client.command(aliases=['pedra_papel_tesoura', 'jkp'])
async def ppt(ctx, frase: str = ""):
try:
frase = frase.strip()
if(frase != ""):
if(frase == "help"):
await ctx.send("Esse é um jogo de pedra, papel e tesoura\nPara jogar utilize >ppt <objeto>\nTroque <objeto> por 'pedra', 'papel' ou 'tesoura'")
elif(frase == "pedra" or frase == "papel" or frase == "tesoura"):
# 0 = pd 1 = pp 2 = ts
x = randint(0, 2)
if(x == 0):
if(frase == "pedra"):
await ctx.send(f"{ctx.author.mention} jogou: **{frase}**\n{client.user.mention} jogou: **pedra**\n**Resultado:** O jogo terminou empatado")
if(frase == "papel"):
await ctx.send(f"{ctx.author.mention} jogou: **{frase}**\n{client.user.mention} jogou: **pedra**\n**Resultado:** {ctx.author.mention} ganhou")
if(frase == "tesoura"):
await ctx.send(f"{ctx.author.mention} jogou: **{frase}**\n{client.user.mention} jogou: **pedra**\n**Resultado:** {client.user.mention} ganhou")
elif(x == 1):
if(frase == "papel"):
await ctx.send(f"{ctx.author.mention} jogou: **{frase}**\n{client.user.mention} jogou: **papel**\n**Resultado:** O jogo terminou empatado")
if(frase == "tesoura"):
await ctx.send(f"{ctx.author.mention} jogou: **{frase}**\n{client.user.mention} jogou: **papel**\n**Resultado:** {ctx.author.mention} ganhou")
if(frase == "pedra"):
await ctx.send(f"{ctx.author.mention} jogou: **{frase}**\n{client.user.mention} jogou: **papel**\n**Resultado:** {client.user.mention} ganhou")
elif(x == 2):
if(frase == "tesoura"):
await ctx.send(f"{ctx.author.mention} jogou: **{frase}**\n{client.user.mention} jogou: **tesoura**\n**Resultado:** O jogo terminou empatado")
if(frase == "pedra"):
await ctx.send(f"{ctx.author.mention} jogou: **{frase}**\n{client.user.mention} jogou: **tesoura**\n**Resultado:** {ctx.author.mention} ganhou")
if(frase == "papel"):
await ctx.send(f"{ctx.author.mention} jogou: **{frase}**\n{client.user.mention} jogou: **tesoura**\n**Resultado:** {client.user.mention} ganhou")
else:
await ctx.send("Objeto do pedra, papel e tesoura não reconhecido")
else:
await ctx.send("Esse é um jogo de pedra, papel e tesoura\nPara jogar utilize >ppt <objeto>\nTroque <objeto> por 'pedra', 'papel' ou 'tesoura'")
except:
await ctx.send("Um erro ocorreu")
client.run(os.getenv("TOKEN"))
| 3.1875
| 3
|
src/meltano/core/job/finder.py
|
siilats/meltano
| 122
|
12784416
|
<reponame>siilats/meltano
"""Defines JobFinder."""
from datetime import datetime, timedelta
from .job import HEARTBEAT_VALID_MINUTES, HEARTBEATLESS_JOB_VALID_HOURS, Job, State
class JobFinder:
"""
Query builder for the `Job` model for a certain `elt_uri`.
"""
def __init__(self, job_id: str):
self.job_id = job_id
def latest(self, session):
return (
session.query(Job)
.filter(Job.job_id == self.job_id)
.order_by(Job.started_at.desc())
.first()
)
def successful(self, session):
return session.query(Job).filter(
(Job.job_id == self.job_id)
& (Job.state == State.SUCCESS)
& Job.ended_at.isnot(None)
)
def running(self, session):
"""Find jobs in the running state."""
return session.query(Job).filter(
(Job.job_id == self.job_id) & (Job.state == State.RUNNING)
)
def latest_success(self, session):
return self.successful(session).order_by(Job.ended_at.desc()).first()
def latest_running(self, session):
"""Find the most recent job in the running state, if any."""
return self.running(session).order_by(Job.started_at.desc()).first()
def with_payload(self, session, flags=0, since=None):
query = (
session.query(Job)
.filter(
(Job.job_id == self.job_id)
& (Job.payload_flags != 0)
& (Job.payload_flags.op("&")(flags) == flags)
& Job.ended_at.isnot(None)
)
.order_by(Job.ended_at.asc())
)
if since:
query = query.filter(Job.ended_at > since)
return query
def latest_with_payload(self, session, **kwargs):
return (
self.with_payload(session, **kwargs)
.order_by(None) # Reset ascending order
.order_by(Job.ended_at.desc())
.first()
)
@classmethod
def all_stale(cls, session):
"""Return all stale jobs."""
now = datetime.utcnow()
last_valid_heartbeat_at = now - timedelta(minutes=HEARTBEAT_VALID_MINUTES)
last_valid_started_at = now - timedelta(hours=HEARTBEATLESS_JOB_VALID_HOURS)
return session.query(Job).filter(
(Job.state == State.RUNNING)
& (
(
Job.last_heartbeat_at.isnot(None)
& (Job.last_heartbeat_at < last_valid_heartbeat_at)
)
| (
Job.last_heartbeat_at.is_(None)
& (Job.started_at < last_valid_started_at)
)
)
)
def stale(self, session):
"""Return stale jobs with the instance's job ID."""
return self.all_stale(session).filter(Job.job_id == self.job_id)
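# Hedged usage sketch, not part of Meltano itself and never called here:
# `session` stands in for a SQLAlchemy session supplied by the surrounding
# application, and the job_id string is only an example.
def _job_finder_usage_sketch(session):
    finder = JobFinder("gitlab-to-postgres")
    last_run = finder.latest(session)
    last_success = finder.latest_success(session)
    stale_jobs = finder.stale(session).all()
    return last_run, last_success, stale_jobs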
| 2.328125
| 2
|
secse/scoring/docking_score_prediction.py
|
autodataming/SECSE
| 1
|
12784417
|
<filename>secse/scoring/docking_score_prediction.py
#!/usr/bin/env python
# -*- coding:utf-8 _*-
"""
@author: <NAME>
@file: docking_score_prediction.py
@time: 2021/10/27/14:26
"""
import argparse
import subprocess
from openbabel import openbabel
import pandas as pd
import os
import rdkit
from rdkit import Chem
from rdkit.Chem import PandasTools
from rdkit.Chem import MolStandardize
from tqdm import tqdm
rdkit.RDLogger.DisableLog("rdApp.*")
def get_train(sdf, dock):
g = PandasTools.LoadSDF(sdf, molColName='Molecule')
g_smi = pd.read_csv(dock, sep="\t", header=None)
g_smi.columns = ["Smiles", "ID"]
g_smi = g_smi.set_index("ID")
g = g[["ID", "Molecule", "docking score"]]
g["docking score"] = g["docking score"].astype(float)
g = g.sort_values("docking score", ascending=True)
g["Smiles"] = g["ID"].apply(lambda x: g_smi.loc[x.rsplit("-C", 1)[0]][0])
g_new = g.sort_values(by="docking score", ascending=True).drop_duplicates(subset="Smiles", keep="first")
smi = g_new["Smiles"].apply(lambda x: neutralize(x))
g_new["Smiles"] = smi
g_new = g_new.drop_duplicates(subset="Smiles", keep="first")
return g_new
def get_pre(workdir, max_gen, get_all=False):
pre_dir = os.path.join(workdir, "prediction")
if get_all:
pre_raw = os.path.join(pre_dir, "all_G" + str(max_gen) + "_for_pre.raw")
pre_file = os.path.join(pre_dir, "all_G" + str(max_gen) + "_for_pre.csv")
cmd_cat = "find {} -name \"filter.csv\" |xargs awk -F, 'NR>1{{print $(NF-5)\",\"$(NF-6)}}' > {}".format(
workdir, pre_raw)
subprocess.check_output(cmd_cat, shell=True, stderr=subprocess.STDOUT)
cmd_dedup = "awk -F',' '!seen[$2]++' " + pre_raw + " > " + pre_file
subprocess.check_output(cmd_dedup, shell=True, stderr=subprocess.STDOUT)
drop_mols = os.path.join(pre_dir, "drop_ids.txt")
mols_id_cat = "find {} -name \"mols_for_docking.smi\" |xargs cut -f2 > {}".format(workdir, drop_mols)
subprocess.check_output(mols_id_cat, shell=True, stderr=subprocess.STDOUT)
final_file = os.path.join(pre_dir, "all_G" + str(max_gen) + "_for_pre_uniq.csv")
else:
pre_file = os.path.join(pre_dir, "gen_" + str(max_gen) + "_for_pre.csv")
cmd_cp = "awk -F, 'NR>1{{print $(NF-5)\",\"$(NF-6)}}' {} > {}".format(
os.path.join(workdir, "generation_" + str(max_gen), "filter.csv"), pre_file)
subprocess.check_output(cmd_cp, shell=True, stderr=subprocess.STDOUT)
drop_mols = os.path.join(pre_dir, "drop_ids_{}.txt".format(max_gen))
mols_id_cat = "cut -f2 {} > {}".format(
os.path.join(workdir, "generation_" + str(max_gen), "mols_for_docking.smi"), drop_mols)
subprocess.check_output(mols_id_cat, shell=True, stderr=subprocess.STDOUT)
final_file = os.path.join(pre_dir, "gen_" + str(max_gen) + "_for_pre_uniq.csv")
cmd_drop = "grep -wvf {} {} > {}".format(drop_mols, pre_file, final_file)
subprocess.check_output(cmd_drop, shell=True, stderr=subprocess.STDOUT)
return final_file
def neutralize(smi):
mol = Chem.MolFromSmiles(smi)
if mol is None:
smi = wash_mol(smi)
mol = Chem.MolFromSmiles(smi)
if mol is None:
return "C"
uc = MolStandardize.charge.Uncharger()
return Chem.MolToSmiles(uc.uncharge(mol))
def wash_mol(smi):
ob_conversion = openbabel.OBConversion()
ob_conversion.SetInAndOutFormats("smi", "can")
ob_mol = openbabel.OBMol()
ob_conversion.ReadString(ob_mol, smi)
ob_conversion.Convert()
res = ob_conversion.WriteString(ob_mol).strip()
return res
def prepare_files(max_gen, workdir, dl_mode):
pre_dir = os.path.join(workdir, "prediction")
os.makedirs(pre_dir, exist_ok=True)
def pre_train_per_gen(gen):
sdf = os.path.join(workdir, "generation_{}/docking_outputs_with_score.sdf".format(gen))
dock = os.path.join(workdir, "generation_{}/mols_for_docking.smi".format(gen))
df_train = get_train(sdf, dock)[['Smiles', 'docking score']]
# write per generation
df_train.to_csv(os.path.join(pre_dir, "train_G{}.csv".format(gen)), index=False)
return df_train
if dl_mode == 1:
# prepare current generation data
pre_train_per_gen(max_gen)
train = os.path.join(pre_dir, "train_G{}.csv".format(max_gen))
pre = get_pre(workdir, max_gen, False)
return train, pre
elif dl_mode == 2:
# prepare files for all the generation and merge together
cum_path = os.path.join(pre_dir, "train_G" + str(max_gen) + "_all.csv")
df_lst = []
for i in tqdm(range(1, max_gen + 1)):
df = pre_train_per_gen(i)
# write cumulative dataframe
df_lst.append(df)
df_all = pd.concat(df_lst, axis=0).sort_values(
by="docking score", ascending=True).drop_duplicates(subset="Smiles", keep="first")
df_all.to_csv(cum_path, index=False)
pre = get_pre(workdir, max_gen, True)
return cum_path, pre
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="SCESE -- Prepare Data for Deep Learning")
parser.add_argument("max_gen", help="Max number of generation.", type=int)
parser.add_argument("workdir", help="Workdir")
parser.add_argument("dl_mode",
help="Mode of deep learning modeling, 1: modeling per generation, 2: modeling overall after all the generation",
type=int, default=0)
args = parser.parse_args()
prepare_files(args.max_gen, args.workdir, args.dl_mode)
| 2.1875
| 2
|
widgets/loading_bar.py
|
Thenujan-0/grub-editor
| 1
|
12784418
|
#!/usr/bin/python
import sys
from PyQt5.QtWidgets import QWidget, QApplication
from PyQt5.QtGui import QPainter, QColor, QFont,QPalette,QPolygonF,QBrush
from PyQt5.QtCore import Qt , QPointF,QRectF,QObject,pyqtSignal,QRunnable,pyqtSlot,QThreadPool,QTimer
import traceback
from time import sleep
class WorkerSignals(QObject):
""" defines the signals available from a running worker thread
supported signals are:
finished
No data
error
tuple( exctype ,value ,traceback.format_exc() )
result
object data returned from processing , anything
"""
finished = pyqtSignal()
error= pyqtSignal(tuple)
result = pyqtSignal(object)
class Worker(QRunnable):
"""
Worker thread
Inherits from QRunnable to handle worker thread setup, signals and wrap-up.
:param callback: The function to run on this worker thread. Supplied args and
kwargs will be passed through to the runner.
:type callback: function
:param args: Arguments to pass to the callback function
:param kwargs: Keyword arguments to pass to the callback function
"""
def __init__(self,fn,*args,**kwargs):
super(Worker, self).__init__()
self.fn =fn
self.args= args
self.kwargs=kwargs
self.signals=WorkerSignals()
@pyqtSlot()
def run(self):
"""
initialise the runner function with passed args and kwargs
"""
try:
result =self.fn(*self.args,**self.kwargs)
except:
traceback.print_exc()
exctype,value = sys.exc_info()[:2]
self.signals.error.emit((exctype, value, traceback.format_exc() ))
else:
self.signals.result.emit(result)
finally:
self.signals.finished.emit()
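# Hedged usage sketch (not part of the original widget, never called by this
# module): shows how a Worker can be scheduled on a QThreadPool and its signals
# connected. `slow_task` is a hypothetical callable used only for illustration.
def _worker_usage_sketch():
    def slow_task(seconds):
        sleep(seconds)
        return seconds * 2
    pool = QThreadPool()
    worker = Worker(slow_task, 1)
    worker.signals.result.connect(print)   # prints 2 when the task finishes
    worker.signals.error.connect(print)    # prints (exctype, value, traceback)
    pool.start(worker)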
class LoadingBar(QWidget):
def __init__(self):
super().__init__()
self.threadpool=QThreadPool()
self.initUI()
# position of the colored part of the loading bar, 0-100
self.position=20
self.startWorker(self.move_loading_bar)
self.loading_increasing=True
# self.timer=QTimer()
# self.timer.timeout.connect(self.move_loading_bar)
# self.timer.start(500)
def move_loading_bar(self):
""" move the loading bar back and forth by changing the value of self.position """
while True:
# print('moving loading bar',self.position)
sleep(0.015)
if self.position ==100:
self.loading_increasing=False
elif self.position==0:
self.loading_increasing=True
if self.loading_increasing:
self.position+=1
else:
self.position-=1
# A RuntimeError may occur if the LoadingBar widget has been deleted, so catch it.
try:
self.update()
except RuntimeError:
pass
def startWorker(self,fn,*args,**kwargs):
worker = Worker(fn, *args, **kwargs)
self.threadpool.start(worker)
def initUI(self):
self.setGeometry(300, 300, 350, 300)
self.setWindowTitle('loading please wait')
self.show()
def paintEvent(self, event):
qp = QPainter()
qp.begin(self)
self.drawText(event, qp)
qp.end()
def drawText(self, event, qp):
width = self.width()
height = self.height()
self.widget_height= 6
#the part of the loading bar that is not going to have the progressed part
reduce_amount = width*0.6
top_left =QPointF(int(width*0.1),int(height/2-self.widget_height/2))
bottom_right =QPointF(int(width*0.9)-reduce_amount ,int(height/2 +self.widget_height/2))
bigger_bottom_right =QPointF(int(width*0.9) ,int(height/2+self.widget_height/2) )
recty =QRectF(QPointF(top_left.x()+self.position/100*reduce_amount,top_left.y()),
QPointF(bottom_right.x()+self.position/100*reduce_amount,bottom_right.y()))
bigger_recty=QRectF(top_left,bigger_bottom_right)
#non progressed part (bigger rounded rect)
qp.setPen(QPalette().color(QPalette.Disabled,QPalette.Text))
qp.setBrush(QBrush(QPalette().color(QPalette.Active,QPalette.Button)))
qp.drawRoundedRect(bigger_recty,3,3)
#progressed part
qp.setBrush(QBrush(QPalette().color(QPalette().Inactive,QPalette().Highlight)))
qp.setPen(QPalette().color(QPalette().Active,QPalette().Highlight))
qp.drawRoundedRect(recty,2,2)
def main():
app = QApplication(sys.argv)
ex = LoadingBar()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| 2.78125
| 3
|
impl/status.py
|
HEG-INCIPIT/ARKetype
| 9
|
12784419
|
<filename>impl/status.py
# =============================================================================
#
# EZID :: status.py
#
# Periodic status reporting.
#
# This module should be imported at server startup so that its daemon
# thread is started.
#
# Author:
# <NAME> <<EMAIL>>
#
# License:
# Copyright (c) 2013, Regents of the University of California
# http://creativecommons.org/licenses/BSD/
#
# -----------------------------------------------------------------------------
import django.conf
import django.db
import logging
import os
import threading
import time
import uuid
import binder_async
import config
import crossref
import datacite
import datacite_async
import download
import ezid
import ezidapp.models
import log
import search_util
# Deferred imports...
"""
import boto3
"""
_enabled = None
_reportingInterval = None
_threadName = None
_cloudwatchEnabled = None
_cloudwatchRegion = None
_cloudwatchNamespace = None
_cloudwatchInstanceName = None
def _formatUserCountList(d):
if len(d) > 0:
l = d.items()
l.sort(cmp=lambda x, y: -cmp(x[1], y[1]))
return " (" + " ".join("%s=%d" % i for i in l) + ")"
else:
return ""
def _statusDaemon():
while _enabled and threading.currentThread().getName() == _threadName:
try:
activeUsers, waitingUsers, isPaused = ezid.getStatus()
na = sum(activeUsers.values())
nw = sum(waitingUsers.values())
ndo = datacite.numActiveOperations()
uql = ezidapp.models.UpdateQueue.objects.count()
bql = binder_async.getQueueLength()
daql = datacite_async.getQueueLength()
cqs = crossref.getQueueStatistics()
doql = download.getQueueLength()
as_ = search_util.numActiveSearches()
no = log.getOperationCount()
log.resetOperationCount()
log.status(
"pid=%d" % os.getpid(),
"threads=%d" % threading.activeCount(),
"paused" if isPaused else "running",
"activeOperations=%d%s" % (na, _formatUserCountList(activeUsers)),
"waitingRequests=%d%s" % (nw, _formatUserCountList(waitingUsers)),
"activeDataciteOperations=%d" % ndo,
"updateQueueLength=%d" % uql,
"binderQueueLength=%d" % bql,
"dataciteQueueLength=%d" % daql,
"crossrefQueue:archived/unsubmitted/submitted=%d/%d/%d"
% (cqs[2] + cqs[3], cqs[0], cqs[1]),
"downloadQueueLength=%d" % doql,
"activeSearches=%d" % as_,
"operationCount=%d" % no,
)
if _cloudwatchEnabled:
import boto3
# Disable annoying boto3 logging.
logging.getLogger("botocore").setLevel(logging.ERROR)
try:
c = boto3.client("cloudwatch", region_name=_cloudwatchRegion)
d = [{"Name": "InstanceName", "Value": _cloudwatchInstanceName}]
data = {
"ActiveOperations": na,
"WaitingRequests": nw,
"ActiveDataciteOperations": ndo,
"UpdateQueueLength": uql,
"BinderQueueLength": bql,
"DataciteQueueLength": daql,
"CrossrefQueueLength": cqs[0] + cqs[1],
"DownloadQueueLength": doql,
"ActiveSearches": as_,
"OperationRate": float(no) / _reportingInterval,
}
r = c.put_metric_data(
Namespace=_cloudwatchNamespace,
MetricData=[
{
"MetricName": k,
"Dimensions": d,
"Value": float(v),
"Unit": "Count/Second"
if k == "OperationRate"
else "Count",
}
for k, v in data.items()
],
)
assert r["ResponseMetadata"]["HTTPStatusCode"] == 200
except:
# Ignore CloudWatch exceptions, as it's not essential.
pass
except Exception, e:
log.otherError("status._statusDaemon", e)
django.db.connections["default"].close()
time.sleep(_reportingInterval)
def loadConfig():
global _enabled, _reportingInterval, _threadName, _cloudwatchEnabled
global _cloudwatchRegion, _cloudwatchNamespace, _cloudwatchInstanceName
_enabled = (
django.conf.settings.DAEMON_THREADS_ENABLED
and config.get("daemons.status_enabled").lower() == "true"
)
if _enabled:
_reportingInterval = int(config.get("daemons.status_logging_interval"))
_threadName = uuid.uuid1().hex
_cloudwatchEnabled = config.get("cloudwatch.enabled").lower() == "true"
if _cloudwatchEnabled:
_cloudwatchRegion = config.get("cloudwatch.region")
_cloudwatchNamespace = config.get("cloudwatch.namespace")
_cloudwatchInstanceName = config.get("cloudwatch.instance_name")
t = threading.Thread(target=_statusDaemon, name=_threadName)
t.setDaemon(True)
t.start()
| 1.945313
| 2
|
liczeniedoszesciu.py
|
SoGreeno/pygamethingidunno
| 0
|
12784420
|
import time
c = 1
print(c)
c += 1
print(c)
time.sleep(1)
c += 1
print(c)
time.sleep(1)
c += 1
print(c)
time.sleep(1)
c += 1
print(c)
time.sleep(1)
c += 1
print(c)
time.sleep(1)
print("jak by co")
time.sleep(0.5)
print("to kod jest z książki")
time.sleep(3)
print("nie kradziony")
time.sleep(2)
| 3.46875
| 3
|
scripts/offuscate_names.py
|
diego0020/lab_vision
| 8
|
12784421
|
<gh_stars>1-10
import os
import hmac
import base64
import glob
import hashlib
from itertools import izip
import csv
in_dir='/home/pabloa/imagenet_tiny/test'
out_dir='/home/pabloa/imagenet_tiny/test_o'
cats_file='/home/pabloa/imagenet_tiny/cats_o.csv'
key='<KEY>'
os.chdir(in_dir)
imgs = glob.glob('*/*.JPEG')
hashes = [base64.urlsafe_b64encode(hmac.new(key,f,hashlib.sha1).hexdigest()) for f in imgs ]
for f,o in izip(imgs,hashes):
os.symlink(os.path.join(in_dir,f),os.path.join(out_dir,o+".JPEG"))
with open(cats_file,"wb") as csv_file:
w=csv.writer(csv_file)
w.writerows(l for l in izip(hashes,(os.path.split(f)[0] for f in imgs)) )
| 2.625
| 3
|
src/bulk_mover/mover_classes/SANCBagger.py
|
StateArchivesOfNorthCarolina/sanc-repo-manager
| 0
|
12784422
|
<filename>src/bulk_mover/mover_classes/SANCBagger.py
import bagit
import os
class SANCBagger(object):
def __init__(self) -> None:
self.bag_to_open = None # type: str
self.tree_to_bag = None # type: str
self.working_bag = None # type: bagit.Bag
self.validation_error_details = None # type: dict
self.validation_error_message = None # type: str
self.bagging_error = None
def open_bag(self, bag_to_open: str) -> bool:
self.bag_to_open = bag_to_open
try:
self.working_bag = bagit.Bag(self.bag_to_open)
return True
except bagit.BagError as e:
self.validation_error_details = e
self.validation_error_message = e
return False
def validate_bag(self) -> bool:
try:
self.working_bag.validate(processes=8)
return True
except bagit.BagValidationError as e:
self.validation_error_details = e.details
self.validation_error_message = e.message
return False
def quick_validate(self) -> bool:
try:
self.working_bag.validate(fast=True)
return True
except bagit.BagValidationError as e:
self.validation_error_details = e.details
self.validation_error_message = e.message
return False
def create_bag(self, tree_to_bag: str, metadata: dict) -> bool:
self.tree_to_bag = tree_to_bag
try:
self.working_bag = bagit.make_bag(self.tree_to_bag, metadata, processes=8, checksum=["sha256"])
self.working_bag.save()
except bagit.BagError as e:
self.bagging_error = e
return False
return True
def which_error(self):
if self.validation_error_message:
return self.validation_error_message, self.validation_error_details
if self.bagging_error:
return "BagError", self.bagging_error
def is_already_bagged(self, tree):
if os.path.isfile(os.path.join(tree, "bagit.txt")):
return True
return False
| 2.203125
| 2
|
app/main/models/posts.py
|
NiHighlism/Minerva
| 4
|
12784423
|
"""
DB Model for Posts and
relevant junction tables
"""
import datetime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.sql import and_, select
from app.main import db
from app.main.models.base import Base
from app.main.models.comments import Comment
from app.main.models.movies import Movie
from app.main.models.postSearches import SearchableMixin
class Post(Base, SearchableMixin):
"""
Description of the Post model.
Columns
-----------
:id: int [pk]
:title: Text [not NULL]
:author_id: int [Foreign Key]
:creation_time: DateTime [not NULL]
:last_edit_time: DateTime [not NULL]
:post_body: Text
# Relationships
:comments: Relationship -> Comments (one to many)
"""
# Columns
id = db.Column(db.Integer, db.ForeignKey("base.id"), primary_key=True)
post_id = db.Column(db.Integer, autoincrement=True,
primary_key=True, unique=True)
title = db.Column(db.Text, nullable=False)
post_movie = db.Column(db.String(20))
tags = db.Column(db.JSON)
__searchable__ = ['title', 'body', 'tags']
__mapper_args__ = {
'polymorphic_identity': 'post',
'inherit_condition': (id == Base.id)
}
comments = db.relationship('Comment', primaryjoin="(Post.post_id == Comment.parent_post_id)",
backref=db.backref('post'), lazy='dynamic')
def __init__(self, author_id, post_movie, title, post_body, tags):
super().__init__(author_id, post_body, "post")
self.title = title
self.post_movie = post_movie
self.tags = tags
db.session.add(self)
db.session.commit()
def add_comment(self, author_id, comment_body):
parent_post_id = self.id
comment = Comment(author_id, parent_post_id, comment_body)
self.comments.append(comment)
db.session.commit()
return comment.id
def update_col(self, key, value):
setattr(self, key, value)
db.session.commit()
def delete_post(self, post_id):
post = Post.query.filter_by(id=post_id).delete()
db.session.commit()
| 2.96875
| 3
|
python/iot-sdk-demo/iotSdkDemo/Device/BatchGetDeviceState.py
|
aliyun/iot-api-demo
| 86
|
12784424
|
<gh_stars>10-100
#!/usr/bin/env python
#coding=utf-8
from aliyunsdkcore.client import AcsClient
from aliyunsdkcore.acs_exception.exceptions import ClientException
from aliyunsdkcore.acs_exception.exceptions import ServerException
from aliyunsdkiot.request.v20180120.BatchGetDeviceStateRequest import BatchGetDeviceStateRequest
client = AcsClient('<accessKeyId>', '<accessSecret>', 'cn-shanghai')
request = BatchGetDeviceStateRequest()
request.set_accept_format('json')
request.set_ProductKey("ProductKey")
request.set_DeviceNames(["DeviceName1","DeviceName2"])
request.set_IotIds(["IotId1","IotId2"])
response = client.do_action_with_exception(request)
# python2: print(response)
print(response)
| 1.851563
| 2
|
helpers/google.py
|
eoglethorpe/DEEPL
| 1
|
12784425
|
from django.conf import settings
import requests
import json
from NER.models import GoogleLocationCache
GOOGLE_GEOCODE_URL =\
'https://maps.googleapis.com/maps/api/geocode/json?key={}&address={}'
def get_google_geocode_url(location):
return GOOGLE_GEOCODE_URL.format(
getattr(settings, 'GOOGLE_API_KEY', ''),
location,
)
def get_location_info(location):
# First query the database, if not found, make a call
try:
cache = GoogleLocationCache.objects.get(_location=location.lower())
return cache.location_info
except GoogleLocationCache.DoesNotExist:
pass
r = requests.get(get_google_geocode_url(location))
try:
info = json.loads(r.text)
location_info = info.get('results')[0]
# save to database
GoogleLocationCache.objects.create(
location=location,
location_info=location_info
)
return location_info
except (ValueError, IndexError):
return {}
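# Hedged usage sketch (requires a configured Django project with the NER app
# installed; never called by this module): 'Kathmandu' is only an example query,
# and 'geometry.location' is the standard Google Geocoding result layout.
def _location_lookup_sketch():
    info = get_location_info('Kathmandu')
    return info.get('geometry', {}).get('location', {}) if info else {}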
| 2.515625
| 3
|
examples/discovery.py
|
BenoitAnastay/aiohue
| 14
|
12784426
|
<reponame>BenoitAnastay/aiohue
"""AIOHue example for HUE bridge discovery."""
import asyncio
from os.path import abspath, dirname
from sys import path
path.insert(1, dirname(dirname(abspath(__file__))))
from aiohue.discovery import discover_nupnp
async def main():
"""Run code example."""
discovered_bridges = await discover_nupnp()
print()
print("Discovered bridges:")
for item in discovered_bridges:
print()
print(item)
print()
try:
asyncio.run(main())
except KeyboardInterrupt:
pass
| 2.421875
| 2
|
tasks/admin.py
|
housepig7/ops
| 394
|
12784427
|
<reponame>housepig7/ops
from django.contrib import admin
# Register your models here.
from .models import history,toolsscript
admin.site.register(history)
admin.site.register(toolsscript)
| 1.351563
| 1
|
lib/evaluation.py
|
Julius-Syvis/PyTorch-Transformer-Studies
| 0
|
12784428
|
<filename>lib/evaluation.py
import time
import torch
from torchtext.data.metrics import bleu_score
class Translator:
def __init__(self, model, spacy_model, field_src, field_trg, device):
self.model = model
self.spacy_model = spacy_model
self.field_src = field_src
self.field_trg = field_trg
self.device = device
def translate(self, sentence, verbose=False):
if isinstance(sentence, str):
tokens = [token.text.lower() for token in self.spacy_model(sentence)]
else:
tokens = [token.lower() for token in sentence]
tokens = [self.field_src.init_token] + tokens + [self.field_src.eos_token]
translation = self.translate_tokens(tokens, verbose)
return translation
def translate_tokens(self, tokens, verbose=False):
self.model.eval()
idx = [self.field_src.vocab.stoi[token] for token in tokens]
tensor = torch.LongTensor(idx).unsqueeze(1).to(self.device)
if verbose:
print(f'Tokenized data ready for manual translation: tensor=[{tensor.shape}]')
sos = self.field_trg.vocab.stoi["<sos>"]
eos = self.field_trg.vocab.stoi["<eos>"]
target = [sos]
for i in range(20):
trg_tensor = torch.LongTensor(target).unsqueeze(1).to(self.device)
with torch.no_grad():
out = self.model(tensor, trg_tensor)
if verbose:
print(f'Time step {i}: tensor=[{tensor.shape}]; trg_tensor=[{trg_tensor.shape}]; out=[{out.shape}]')
choice = out.argmax(2)[-1, :].item()
target.append(choice)
if choice == eos:
break
translation = [self.field_trg.vocab.itos[i] for i in target]
if verbose:
print(f'The final result has {len(translation) - 1} tokens (<sos> excluded)')
return translation[1:]
def calculate_bleu(self, data, verbose=False):
t1 = time.time()
trgs = []
pred_trgs = []
for datum in data:
src = vars(datum)['src']
trg = vars(datum)['trg']
pred_trg = self.translate(src)[:-1]
pred_trgs.append(pred_trg)
trgs.append([trg])
score = bleu_score(pred_trgs, trgs)
t2 = time.time()
minutes = int((t2 - t1) / 60)
seconds = int((t2 - t1) % 60)
if verbose:
print(f'\nTotal Time: {minutes}m {seconds}s')
return score * 100
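# Hedged usage sketch (not part of the original module, never called here):
# `model`, `spacy_de`, `field_src` and `field_trg` are placeholders for a
# trained seq2seq model, a spaCy tokenizer and the torchtext fields built
# elsewhere in the project.
def _translator_usage_sketch(model, spacy_de, field_src, field_trg, device):
    translator = Translator(model, spacy_de, field_src, field_trg, device)
    tokens = translator.translate("ein kleines haus", verbose=True)
    return " ".join(tokens)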
| 2.40625
| 2
|
pythonScripts/BVDDataAnalyzer/SimulationFile.py
|
Yperidis/bvd_agent_based_model
| 1
|
12784429
|
#!/usr/bin/env python
import outputSpecification as outputSpec
import h5py as h5
import os
import numpy as np
import sys
def getFileNames(fileName):
filenames = []
path = ""
if fileName.lower().endswith('.txt'):
path = os.path.dirname(os.path.abspath(fileName))
with open(fileName) as f:
filenames = f.readlines()
del f
elif fileName.lower().endswith('.h5'):
filenames.append(fileName)
else:
print("no valid file given")
return (filenames,path)
class FarmDataManager:
def __init__(self):
self.farms = {}
def getFarm(self,id):
if id in self.farms:
return self.farms[id]
else:
f = Farm(id)
self.farms[id] = f
return f
def getAllInfectedFarmsAtTime(self,time):
AllInfectedFarmsAtTime = []
for id,farm in self.farms.iteritems():
if time in farm.infectiousTimes:
AllInfectedFarmsAtTime.append(farm)
return AllInfectedFarmsAtTime
def getNumberOfInfectiousFarmsAtTime(self, time):
return len(self.getAllInfectedFarmsAtTime(time))
def getPrevalenceAtTime(self, time):
returnDict = {}
returnDict["time"] = time
for id,f in self.farms.iteritems():
fdata = f.getDataAtTime(time)
for key, value in fdata.iteritems():
if not key in returnDict:
returnDict[key] = 0
returnDict[key] = returnDict[key] + value
return returnDict
def addFarmDataDict(self, time, FarmDataTableDict):
id = FarmDataTableDict.pop("id")
farm = self.getFarm(id)
farm.setDataPoint(time, FarmDataTableDict)
class Farm:
def __init__(self, id):
self.data = {}
self.infectiousTimes = []
self.hasBeenInfected = False
self.id = id
def setDataPoint(self, time, dataDict ):
if(dataDict["ti"] != 0 or dataDict["pi"] != 0):
self.infectiousTimes.append(time)
self.hasBeenInfected = True
dataDict["total"] = 0
for key, value in dataDict.iteritems():
if key != "total":
dataDict["total"] += value
self.data[time] = dataDict
def getDataAtTime(self,time):
return self.data[time]
class SimulationFile:
def __init__(self,fileName, printStuff = False):
self.printStuff = printStuff
self.files = []
self.keys = []
self.keyToFiles = {}
self.keysForFiles = {}
self.filenames,self.path = getFileNames(fileName)
for fileName in self.filenames:
fullpath = os.path.join(self.path, fileName).strip()
if self.printStuff:
print "scanning " + fullpath
try:
file = h5.File(fullpath, 'r')
self.files.append(fullpath)
self.keysForFiles[fullpath] = []
for key in file.keys():
if not key in self.keys:
self.keys.append(key)
self.keyToFiles[key] = []
self.keyToFiles[key].append(fullpath)
self.keysForFiles[fullpath].append(key)
file.close()
pass
except IOError as ioe:
print fileName
print ioe
pass
def getFullTable(self, key):
#for fileName in self.retunFilesForKey(key):
return self.getMultipleTables([key])[key]
def getFilesForKey(self, key):
return self.keyToFiles[key]
def getAllKeys(self):
return self.keys
def getKeyForFile(self,fileName):
return self.keysForFiles[fileName]
def getMultipleTables(self, keys):
files = []
readingKeys = []
tables = {}
for key in [key for key in set(keys) if key in self.keys]:
readingKeys.append(key)
files = files + self.getFilesForKey(key)
fileset = set(files)
for fileName in fileset:
file = h5.File(fileName, 'r')
for key in [key for key in self.getKeyForFile(fileName) if key in readingKeys]:
data = file[key]
subTable = np.array(data)
if not key in tables:
tables[key] = subTable
else:
fullTable = tables[key]
tables[key] = np.concatenate([fullTable, subTable])
file.close()
return tables
def getBreedingDynamicsData(self):
data = self.getMultipleTables([outputSpec.deadCowTableName, outputSpec.intermediateCalvingTimesTableName])
return data
def getTestData(self):
data = self.getFullTable(outputSpec.testsTableName)
return data
def getIntermediateCalvingTimes(self):
data = self.getFullTable(outputSpec.intermediateCalvingTimesTableName)
return data
def getDataDictFromDataPoint(self, rowData, dict):
dataDict = {}
for key,value in dict.iteritems():
dataDict[key] = rowData[value]
return dataDict
def getAllDataFromSplitUpTables(self, tablePrefix, timeTableName):
fullRetData = {}
for fileName in self.files:
file = h5.File(fileName, 'r')
try:
h5times = file[timeTableName]
times = np.array(h5times[:].astype(int))
for time in times:
dataSetName = tablePrefix + str(time)
h5data = file[dataSetName]
retData = np.array(h5data)
fullRetData[time] = retData
except KeyError:
print "error"
pass
file.close()
return fullRetData
def getEndemicData(self):
fdm = FarmDataManager()
retData = {}
fullFarmData = self.getAllDataFromSplitUpTables(outputSpec.farmsTablePrefix, outputSpec.farmTimesTableName)
for time,data in fullFarmData.iteritems():
for row in data:
rowData = self.getDataDictFromDataPoint(row, outputSpec.farmTable)
fdm.addFarmDataDict(time,rowData)
retData[time] = {}
retData[time]["prevalenceCows"] = fdm.getPrevalenceAtTime(time)
retData[time]["prevalenceFarms"] = fdm.getAllInfectedFarmsAtTime(time)
return (len(fdm.farms),retData)
def getTradeData(self):
fullTradeData = self.getFullTable(outputSpec.tradingTableName)
trades = np.array( fullTradeData )
tradingAges = {}
tradingAges["male"] = []
tradingAges["female"] = []
for sex in tradingAges:
cols = trades[:,outputSpec.tradeCowSexIndex] == outputSpec.sexes[sex]
data = trades[cols,outputSpec.tradeCowAgeIndex]
tradingAges[sex] = data.reshape(len(data),1)
tradeData = {}
tradeData["tradingAges"] = tradingAges
return tradeData
def getTradeMatrices(self):
fullTradeData = self.getAllDataFromSplitUpTables(outputSpec.tradeTablePrefix, outputSpec.tradeTimesTableName)
return fullTradeData
def getStaticTradesPerFarm(self):
fullTradeData = self.getFullTable(outputSpec.tradingTableName)
tradeTable = outputSpec.tradeTable
returnDict = {}
trades = np.array( fullTradeData )
for trade in trades:
src = trade[tradeTable["srcFarm"]]
time = trade[tradeTable["date"]]
dest = trade[tradeTable["destFarm"]]
#print str(src) + " " + str(dest)
if not src in returnDict:
farmDict = {}
farmDict["id"]=src
farmDict["in"]={}
farmDict["out"]={}
farmDict["times"] = []
else:
farmDict = returnDict[src]
if not dest in returnDict:
destDict = {}
destDict["id"]=dest
destDict["in"]={}
destDict["out"]={}
destDict["times"] = []
else:
destDict = returnDict[dest]
#farmDict["times"].append(time)
#destDict["times"].append(time)
if not dest in farmDict["out"]:
farmDict["out"][dest] = 1
else:
farmDict["out"][dest] += 1
if not src in destDict["in"]:
destDict["in"][src] = 1
else:
destDict["in"][src] += 1
returnDict[src] = farmDict
returnDict[dest] = destDict
return returnDict
# def
| 2.890625
| 3
|
mapping/data_loader/h5_timeseries.py
|
syanga/model-augmented-mutual-information
| 2
|
12784430
|
<reponame>syanga/model-augmented-mutual-information<gh_stars>1-10
from torchvision import datasets, transforms
from ..base import BaseDataLoader
from torch.utils import data
import h5py
import numpy as np
import torch
class TimeseriesDataset(data.Dataset):
"""docstring for BackBlazeDataset"""
def __init__(self, h5_path, feat_list=None, time_trimmed=0, tia=0):
df = h5py.File(h5_path, "r")
xh = df.get("X")
yh = df.get("Y")
assert tia >= 0
self.length = xh.shape[0]
if feat_list is not None:
self.X = np.empty(
(xh.shape[0], xh.shape[1]-tia-time_trimmed, len(feat_list)),
order="C")
if tia > 0:
xh.read_direct(self.X, np.s_[:,time_trimmed:-tia,feat_list], None)
else:
xh.read_direct(self.X, np.s_[:,time_trimmed:,feat_list], None)
else:
self.X = np.empty(
(xh.shape[0], xh.shape[1]-tia, xh.shape[2]),
order="C")
if tia > 0:
xh.read_direct(self.X, np.s_[:,:-tia,:], None)
else:
xh.read_direct(self.X, np.s_[:,:,:], None)
self.Y = np.empty(yh.shape, order="C")
yh.read_direct(self.Y, None, None)
df.close()
self.X = torch.from_numpy(
np.ascontiguousarray(self.X)).type(torch.float32)
self.Y = torch.from_numpy(self.Y).type(torch.float32)
def __len__(self):
return self.length
def __getitem__(self, index):
return self.X[index,...], self.Y[index]
class TimeseriesDataLoader(BaseDataLoader):
def __init__(self, data_dir, batch_size, shuffle,
validation_split, test_split, num_workers,
feat_list=None, time_trimmed=0, tia=0, training=True, seed=0):
self.data_dir = data_dir
self.feat_list = feat_list
self.time_trimmed = time_trimmed
self.tia = tia
self.seed = seed
self.dataset = TimeseriesDataset(data_dir, feat_list, time_trimmed, tia)
super().__init__(
self.dataset, batch_size, shuffle,
validation_split,test_split, num_workers, seed=seed)
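# Hedged usage sketch (never called by this module): 'timeseries.h5' and the
# loader arguments are placeholders, not defaults of this module, and iterating
# assumes BaseDataLoader behaves like a torch DataLoader.
def _timeseries_loader_sketch():
    loader = TimeseriesDataLoader(
        data_dir='timeseries.h5', batch_size=32, shuffle=True,
        validation_split=0.1, test_split=0.1, num_workers=0,
        feat_list=[0, 1, 2], time_trimmed=0, tia=0)
    x, y = next(iter(loader))
    return x.shape, y.shape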
| 2.171875
| 2
|
numba/ir/generator/build.py
|
liuzhenhai/numba
| 1
|
12784431
|
# -*- coding: utf-8 -*-
"""
Generate a package with IR implementations and tools.
"""
from __future__ import print_function, division, absolute_import
import os
from textwrap import dedent
from itertools import chain
from . import generator
from . import formatting
from . import astgen
from . import visitorgen
from . import naming
#------------------------------------------------------------------------
# Tools Flags
#------------------------------------------------------------------------
cython = 1
#------------------------------------------------------------------------
# Tools Resolution
#------------------------------------------------------------------------
class Tool(object):
def __init__(self, codegens, flags=0, depends=[]):
self.codegens = codegens
self.flags = flags
self.depends = depends
def __repr__(self):
return "Tool(codegens=[%s])" % ", ".join(map(str, self.codegens))
def resolve_tools(tool_list, mask, tools=None, seen=None):
if tools is None:
tools = []
seen = set()
for tool in tool_list:
if not (tool.flags & mask) and tool not in seen:
seen.add(tool)
resolve_tools(tool.depends, mask, tools, seen)
tools.append(tool)
return tools
def enumerate_tools(feature_names, mask):
tool_set = set(chain(*[features[name] for name in feature_names]))
tools = resolve_tools(tool_set, mask)
return tools
def enumerate_codegens(feature_names, mask):
tools = enumerate_tools(feature_names, mask)
codegens = list(chain(*[tool.codegens for tool in tools]))
return codegens
#------------------------------------------------------------------------
# Tool Definitions
#------------------------------------------------------------------------
def make_codegen_dict(codegens):
return dict((codegen.out_filename, codegen) for codegen in codegens)
all_codegens = astgen.codegens + visitorgen.codegens
gens = make_codegen_dict(all_codegens)
pxd_ast_tool = Tool([gens[naming.nodes + ".pxd"]], flags=cython)
py_ast_tool = Tool([gens[naming.nodes + ".py"]])
pxd_interface_tool = Tool([gens[naming.interface + ".pxd"]], flags=cython,
depends=[pxd_ast_tool])
py_interface_tool = Tool([gens[naming.interface + ".py"]],
depends=[py_ast_tool])
pxd_visitor_tool = Tool([gens[naming.visitor + ".pxd"]], flags=cython,
depends=[pxd_interface_tool])
py_visitor_tool = Tool([gens[naming.visitor + ".py"]],
depends=[py_interface_tool, pxd_visitor_tool])
pxd_transform_tool = Tool([gens[naming.transformer + ".pxd"]], flags=cython,
depends=[pxd_interface_tool])
py_transformr_tool = Tool([gens[naming.transformer + ".py"]],
depends=[py_interface_tool, pxd_transform_tool])
pxd_ast_tool.depends.extend([pxd_interface_tool, py_interface_tool])
#------------------------------------------------------------------------
# Feature Definitions & Entry Points
#------------------------------------------------------------------------
features = {
'all': [py_ast_tool, py_visitor_tool, py_transformr_tool],
'ast': [py_ast_tool],
'visitor': [py_visitor_tool],
'transformer': [py_transformr_tool],
}
def build_package(schema_filename, feature_names, output_dir, mask=0):
"""
Build a package from the given schema and feature names in output_dir.
:param mask: indicates which features to mask, e.g. specifying
'mask=build.cython' disables Cython support.
"""
codegens = enumerate_codegens(feature_names, mask)
disk_allocator = generator.generate_from_file(
schema_filename, codegens, output_dir)
try:
_make_package(disk_allocator, codegens)
finally:
disk_allocator.close()
#------------------------------------------------------------------------
# Package Building Utilities
#------------------------------------------------------------------------
source_name = lambda fn: os.path.splitext(os.path.basename(fn))[0]
def _make_package(disk_allocator, codegens):
_make_init(disk_allocator, codegens)
# Make Cython dependency optional
# disk_allocator.open_sourcefile("cython.py")
fns = [c.out_filename for c in codegens if c.out_filename.endswith('.pxd')]
if fns:
_make_setup(disk_allocator, [source_name(fn) + '.py' for fn in fns])
def _make_init(disk_allocator, codegens):
init = disk_allocator.open_sourcefile("__init__.py")
init.write(dedent("""
# Horrid hack to work around circular cimports
import os, sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
"""))
for c in codegens:
if c.out_filename.endswith('.py'):
modname = source_name(c.out_filename)
init.write("from %s import *\n" % modname)
def _make_setup(disk_allocator, filenames):
setup = disk_allocator.open_sourcefile("setup.py")
ext_modules = ["Extension('%s', ['%s'])" % (source_name(fn), fn)
for fn in filenames]
setup.write(dedent("""
from distutils.core import setup
from Cython.Distutils import build_ext
from Cython.Distutils.extension import Extension
ext_modules = [
%s
]
setup(
# ext_modules=cythonize('*.pyx'),
ext_modules=ext_modules,
cmdclass={'build_ext': build_ext},
)
""") % formatting.py_formatter.format_stats(",\n", 4, ext_modules))
| 2.078125
| 2
|
sdk/search/azure-search-documents/tests/test_index_documents_batch.py
|
rsdoherty/azure-sdk-for-python
| 2,728
|
12784432
|
<gh_stars>1000+
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import pytest
from azure.search.documents.models import IndexAction
from azure.search.documents import IndexDocumentsBatch
METHOD_NAMES = [
"add_upload_actions",
"add_delete_actions",
"add_merge_actions",
"add_merge_or_upload_actions",
]
METHOD_MAP = dict(zip(METHOD_NAMES, ["upload", "delete", "merge", "mergeOrUpload"]))
class TestIndexDocumentsBatch(object):
def test_init(self):
batch = IndexDocumentsBatch()
assert batch.actions == []
def test_repr(self):
batch = IndexDocumentsBatch()
assert repr(batch) == "<IndexDocumentsBatch [0 actions]>"
batch._actions = [1, 2, 3]
assert repr(batch) == "<IndexDocumentsBatch [3 actions]>"
# a strict length test here would require constructing an actions list
# with a length of ~10**24, so settle for this simple sanity check on
# an extreme case.
        batch._actions = list(range(2000))
assert len(repr(batch)) <= 1024
def test_actions_returns_list_copy(self):
batch = IndexDocumentsBatch()
batch.actions.extend([1, 2, 3])
assert type(batch.actions) is list
assert batch.actions == []
assert batch.actions is not batch._actions
@pytest.mark.parametrize("method_name", METHOD_NAMES)
def test_add_method(self, method_name):
batch = IndexDocumentsBatch()
method = getattr(batch, method_name)
method("doc1")
assert len(batch.actions) == 1
method("doc2", "doc3")
assert len(batch.actions) == 3
method(["doc4", "doc5"])
assert len(batch.actions) == 5
method(("doc6", "doc7"))
assert len(batch.actions) == 7
assert all(
action.action_type == METHOD_MAP[method_name] for action in batch.actions
)
assert all(type(action) == IndexAction for action in batch.actions)
expected = ["doc{}".format(i) for i in range(1, 8)]
assert [action.additional_properties for action in batch.actions] == expected
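# Usage sketch outside these unit tests (hedged: 'search_client' is assumed to be
# an azure.search.documents.SearchClient configured elsewhere):
#
#   batch = IndexDocumentsBatch()
#   batch.add_upload_actions({"hotelId": "1", "rating": 5})
#   results = search_client.index_documents(batch)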
| 2.328125
| 2
|
_AceVision_testcodes/A6449/RS232_serial Pass NG code.py
|
lyj911111/OpenCV_Project
| 0
|
12784433
|
import serial
import cv2
# Error message class raised when the serial connection fails
class errorMessage(Exception):
def __init__(self, msg='init_error_msg'):
self.msg = msg
def __str__(self):
return self.msg
# Initial serial connection
try:
    ser = serial.Serial(
        port='COM3',
        baudrate=9600,
        parity=serial.PARITY_NONE,
        stopbits=serial.STOPBITS_ONE,
        bytesize=serial.EIGHTBITS,
        timeout=0.5  # The PLC sends once per second, so keep the timeout shorter than that.
    )
    print("Successfully connected with PLC")
except:
print("[ERROR] : please check PLC RS232")
# raise errorMessage('[ERROR] : please check PLC RS232')
# Send '1' on PASS, then close the RS232 connection
def passSignal():
    print("PASS the judgement")
    # Read the PLC signal
    if ser.readable():
        res = ser.readline()
        PLC_ready = res.decode()
        PLC_ready = PLC_ready.lower()  # convert to lowercase
        if PLC_ready[0:5] == 'ready':
            print("Protocol received from PLC:", PLC_ready[0:5])  # received protocol
            passSig = '1'.encode()
            ser.write(passSig)  # send
            ser.close()  # close
# Send '2' on NG, then close the RS232 connection
def NGSignal():
    print("NG the judgement")
    # Read the PLC signal
    if ser.readable():
        res = ser.readline()
        PLC_ready = res.decode()
        PLC_ready = PLC_ready.lower()  # convert to lowercase
        if PLC_ready[0:5] == 'ready':
            print("Protocol received from PLC:", PLC_ready[0:5])  # received protocol
            NGSig = '2'.encode()
            ser.write(NGSig)  # send
            ser.close()  # close
'''
Read the 'READY' value sent from the PLC, then call this function as:
    PASS : judgeSignal(1)
    NG   : judgeSignal(2)
'''
def judgeSignal(signal=0):
    print("checking PASS or NG signal...")
    # Read the PLC signal
    if ser.readable():
        res = ser.readline()
        PLC_ready = res.decode()
        PLC_ready = PLC_ready.lower()  # convert to lowercase
        if PLC_ready[0:5] == 'ready':
            print("Protocol received from PLC:", PLC_ready[0:5])  # received protocol
            signal = str(signal)
            signal = signal.encode()
            ser.write(signal)  # send
            ser.close()  # close
if __name__ == "__main__":
while 1:
judgeSignal()
# passSignal()
# NGSignal()
| 2.875
| 3
|
meiduo04/mall/utils/fastdfs/fdfsstorage.py
|
sunsyw/web
| 0
|
12784434
|
from django.core.files.storage import Storage
"""
1.您的自定义存储系统必须是以下的子类 :django.core.files.storage.Storage
2.Django必须能够在没有任何参数的情况下实例化您的存储系统。
这意味着任何设置都应该来自:django.conf.settings
3.您的存储类必须实现_open()和_save() 方法,
以适合您的存储类中的任何其他方法一起。请参阅下面的这些方法。
4.您的存储类必须是可解构的, 以便在迁移中的字段上使用时可以对其进行序列化。
只要您的字段具有可自行序列化的参数,
就 可以使用 django.utils.deconstruct.deconstructible类装饰器
(这就是Django在FileSystemStorage上使用的)
"""
from fdfs_client.client import Fdfs_client
from django.utils.deconstruct import deconstructible
from mall import settings
@deconstructible
class MyStorage(Storage):
    # At initialization, fall back to defaults for any missing configuration values
def __init__(self, config_path=None,ip=None):
if not config_path:
config_path = settings.FDFS_CLIENT_CONF
self.config_path = config_path
if not ip:
ip = settings.FDFS_URL
self.ip = ip
    # _open: open a file (image).
    # FastDFS serves image resources over HTTP, so nothing needs to be opened here, e.g.
    # http://192.168.229.148:8888/group1/M00/00/02/wKjllFx4r_aAJyv2AAGByoOJNyU855.jpg
def _open(self, name, mode='rb'):
pass
    # _save: save a file.
    # Saving is done through FastDFS, so the upload logic is implemented in _save.
def _save(self, name, content, max_length=None):
        # name: image file name
        # content: image resource
        # max_length: maximum length
        # 1. Create the upload client
        # client = Fdfs_client('utils/fastdfs/client.conf')
        # client = Fdfs_client(settings.FDFS_CLIENT_CONF)
        client = Fdfs_client(self.config_path)
        # 2. Get the image. The image cannot be located by name, so read its
        #    binary data from content instead.
        data = content.read()
        # 3. Upload the binary buffer
        result = client.upload_by_buffer(data)
        # result is the return value of the upload
        # 4. Get the remote file_id according to the upload status
"""
{'Group name': 'group1',
'Remote file_id': 'group1/M00/00/02/wKjllFx42-6AW-JBAAGByoOJNyU783.jpg',
'Status': 'Upload successed.',
'Local file name': '/home/python/Desktop/images/2.jpg',
'Uploaded size': '96.00KB',
'Storage IP': '192.168.229.148'}
"""
        if result.get('Status') == 'Upload successed.':
            # Upload succeeded: take the remote file_id
            file_id = result.get('Remote file_id')
        else:
            raise Exception('Upload failed')
        # Return the remote file_id so the framework can store and use it
return file_id
    # exists: whether the file already exists.
    # FastDFS already handles duplicate file names, so there is no need to check
    # for duplicates here; just let the upload proceed.
def exists(self, name):
return False
    # url: by default this would return name as-is.
    # In FastDFS, name is actually the remote file_id, but accessing the image
    # requires prepending http://ip:port/, so url() is overridden to do that.
def url(self, name):
# return 'http://192.168.229.148:8888/' + name
# return settings.FDFS_URL + name
return self.ip + name
# return name
# pass
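# Configuration sketch (hedged: the dotted storage path is an assumption based on this
# file's location; the setting names match those read in __init__ above):
#
#   # settings.py
#   DEFAULT_FILE_STORAGE = 'mall.utils.fastdfs.fdfsstorage.MyStorage'
#   FDFS_CLIENT_CONF = os.path.join(BASE_DIR, 'mall/utils/fastdfs/client.conf')
#   FDFS_URL = 'http://192.168.229.148:8888/'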
| 2.5
| 2
|
2018/day11p2.py
|
JonSn0w/advent-of-code
| 1
|
12784435
|
import numpy as np
n = 300
serial = int(input())
grid = np.array([[int(str(((x+10)*y+serial)*(x+10))[-3])-5 for y in range(1, n+1)] for x in range(1, n+1)])
coord = (0, 0)
mVal, dim = 0, 0
for d in range(4, 2, -1):
squares = sum(grid[x:x-d+1 or None, y:y-d+1 or None] for x in range(d) for y in range(d))
val = int(squares.max())
if mVal < val:
coord = np.where(squares == val)
mVal = val
dim = d
x,y = coord[0][0], coord[1][0]
print(f'({x+1}, {y+1}) X {dim} = {mVal}')
| 3.21875
| 3
|
odoo-13.0/venv/lib/python3.8/site-packages/ImageDraw.py
|
VaibhavBhujade/Blockchain-ERP-interoperability
| 3
|
12784436
|
from PIL.ImageDraw import *
| 1.109375
| 1
|
OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/GL/ARB/explicit_uniform_location.py
|
JE-Chen/je_old_repo
| 0
|
12784437
|
<reponame>JE-Chen/je_old_repo
'''OpenGL extension ARB.explicit_uniform_location
This module customises the behaviour of the
OpenGL.raw.GL.ARB.explicit_uniform_location to provide a more
Python-friendly API
Overview (from the spec)
This extension provides a method to pre-assign uniform locations to
uniform variables in the default uniform block, including subroutine
uniforms. This allows an application to modify the uniform values without
requiring a GL query like GetUniformLocation, GetSubroutineUniformLocation
and GetSubroutineIndex.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/explicit_uniform_location.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ARB.explicit_uniform_location import *
from OpenGL.raw.GL.ARB.explicit_uniform_location import _EXTENSION_NAME
def glInitExplicitUniformLocationARB():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
| 1.882813
| 2
|
code/venv/lib/python3.8/site-packages/datadog_api_client/v2/model/logs_archive_integration_s3.py
|
Valisback/hiring-engineers
| 0
|
12784438
|
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from datadog_api_client.v2.model_utils import (
ModelNormal,
cached_property,
)
class LogsArchiveIntegrationS3(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
validations = {}
@cached_property
def openapi_types():
return {
"account_id": (str,),
"role_name": (str,),
}
attribute_map = {
"account_id": "account_id",
"role_name": "role_name",
}
read_only_vars = {}
def __init__(self, account_id, role_name, *args, **kwargs):
"""LogsArchiveIntegrationS3 - a model defined in OpenAPI
Args:
account_id (str): The account ID for the integration.
role_name (str): The path of the integration.
Keyword Args:
"""
super().__init__(kwargs)
self._check_pos_args(args)
self.account_id = account_id
self.role_name = role_name
@classmethod
def _from_openapi_data(cls, account_id, role_name, *args, **kwargs):
"""Helper creating a new instance from a response."""
self = super(LogsArchiveIntegrationS3, cls)._from_openapi_data(kwargs)
self._check_pos_args(args)
self.account_id = account_id
self.role_name = role_name
return self
| 1.929688
| 2
|
venv/lib/python3.8/site-packages/debugpy/_vendored/pydevd/pydev_ipython/version.py
|
Retraces/UkraineBot
| 2
|
12784439
|
<reponame>Retraces/UkraineBot
/home/runner/.cache/pip/pool/94/b0/52/47c9ad945d5e0b3c3039e8e58dc840c9f4b2d28a43f1bd30fd08d1f7b4
| 0.871094
| 1
|
services/terminal/main.py
|
darqos/darqos
| 0
|
12784440
|
<reponame>darqos/darqos<gh_stars>0
#! /usr/bin/env python
# Copyright (C) 2020 <NAME>
import sys
import typing
from urllib.parse import urlparse
from datetime import datetime
import PyQt5
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtGui import QColor, QKeySequence, QMouseEvent, QIcon, QPixmap
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QMenu, QShortcut, QHBoxLayout, QVBoxLayout, QLineEdit, QLabel, QToolButton, QToolBar, qApp, QAction, QTableWidget, QTableWidgetItem
from darq.type.text import TextTypeView
from darq.rt.history import History
class Type:
def __init__(self):
self.name: str = ""
self.description: str = ""
self.impl: str = ""
self.icon: str = ""
return
class TypeCacheModel:
def __init__(self):
# Table of named types.
self.types = {}
# Sorted type name index.
self.type_names = []
# Most recently used types.
self.used_types = []
# Pinned types, in order.
self.pinned_types: typing.List[str] = []
return
def add_type(self, type_: Type) -> None:
"""Add a type to the system."""
if type_.name in self.types:
raise KeyError(f"Type '{type_.name}' already exists.")
# FIXME: Cache icon.
self.types[type_.name] = type_
return
def remove_type(self, name: str) -> None:
"""Remove a type from the system."""
if name not in self.types:
raise KeyError(f"No such type '{name}'")
if name in self.pinned_types:
self.pinned_types.remove(name)
if name in self.used_types:
self.used_types.remove(name)
del self.types[name]
return
def append_pinned_type(self, name: str) -> None:
"""Append a type to the pinned list."""
t = self.types.get(name)
if t is None:
raise KeyError(f"No such type '{name}'")
self.pinned_types.append(name)
return
def insert_pinned_type(self, index: int, name: str) -> None:
"""Insert a type into the pinned list."""
t = self.types.get(name)
if t is None:
raise KeyError(f"No such type '{name}'")
self.pinned_types.insert(index, name)
return
def unpin_type(self, name: str) -> None:
"""Remove a type from the pinned list."""
if name not in self.pinned_types:
raise KeyError(f"No such type '{name}'")
if name in self.pinned_types:
self.pinned_types.remove(name)
return
def pinned_count(self) -> int:
"""Return number of pinned types."""
return len(self.pinned_types)
def pinned_type(self, index: int) -> Type:
"""Return type at index in pinned type list."""
if index < 0 or index >= len(self.pinned_types):
raise IndexError(f"Pinned types index out of range: {index}")
name = self.pinned_types[index]
return self.types[name]
def use_type(self, name: str) -> None:
"""Record usage of a type."""
if name in self.used_types:
self.used_types.remove(name)
self.used_types.insert(0, name)
return
class ObjectFactory(QWidget):
"""Enables creation of new type instances."""
def __init__(self, *args):
"""Constructor."""
super().__init__(*args)
# Cache.
self.types = TypeCacheModel()
self.init_types()
# Set size and position.
screen = QtWidgets.QDesktopWidget().screenGeometry(0)
self.resize(int(screen.width() * 0.7), int(screen.height() * 0.7))
self.move(int(screen.width() * 0.15), int(screen.height() * 0.15))
# Set window type.
flags = QtCore.Qt.WindowFlags(
QtCore.Qt.WindowStaysOnTopHint | QtCore.Qt.FramelessWindowHint)
self.setWindowFlags(flags)
self.layout = QVBoxLayout()
self.layout.setContentsMargins(50, 50, 50, 50)
# Pinned types.
self.hotbar = QToolBar()
self.hotbar.setStyleSheet("QToolBar { }")
for index in range(self.types.pinned_count()):
t = self.types.pinned_type(index)
url = urlparse(t.icon)
icon = QIcon(url.path)
action = QAction(icon, t.name, qApp)
self.hotbar.addAction(action)
self.layout.addWidget(self.hotbar)
# Search bar.
self.omnibox = QHBoxLayout()
self.omnibox.setContentsMargins(20, 20, 20, 20)
self.omnitext = QLineEdit()
self.omnitext.setStyleSheet("QLineEdit { font-size: 20px; padding: 12px; border: none; border-radius: 10px; }")
        self.omnitext.setFrame(False)  # Doesn't seem to do anything
self.omnitext.setAttribute(QtCore.Qt.WA_MacShowFocusRect, False)
self.omnitext.setPlaceholderText("Search ...")
self.omnitext.setClearButtonEnabled(True)
self.omnitext.setTextMargins(20, 20, 20, 20)
self.omnibox.addWidget(self.omnitext)
self.layout.addLayout(self.omnibox)
# Unfiltered types, MRU order (?)
self.object_table = QVBoxLayout()
self.object_table.setContentsMargins(20, 20, 20, 20)
row1 = QLabel("line")
self.object_table.addWidget(row1)
self.layout.addLayout(self.object_table)
self.layout.setStretch(2, 100)
self.setLayout(self.layout)
self.hide()
return
def init_types(self):
"""Initialize types collection."""
# List of known types, with index by name, by last use, and by
# keyword from their description. Each type has a name, icon, and
# a description.
#
# There's also an ordered list of "pinned" types.
text = Type()
text.name = "Text"
text.description = "Unformatted Unicode document"
text.impl = "file:///Users/d/work/personal/darqos/darq/types/text.py"
text.icon = "file:///Users/d/work/personal/darqos/darq/icons/txt.png"
self.types.add_type(text)
self.types.append_pinned_type("Text")
book = Type()
book.name = "Book Details"
book.description = "Catalog data for a book"
book.impl = ""
book.icon = "file:///Users/d/work/personal/darqos/darq/icons/book.png"
self.types.add_type(book)
self.types.append_pinned_type("Book Details")
return
def on_create(self):
o = TextTypeView()
self.hide()
return
class ObjectSelector(QWidget):
def __init__(self, *args):
super().__init__(*args)
# Set size & position.
screen = QtWidgets.QDesktopWidget().screenGeometry(0)
self.resize(int(screen.width() * 0.7), int(screen.height() * 0.7))
self.move(int(screen.width() * 0.15), int(screen.height() * 0.15))
# Set window type.
flags = QtCore.Qt.WindowFlags(
QtCore.Qt.WindowStaysOnTopHint | QtCore.Qt.FramelessWindowHint)
self.setWindowFlags(flags)
self.layout = QVBoxLayout()
self.layout.setContentsMargins(50, 50, 50, 50)
self.omnibox = QHBoxLayout()
self.omnibox.setContentsMargins(20, 20, 20, 20)
self.omnitext = QLineEdit()
self.omnitext.setStyleSheet("QLineEdit { font-size: 20px; padding: 12px; border: none; border-radius: 10px; }")
        self.omnitext.setFrame(False)  # Doesn't seem to do anything
self.omnitext.setAttribute(QtCore.Qt.WA_MacShowFocusRect, False)
self.omnitext.setPlaceholderText("Search ...")
self.omnitext.setClearButtonEnabled(True)
self.omnitext.setTextMargins(20, 20, 20, 20)
self.omnibox.addWidget(self.omnitext)
self.layout.addLayout(self.omnibox)
# Here I'm going to start with the list of objects from
# the history service. This will need to be completely
# rejigged in future.
# So, a table ... date/time, event, type, details of object.
#
# The object details will be the tricky bit: objects don't
# have to have a name or anything. Perhaps it'd be good for
# the type implementation to have a method to get a description
# of the object in a type-specific way?
self.object_table = QTableWidget(10, 4, self)
self.object_table.setHorizontalHeaderLabels(("Date / Time", "Type", "Event", "Object"))
self.object_table.verticalHeader().setVisible(False)
self.object_table.setContentsMargins(20, 20, 20, 20)
for r in range(10):
for c in (0, 1, 2, 3):
item = QTableWidgetItem()
item.setText(["datetime", "type", "event", "description of object"][c])
self.object_table.setItem(r, c, item)
self.layout.addWidget(self.object_table)
self.setLayout(self.layout)
self.hide()
self.history = History.api()
return
def show(self):
# FIXME: needs a lot more work here ...
# Populate history list
self.object_table.clearContents()
now = datetime.utcnow()
history = self.history.get_events(now, 100, True)
r = 0
for event in history:
for c in range(4):
item = QTableWidgetItem()
item.setText([event[0], "type", event[2], event[1]][c])
self.object_table.setItem(r, c, item)
r += 1
super().show()
return
def hide(self):
super().hide()
return
def toggle_visibility(self):
"""Show if hidden; hide if shown"""
if self.isVisible():
self.hide()
else:
self.show()
return
class UI(QWidget):
def __init__(self, *args):
super().__init__(*args)
self.factory = ObjectFactory()
self.factory_shortcut = QShortcut(QKeySequence("Ctrl+n"), self)
self.factory_shortcut.setContext(QtCore.Qt.ApplicationShortcut)
self.factory_shortcut.activated.connect(self.on_factory)
self.selector = ObjectSelector()
self.selector_shortcut = QShortcut(QKeySequence("Ctrl+s"), self)
self.selector_shortcut.setContext(QtCore.Qt.ApplicationShortcut)
self.selector_shortcut.activated.connect(self.on_selector)
flags = QtCore.Qt.WindowFlags(QtCore.Qt.WindowStaysOnBottomHint)
self.setWindowFlags(flags)
self.showFullScreen()
return
def contextMenuEvent(self, event):
m = QMenu()
new_action = m.addAction("New ...")
find_action = m.addAction("Find ...")
m.addSeparator()
logout_action = m.addAction("Logout")
quit_action = m.addAction("Shutdown")
action = m.exec_(self.mapToGlobal(event.pos()))
if action == new_action:
self.on_factory()
elif action == find_action:
self.on_selector()
elif action == quit_action:
qApp.quit()
def on_factory(self, *args):
"""Display a panel enabling creation of new type instances."""
print("Factory: " + str(args))
if self.factory.isVisible():
self.factory.hide()
else:
self.factory.show()
return
def on_selector(self, *args):
"""Display a panel enabling a search of existing objects."""
print("Selector: " + str(args))
self.selector.toggle_visibility()
return
def main():
app = QApplication(sys.argv)
ui = UI()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
| 2.125
| 2
|
Scripts/Dealer/CustomThreads/Reconstruction/Reconstruction.py
|
Sk3pper/AASSS-PoC
| 0
|
12784441
|
<reponame>Sk3pper/AASSS-PoC
import threading
import os
from CustomThreads.groups import MODP2048
from CustomThreads.groups import parametres
from CustomThreads.PedersenUtilities.VSS import pedersenVerify
from CustomThreads.PedersenUtilities.VSS import pedersenRecover
from CustomThreads.PedersenUtilities.VSS import genRand
from CustomThreads.aesCustomKey import AES_encrypt, AES_decrypt
from CustomThreads.Sharing.Sharing import sharing2Dealer
from CustomThreads.Utils.Utils_function import write_data, send_data, readPenultimeLine, readIntPenultimeLine, \
computeCoordinate, split, strToIntList
from CustomThreads.Utils.Utils_function import logMsg, logError
# load group
g2048 = MODP2048()
par = parametres()
t = g2048.t
n = g2048.n
CHAR_MSG_SPLIT = par.CHAR_MSG_SPLIT
PATH_DATA_USERS = par.PATH_DATA_USERS
CHAR_COORD_SPLIT = par.CHAR_COORD_SPLIT
WHICH_PHASE = par.WHICH_PHASE
FILE_NAME_COMM = par.FILE_NAME_COMM
FILE_NAME_SHARE = par.FILE_NAME_SHARE
CHAR_DATA_SPLIT = par.CHAR_DATA_SPLIT
FILE_NAME_NW_INFORMATION = par.FILE_NAME_NW_INFORMATION
FILE_NAME_MC = par.FILE_NAME_MC
IP_SHAREHOLDERS = par.IP_SHAREHOLDERS
PORT_SHAREHOLDERS = par.PORT_SHAREHOLDERS
PORT_EXTERNAL_SERVER = par.PORT_EXTERNAL_SERVER
IP_EXTERNAL_SERVER = par.IP_EXTERNAL_SERVER
COD200 = par.COD200 # everything went well
COD300 = par.COD300 # save did not go well
COD400 = par.COD400 # xi != h(xi)
COD450 = par.COD450 # (s_i,t_i) given by DEALER TO SHAREHOLDER is not consistent with commitments
COD500 = par.COD500 # (s_i,t_i) given by SHAREHOLDER TO DEALER is not consistent with commitments
COD550 = par.COD550 # S' != \overline(S')
COD650 = par.COD650 # error in REC-Client MC!=D_k'[MS]
COD700 = par.COD700 # error in REC2-Dealer MC_dec != D_k[MC]
COD750 = par.COD750 # error in REC2-Dealer D_k'[MS] != MC
COD999 = par.COD999 # error in SHA1-ExternalServer
COD888 = par.COD888 # error in REC1 ES
# NEW CODES
COD600 = par.COD600 # user_id is not present --> must go through signup first
COD600_desc = par.COD600_desc
COD800 = par.COD800 # Insufficient shares from shareholders --> check shareholders and xi given from client
COD800_desc = par.COD800_desc
COD830 = par.COD830 # Some SHAREHOLDERS give to the dealer INCORRECT shares (3)
COD830_desc = par.COD830_desc
COD850 = par.COD850 # Insufficient correct shares -> too much shareholder that cheated (4)
COD850_desc = par.COD850_desc
COD860 = par.COD860 # Client is cheating, S' != S' rec
COD860_desc = par.COD860_desc
COD880 = par.COD880 # Less than n-coordinates from Client
COD880_desc = par.COD880_desc
COD2000 = par.COD2000 # D_k[MC_given in SHA from client] != preMC --> Client is cheating (7)
COD2000_desc = par.COD2000_desc
COD2400 = par.COD2400 # MC != D_k[MS] --> ExternalServer is cheating (8)
COD2400_desc = par.COD2400_desc
COD2600 = par.COD2600 # MC'_from_Client != MC'_from_ExternalServer --> Es or client is cheating (9)
COD2600_desc = par.COD2600_desc
COD444 = par.COD444 # Generale error
COD444_desc = par.COD444_desc
DELIM = par.DELIM
#################################
# REC1 #
#################################
# send the x_i to shareholders and retrieve the (s_i,t_i)
def request_Shares(abscissa_vect, id_user):
    # cycle over abscissa_vect
    # The Dealer sends a request to each Shareholder asking it to return its pair (s_i, t_i).
    # The request is made with n threads that wait for the data.
    # The main thread waits until every Shareholder has replied, then puts the information together.
i = 0
j = 1
th_list = []
for IP in IP_SHAREHOLDERS:
# start thread listener connections unicast
out_data = "REC" + CHAR_MSG_SPLIT + str(abscissa_vect[i]) + CHAR_DATA_SPLIT + id_user + par.CHAR_DATA_SPLIT + DELIM
th = RequestSharesThread(IP, PORT_SHAREHOLDERS, out_data, id_user, j, abscissa_vect[i])
# unicast_conn.setDaemon(True)
th.start()
th_list.append(th)
i += 1
j += 1
for th in th_list:
th.join()
    # At this point we have the replies of each shareholder.
    # Check that we have at least t shares
    # (we don't know yet whether they are correct; for this first step we only check that they exist)
shares = []
t_shares = []
error = 0
ip_shareholders = []
coordinates = []
for th in th_list:
info = th.getIn_data()
ip_shareholders.append(th.getIp())
        # info should be CODX||Desc or COD200||s_i||t_i
data = split(info, CHAR_DATA_SPLIT)
if data[0] == COD200:
shares.append(data[1])
t_shares.append(data[2])
coordinates.append(th.getCoordinate())
else:
shares.append(None)
t_shares.append(None)
coordinates.append(None)
error += 1
print " shares: " + str(shares)
print " t_shares: " + str(t_shares)
print " ip_shareholders: " + str(ip_shareholders)
print " error: " + str(error)
return shares, t_shares, coordinates, ip_shareholders, error
class RequestSharesThread(threading.Thread):
# take host_ip, port to contact, and data to send.
# send the request and wait the respond
def __init__(self, host_ip, port, out_data, id_user, id, xi):
"""
:rtype: threading.Thread
"""
threading.Thread.__init__(self)
self.host_ip = host_ip
self.port = port
self.out_data = out_data
self.in_data = ''
self.id_user = id_user
self.id = id
self.xi = xi
def run(self):
try:
# perform the request and wait the respond
sh = "Shareholder-" + str(self.id)
# From, To, Payload, Phase, id_user
logMsg("Dealer", sh, self.out_data, "RECONSTRUCTION", self.id_user)
self.in_data = send_data(self.host_ip, self.port, self.out_data)
except Exception as e:
print "\n General Error: " + str(e)
self.in_data = COD444 + CHAR_DATA_SPLIT + COD444_desc
# Log error Phase, Actor, CODError, Payload, id_user
payload = COD444_desc + str(e) + " IP: "+str(self.host_ip)
logError("SHARING", "Dealer", COD444, payload, self.id_user)
def getIn_data(self):
return self.in_data
def getIp(self):
return self.host_ip
def getCoordinate(self):
return self.xi
# delete the None information inside these lists
def deleteNone(shares, t_shares, coordinates, ip_shareholders):
s = []
ts = []
c = []
ips = []
for i in range(0, len(shares)):
if (shares[i] is not None) and (t_shares[i] is not None) and (coordinates[i] is not None):
s.append(int(shares[i]))
ts.append(int(t_shares[i]))
c.append(int(coordinates[i]))
ips.append(ip_shareholders[i])
print "\n shares: " + str(s)
print " t_shares: " + str(ts)
print " coordinates: " + str(c)
print " ip_shareholders: "+str(ip_shareholders)
return s, ts, c, ips
def deleteIncorrectShares(check_list_shares, shares, t_shares, coordinates, ip_shareholders):
# we have to pass only the correct info
c_shares = []
c_coordinates = []
c_t_shares = []
ips = []
for i in range(0, len(shares)):
if check_list_shares[i] == True:
c_shares.append(shares[i])
c_t_shares.append(t_shares[i])
c_coordinates.append(coordinates[i])
ips.append(ip_shareholders[i])
print "\n c_shares: " + str(c_shares)
print " c_t_shares: " + str(c_t_shares)
print " c_coordinates: " + str(c_coordinates)
print " c_ips: " + str(ips)
return c_shares, c_t_shares, c_coordinates, ips
def reconstructionDealer1(self, sPrime, abscissa_vect, sSecond, mcPrime, eMS, id_user):
print " reconstructionDealer1"
path_user = PATH_DATA_USERS + "/" + id_user
path_file_comm = path_user + "/" + FILE_NAME_COMM
path_file_name = path_user + "/" + FILE_NAME_NW_INFORMATION
msg = ''
try:
# check if the user is already signed-up
if os.path.isdir(path_user):
if len(abscissa_vect) != n:
print " Less than n-coordinates from Client"
msg = COD880 + CHAR_DATA_SPLIT + COD880_desc
# Log error Phase, Actor, CODError, Payload, id_user
logError("RECONSTRUCTION", "Dealer", COD880, COD880_desc, id_user)
else:
# request (si,ti) to SHAREHOLDERS!
shares, t_shares, coordinates, ip_shareholders, error = request_Shares(abscissa_vect, id_user)
if error != 0 and (n-error) >= t:
print " report the error: we can continue but something is went bad --> deeply understand"
m = "someone did not replay: "
for i in range(0, len(shares)):
if shares[i] is None:
m = m + " | " + str(ip_shareholders[i])
# Log error Phase, Actor, CODError, Payload, id_user
logError("RECONSTRUCTION", "Dealer", "None", m, id_user)
if (n-error) >= t:
                    # read the file and extract the information; take the penultimate line,
                    # because the last line is just the \n character
Commitments = readIntPenultimeLine(path_file_comm)
print " Commitments:" + str(Commitments)
# now we have commits and shares we can CHECK if the shares and t-shares are correct through commits
check_list_shares = []
shares, t_shares, coordinates, ip_shareholders = deleteNone(shares, t_shares, coordinates, ip_shareholders)
Commitments = strToIntList(Commitments)
# check the given shares
correct_shares = 0
for i in range(0, len(shares)):
check_result = pedersenVerify(coordinates[i], shares[i], t_shares[i], Commitments)
check_list_shares.append(check_result)
if check_result:
correct_shares += 1
print " check_list_shares: "+str(check_list_shares)
# report the error because some shareholder is compromised
if correct_shares != (n-error):
# some of the given shares are incorrect
print " Log the error for the admin system"
msg = COD830 + CHAR_DATA_SPLIT + COD830_desc
m = 'who is cheating: '
for i in range(0, len(check_list_shares)):
if check_list_shares[i] == False:
print " who is cheating: " + str(ip_shareholders[i])
m = m + " | " + str(ip_shareholders[i])
# Log error Phase, Actor, CODError, Payload, id_user
payload = COD830_desc + " " + m
logError("RECONSTRUCTION", "Dealer", COD830, payload, id_user)
# with at least t correct shares we can reconstruct the secret
if correct_shares >= t:
# reconstruct the secret
# take only the CORRECT SHARES in order do rebuild the secret
c_shares, c_t_shares, c_coordinates, c_ip_shareholder = deleteIncorrectShares(check_list_shares, shares, t_shares, coordinates, ip_shareholders)
recoveredK = pedersenRecover(c_coordinates, c_shares, c_t_shares, Commitments)
print "\n recoveredK: " + str(recoveredK)
if recoveredK == sPrime:
print " secret: " + str(sPrime) + " recoveredK: " + str(recoveredK) + " True"
# since the secret given to the user is equal to the reconstructed one we can save the
# temporary data like: sSecond, mcPrime, eMS, id_user,
# Precalculating also x1',..,xn'
coordinate = computeCoordinate()
# sSecond||coordinate||mcPrime||eMS
data = str(sSecond) + CHAR_DATA_SPLIT + \
coordinate + CHAR_DATA_SPLIT + \
str(mcPrime) + CHAR_DATA_SPLIT + \
eMS + "\n"
# flush data
write_data(path_file_name, data)
# now we have to contact ExternarlServer and send to it: REC1|||eMS||id_user
out_data_es = WHICH_PHASE[2] + CHAR_MSG_SPLIT + \
str(eMS) + CHAR_DATA_SPLIT + id_user + par.CHAR_DATA_SPLIT + DELIM
# Log the message (From, To, Payload, Phase, id_user)
logMsg("Dealer", IP_EXTERNAL_SERVER, out_data_es, "RECONSTRUCTION", id_user)
# ExternalServer's replay
in_data_es = send_data(IP_EXTERNAL_SERVER, PORT_EXTERNAL_SERVER, out_data_es)
if COD200 in in_data_es:
print " Now the client can contact the ES"
msg = COD200 + CHAR_DATA_SPLIT + str(coordinate)
else:
print " Something was went wrong in the ES"
msg = in_data_es
else:
print " ERROR, something is went wrong in the pedersenRecover secret: " + str(sPrime) + \
" recoveredK: " + str(recoveredK) + " False"
msg = COD860 + CHAR_DATA_SPLIT + COD860_desc
else:
print " ERROR: less than t CORRECT shares"
m = ' who is cheating: '
for i in range(0, len(check_list_shares)):
if check_list_shares[i] == False:
print " who is cheating: "+str(ip_shareholders[i])
m = m +" | "+ str(ip_shareholders[i])
payload = COD850_desc + m
msg = COD850 + CHAR_DATA_SPLIT + COD850_desc
# Log error Phase, Actor, CODError, Payload, id_user
logError("RECONSTRUCTION", "Dealer", COD850, payload, id_user)
else:
# less than t-shareholders give us the coordinates
print " ERROR: less than t-shareholders give us the coordinates"
print " Someone of the SHAREHOLDERS does not reply, discover why: " \
"Client give us the incorrect coordiante?"
print " The Shareholder that did not reply are: "
m = " The Shareholder that did not reply are: "
for i in range(0, len(shares)):
if shares[i] is None:
print ip_shareholders[i]
m = m +" | "+ str(ip_shareholders[i])
payload = COD800_desc + m
# Log error Phase, Actor, CODError, Payload, id_user
logError("SHARING", "Dealer", COD800, payload, id_user)
msg = COD800 + CHAR_DATA_SPLIT + COD800_desc
else:
print " ERROR: user_id is NOT present. It has to signed-up before"
msg = COD600 + CHAR_DATA_SPLIT + COD600_desc
# Log error Phase, Actor, CODError, Payload, id_user
logError("SHARING", "Dealer", COD600, COD600_desc, self.id_user)
except Exception as e:
print "\n General Error: " + str(e)
msg = COD444 + CHAR_DATA_SPLIT + COD444_desc
# Log error Phase, Actor, CODError, Payload, id_user
payload = COD444_desc + str(e)
logError("SHARING", "Dealer", COD444, payload, id_user)
# Log message (From, To, Payload, Phase, id_user)
logMsg("Dealer", "Client", msg, "RECONSTRUCTION", id_user)
# send back to the Client
self.csocket.send((bytes(msg).encode("utf-8")))
print " Client at " + str(self.clientAddress) + " disconnected..."
#################################
# REC2 #
#################################
def retrieve_info(path_user):
    # read the MC information from the MC_information file
path_file_MC = path_user + "/" + FILE_NAME_MC
MC = readPenultimeLine(path_file_MC)[:-len("\n")]
print " MC: " + str(MC)
# prendo le informazioni nuove dal file new_informations
path_file_NW_INFORMATION = path_user + "/" + FILE_NAME_NW_INFORMATION
info = split(readPenultimeLine(path_file_NW_INFORMATION), CHAR_DATA_SPLIT)
print " info: " + str(info)
# model: sSecond||coordinate||mcPrime||eMS
sSecond = info[0]
coordinate = info[1]
mcPrime_fromClient = info[2]
eMS = info[3]
return MC, sSecond, coordinate, mcPrime_fromClient, eMS
def reconstructionDealer2(self, k, kPrime, MS, MC_dec, MC_prime_fromES, id_user):
print "\n reconstructionDealer2"
path_user = PATH_DATA_USERS + "/" + id_user
msg = ''
try:
if not os.path.isdir(path_user):
print " ERROR: user_id is NOT present. It has to signed-up before"
msg = COD600 + CHAR_DATA_SPLIT + COD600_desc
# Log error Phase, Actor, CODError, Payload, id_user
logError("RECONSTRUCTION", "Dealer", COD600, COD600_desc, self.id_user)
else:
# retrieve the information from the two files: FILE_NAME_MC e FILE_NAME_NW_INFORMATION
MC, sSecond, coordinate, mcPrime_fromClient, eMS = retrieve_info(path_user)
# compute the 3 checks
# check3: mcPrime_fromClient =?= MC_prime_fromES
print "\n mcPrime_fromClient: " + mcPrime_fromClient
print " MC_prime_fromES: " + MC_prime_fromES
if mcPrime_fromClient == MC_prime_fromES:
print " mcPrime_fromClient == MC_prime_fromES"
# check2: D_k'[MS] =?= MC
decrypted_MS = AES_decrypt(kPrime, MS)
print "\n decrypted MS: " + decrypted_MS
print " MC: " + MC
if decrypted_MS == MC:
print " decrypted_MS == MC"
# check1: MC_dec =?= D_k[MC]
decrypted_MC = AES_decrypt(k, MC)
print "\n decrypted MC: " + decrypted_MC
print " MC_dec: " + MC_dec
if decrypted_MC == MC_dec:
print " decrypted_MC == MC_dec"
print "\n ricostruiamo il segreto!!"
# All 3 checks are passed
# Save the new value of MC that is MC'
path_file_name = path_user + "/" + FILE_NAME_MC
data = str(mcPrime_fromClient) + "\n"
# flush data
write_data(path_file_name, data)
# Replace S' with S'' but this time the coordinates are fixed
# set the information and call sharing SHA2
coordinate = split(coordinate, CHAR_COORD_SPLIT)
print " informations passed to sharing2Dealer:" + \
"\n sSecond: " + str(sSecond) + \
"\n id_user: " + str(id_user) + \
"\n coordinate: " + str(coordinate)
coordinate = strToIntList(coordinate)
# Start the sharing phase SHA2
sharing2Dealer(None, int(sSecond), id_user, abscissa_vector=coordinate)
msg = COD200
else:
print " decrypted_MC != MC_dec"
msg = COD2000 + CHAR_DATA_SPLIT + COD2000_desc
# Log error Phase, Actor, CODError, Payload, id_user
logError("RECONSTRUCTION", "Dealer", COD2000, COD2000_desc, id_user)
else:# error 8
print " decrypted_kPrime_MS != MC"
msg = "decrypted_kPrime_MS != MC"
msg = COD2400 + CHAR_DATA_SPLIT + COD2400_desc
# Log error Phase, Actor, CODError, Payload, id_user
logError("RECONSTRUCTION", "Dealer", COD2400, COD2400_desc, self.id_user)
else:
# error 9
print " mcPrime_fromClient != MC_prime_fromES"
msg = COD2600 + CHAR_DATA_SPLIT + COD2600_desc
# Log error Phase, Actor, CODError, Payload, id_user
logError("RECONSTRUCTION", "Dealer", COD2600, COD2600_desc, self.id_user)
except Exception as e:
print "\n General Error: " + str(e)
msg = COD444 + CHAR_DATA_SPLIT + COD444_desc
# Log error Phase, Actor, CODError, Payload, id_user
payload = COD444_desc + str(e)
logError("RECONSTRUCTION", "Dealer", COD444, payload, id_user)
self.csocket.send((bytes(msg).encode("utf-8")))
print " Client at " + str(self.clientAddress) + " disconnected..."
| 1.789063
| 2
|
parallelism.py
|
JNY0606/parallelismForAllLang
| 3
|
12784442
|
import multiprocessing as p
import time
def doit():
''' check CPU parallelism
while True:
pass
#'''
time.sleep(1)
    print('parallel')  # originally printed 並行 ("parallel")
count=0
def main():
def listener(x):
global count
count+=1
time.sleep(1)
print(count)
threads=5
pool=p.Pool()
for i in range(threads):
pool.apply_async(doit, callback=listener)
# '''
while threads!=count:
time.sleep(1)
#'''
if __name__ == '__main__':
    main()
| 3.125
| 3
|
vega/language/types.py
|
philippwiesner/compiler
| 0
|
12784443
|
"""Vega language types
Representation of vega variable types
The following variable types are defined here:
Basic Types: INT, FLOAT, CHAR, BOOL
Complex Types: Array, String
"""
from typing import List
from vega.language.token import Tag
from vega.language.token import Word
class Type(Word):
"""Simple type
Simple or basic variable types are integers, floating point numbers, chars
or boolean values.
Each of them can be created after reading the correct keyword and uses a
pre defined amount of memory space.
"""
def __init__(self, var_type: str, tag: Tag, width: int) -> None:
"""Create new variable type
Args:
var_type: variable type
tag: type tag (used to differ between basic and more complex types)
width: memory width
"""
super().__init__(var_type, tag)
self.__width: int = width
@property
def width(self) -> int:
"""Width property
Returns:
memory width of type
"""
return self.__width
INT = Type("int", Tag.BASIC, 4)
FLOAT = Type("float", Tag.BASIC, 8)
CHAR = Type("char", Tag.BASIC, 1)
BOOL = Type("bool", Tag.BASIC, 1)
class Array(Type):
"""Array type
Arrays are a more complex type as they are defined by a basic type and
a size of number of elements to be stored in the array. Arrays can also be
nested.
"""
def __init__(self, var_type: Type, **kwargs) -> None:
"""Create new array
When a new array is created the size of the array is stored
alongside the amount of memory to be allocated for storing the
number of elements of the array type. For nested elements each array
size is stored in a list to be able to compare nested arrays with each
other.
Args:
var_type: array type (basic types, another array)
**kwargs: size of the array
"""
self.__size: int = kwargs.get('size', 0)
self.__dimensions: List = [self.__size]
self.__type: Type = var_type
if isinstance(var_type, Array):
self.__dimensions = var_type.dimensions + self.__dimensions
self.__type = var_type.type
super().__init__('[]',
Tag.INDEX,
self.__size * var_type.width)
@property
def dimensions(self) -> List:
"""Dimension property
Returns:
list of array dimensions
"""
return self.__dimensions
@property
def type(self) -> Type:
"""Array type property
Returns:
basic array type (INT, CHAR, BOOL, FLOAT)
"""
return self.__type
def __repr__(self) -> str:
return f'{self.__class__.__name__}({self.type}{self.dimensions!r})'
def __str__(self) -> str:
return f'{self.type}{self.dimensions}'
class String(Array):
"""String Type
Basically Strings are just char arrays. Therefore we only create a new
array with the CHAR base type.
"""
def __init__(self, **kwargs):
"""Create char array"""
super().__init__(CHAR, **kwargs)
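# Usage sketch (illustrative, not part of the original module): a 2-element array
# whose elements are 3-element int arrays; nested dimensions accumulate innermost-first.
if __name__ == "__main__":
    matrix = Array(Array(INT, size=3), size=2)
    print(matrix.dimensions)   # [3, 2]
    print(matrix.width)        # 2 * (3 * 4) = 24 bytes
    print(matrix.type is INT)  # True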
| 3.984375
| 4
|
intro_to_machine_learning/lesson/lesson_3_decision_trees/classifyDT.py
|
robinl3680/udacity-course
| 68
|
12784444
|
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 29 18:17:51 2014
@author: tvu
"""
def classify(features_train, labels_train):
    ### your code goes here--should return a trained decision tree classifier
from sklearn import tree
clf = tree.DecisionTreeClassifier()
clf = clf.fit(features_train, labels_train)
return clf
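# Minimal usage sketch (hypothetical toy data, not part of the original exercise):
if __name__ == "__main__":
    features_train = [[0.0, 0.0], [1.0, 1.0], [0.1, 0.9], [0.9, 0.1]]
    labels_train = [0, 1, 1, 0]
    clf = classify(features_train, labels_train)
    print(clf.predict([[0.8, 0.9]]))  # likely class 1, since the label tracks the second feature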
| 3.359375
| 3
|
scripts/bam2species_map.py
|
861934367/cgat
| 0
|
12784445
|
<gh_stars>0
'''bam2species_map.py
=============================================
:Author: <NAME>
:Release: $Id$
:Date: |today|
:Tags: Python
Purpose
-------
Produce a mapping txt file between contigs and species based
on aligned reads.
Usage
-----
Example::
python bam2species_map.py --help
Type::
python bam2species_map.py --help
for command line help.
Documentation
-------------
This script would be used as a pre-step to using
contigs2random_sample.py. It provides a mapping between contigs and
species that are represented in those contigs i.e. in a metagenomic
simulation study the majority species for a contig will be returned
with the contig.
Command line options
--------------------
'''
import sys
import optparse
import pysam
import collections
import CGAT.Experiment as E
import CGAT.FastaIterator as FastaIterator
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if not argv:
argv = sys.argv
# setup command line parser
parser = optparse.OptionParser(version="%prog version: $Id: script_template.py 2871 2010-03-03 10:20:44Z andreas $",
usage=globals()["__doc__"])
parser.add_option("-b", "--bam-file", dest="bamfile", type="string",
help="supply bam file")
# add common options (-h/--help, ...) and parse command line
(options, args) = E.Start(parser, argv=argv)
# read in contigs
E.info("reading in contig file")
contigs = {}
for fasta in FastaIterator.iterate(options.stdin):
contigs[fasta.title] = (1, len(fasta.sequence) - 1)
E.info("read %i contigs" % len(contigs.keys()))
# read in bamfile
E.info("reading bam file")
samfile = pysam.Samfile(options.bamfile)
E.info("iterating over contigs")
c = 0
for contig, coords in contigs.iteritems():
coords = list(coords)
#################################
# NB this is specific for my data!
contig = contig.split(" ")[0]
#################################
species_counts = collections.defaultdict(int)
for alignment in samfile.fetch(contig, coords[0], coords[1]):
species_id = alignment.qname.split("|")[1]
species_counts[species_id] += 1
# at the moment ignore if there are no counts
if len(species_counts.values()) == 0:
E.warn("no reads map to %s" % contig)
continue
for species, count in species_counts.iteritems():
if species_counts[species] == max(species_counts.values()):
top_dog = species
c += 1
break
E.info("species %s assigned to contig number %i" % (top_dog, c))
options.stdout.write("%s\t%s\n" % (contig, top_dog))
# write footer and output benchmark information.
E.Stop()
if __name__ == "__main__":
sys.exit(main(sys.argv))
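# Command-line sketch (hypothetical filenames): stream contigs on stdin and pass the BAM:
#
#   cat contigs.fasta | python bam2species_map.py -b aligned_reads.bam > contig2species.tsv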
| 3
| 3
|
bmsAccumulator.py
|
clean-code-craft-tcq-1/function-ext-python-UtkrshGupta
| 0
|
12784446
|
<reponame>clean-code-craft-tcq-1/function-ext-python-UtkrshGupta
from bmsConstants import bms_multilingual_logs_status_heading
from bmsConstants import bms_multilingual_logs
import bmsGlobalParam as bgp
def accumulateLogs(bms_error_report, log_mode):
if log_mode == 'on':
bms_log_report(bms_error_report)
def bms_log_report(bms_error_report):
if len(bms_error_report) == 0:
print(bms_multilingual_logs_status_heading[bgp.language][0]+'\n')
else:
for param_name, param_value in zip(bms_error_report.keys(), bms_error_report.values()):
printLog(param_name, param_value)
print('\n')
def printLog(param_name,param_value):
log_key = list(bms_multilingual_logs[bgp.language].keys())
if param_value[0] == log_key[0]:
print('\033[31m' + '{}'.format(param_value[0]) + '\033[m' + ' : {} -> {}'.format(param_name,param_value[1]))
elif param_value[0] == log_key[1]:
print('\033[32m' + '{}'.format(param_value[0]) + '\033[m' + ' : {} -> {}'.format(param_name,param_value[1]))
| 2.390625
| 2
|
frag_pele/Analysis/compute_atom_atom_distance.py
|
danielSoler93/FrAG_PELE
| 26
|
12784447
|
<gh_stars>10-100
import glob
import os
import argparse
import joblib
import mdtraj as md
import pandas as pd
def parseArguments():
"""
Parse the command-line options
:returns: str, int, int -- path to file to results folder,
index of the first atom,
index of the second atom
"""
desc = "It includes the atom-atom distance of the specified ones to report files\n"
parser = argparse.ArgumentParser(description=desc)
required_named = parser.add_argument_group('required named arguments')
required_named.add_argument("sim_folder", type=str, help="Path to the simulation results.")
required_named.add_argument("-a", "--atoms", type=int, nargs='+', action='append',
help="List of pairs of indexes with atoms to compute the distance between them.")
parser.add_argument("-t", "--traj", default="trajectory_",
help="Trajectory file prefix.")
parser.add_argument("-r", "--rep", default="report_",
help="Report file prefix.")
parser.add_argument("-p", "--proc", type=int, default=4,
help="Number of processors to paralellize the computation.")
args = parser.parse_args()
return args.sim_folder, args.atoms, args.traj, args.rep, args.proc
def compute_atom_atom_dist(infile, atoms_list):
distances = []
names = []
traj = md.load_pdb(infile)
print(atoms_list)
for at_pair in atoms_list:
name ="{}-{}".format(traj.topology.atom(at_pair[0]), traj.topology.atom(at_pair[1]))
distance = md.compute_distances(traj, [at_pair])
distances.append(distance)
names.append(name)
return distances, names
def compute_distances_from_report(atomlist, report, trajectory):
distances, colnames = compute_atom_atom_dist(trajectory, atomlist)
new_lines = []
with open(report) as rep:
rep_lines = rep.readlines()
rep_lines = [x.strip("\n") for x in rep_lines]
for ind, line in enumerate(rep_lines):
new_content = list(line.split(" "))
if new_content[-1] == '':
new_content = new_content[:-1]
if ind == 0:
for colname in colnames:
new_content.append(colname)
else:
for dist in distances:
value = "{:.3f}".format(dist[ind-1][0]*10)
new_content.append(value)
new_line = " ".join(new_content)
new_lines.append(new_line)
new_report = "\n".join(new_lines)
new_rep_name = report.split("/")
new_rep_name[-1] = "dist" + new_rep_name[-1]
new_rep_name = "/".join(new_rep_name)
with open(new_rep_name, "w") as out:
out.write(new_report)
print("{} completed".format(new_rep_name))
def compute_simulation_distance(sim_folder, atomlist, traj_pref="trajectory_", report_pref="report_", processors=4):
trajectories = sorted(glob.glob("{}*".format(os.path.join(sim_folder, traj_pref))))
reports = sorted(glob.glob("{}*".format(os.path.join(sim_folder, report_pref))))
joblib.Parallel(n_jobs=processors)(joblib.delayed(compute_distances_from_report)(atomlist, report, traj) for report, traj in zip(reports, trajectories))
if __name__ == '__main__':
sim_fold, atom_list, traj, report, processors = parseArguments()
compute_simulation_distance(sim_fold, atom_list, traj, report, processors)
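# Command-line sketch (hypothetical paths): compute two atom-atom distances for every
# report/trajectory pair found under a simulation folder, using 8 processors:
#
#   python compute_atom_atom_distance.py output_sim/ -a 10 25 -a 10 40 -p 8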
| 2.734375
| 3
|
Statistics/zscore.py
|
ankit-kushwaha-51/calculatorHw-1
| 0
|
12784448
|
<gh_stars>0
from Statistics.PopulationSD import population_standard_deviation
def zscore(datapoint, a, b, c, d, e, f):
try:
datapoint = float(datapoint)
a = float(a)
b = float(b)
c = float(c)
d = float(d)
e = float(e)
f = float(f)
data = []
data.append(a)
data.append(b)
data.append(c)
data.append(d)
data.append(e)
data.append(f)
sum = a+b+c+d+e+f
mean = sum/len(data)
sd = population_standard_deviation(a,b,c,d,e,f)
result = (datapoint-mean)/sd
return result
except ZeroDivisionError:
print("Cannot divide by zero")
except ValueError:
print("Numbers are not valid")
| 3.1875
| 3
|
setup.py
|
jpetrucciani/python-duckduckgo
| 6
|
12784449
|
<filename>setup.py
"""
pip setup.py for ddg3
"""
from setuptools import setup
__library__ = "ddg3"
__version__ = "VERSION"
with open("README.md") as readme:
LONG_DESCRIPTION = readme.read()
with open("requirements.txt") as requirements:
INSTALL_REQUIRES = requirements.read().split("\n")
INSTALL_REQUIRES = [x.strip() for x in INSTALL_REQUIRES if x.strip()]
setup(
name=__library__,
version=__version__,
py_modules=["ddg3"],
description="Library for querying the Duck Duck Go API, updated for python3",
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
author="<NAME>, <NAME>",
author_email="<EMAIL>",
license="BSD",
url="https://github.com/jpetrucciani/python-duckduckgo",
platforms=["any"],
install_requires=INSTALL_REQUIRES,
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Internet :: WWW/HTTP :: Indexing/Search",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
entry_points={"console_scripts": ["ddg3 = ddg3:main"]},
)
| 1.6875
| 2
|
inxs/contrib.py
|
Balletie/inxs
| 0
|
12784450
|
<reponame>Balletie/inxs
""" This module contains transformations that are supposedly of common interest. """
from lxml import etree
from inxs import (
TRAVERSE_DEPTH_FIRST, TRAVERSE_BOTTOM_TO_TOP, TRAVERSE_LEFT_TO_RIGHT, lib, utils,
Not, Rule,
SkipToNextElement, Transformation,
)
__all__ = []
# reduce_whitespaces
def _reduce_whitespace_handler(element):
if element.text:
element.text = utils.reduce_whitespaces(element.text, strip='')
if element.tail:
element.tail = utils.reduce_whitespaces(element.tail, strip='')
reduce_whitespaces = Transformation(
Rule('*', _reduce_whitespace_handler)
)
"""
Normalizes any whitespace character in an element's text and tail to a plain space
and collapses consecutive whitespace characters into one.
"""
__all__.append('reduce_whitespaces')
# remove_empty_elements
def _append_tail_to_previous_in_stream(element, skip_elements):
if etree.QName(element).localname in skip_elements:
raise SkipToNextElement
if not element.tail:
return
previous = element.getprevious()
if previous is None:
element.getparent().text += element.tail
elif previous.tail is None:
previous.tail = element.tail
else:
previous.tail += element.tail
remove_empty_elements = Transformation(
Rule(Not(lib.has_children, lib.has_text, lib.has_attributes, '/'),
(_append_tail_to_previous_in_stream, lib.remove_element)),
name='remove_empty_elements', context={'skip_elements': []},
traversal_order=(
TRAVERSE_DEPTH_FIRST | TRAVERSE_LEFT_TO_RIGHT | TRAVERSE_BOTTOM_TO_TOP
)
)
"""
Removes elements without attributes, text, tail and children from the (sub-)tree.
"""
__all__.append('remove_empty_elements')
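# Usage sketch (hedged: assumes the usual inxs convention that a Transformation
# instance is applied by calling it on an lxml element or tree):
#
#   doc = etree.fromstring("<root><a>  lots   of   space </a><b/></root>")
#   cleaned = reduce_whitespaces(doc)
#   pruned = remove_empty_elements(doc)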
| 2.328125
| 2
|