hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
20df17921cd623c6203bd44e441ee7f224b6468c | 2,273 | py | Python | 04_Data Manipulation with pandas/02_Aggregating Data/09_Multiple grouped summaries.py | mohd-faizy/DataScience-With-Python | 13ebb10cf9083343056d5b782957241de1d595f9 | [
"MIT"
] | 5 | 2021-02-03T14:36:58.000Z | 2022-01-01T10:29:26.000Z | 04_Data Manipulation with pandas/02_Aggregating Data/09_Multiple grouped summaries.py | mohd-faizy/DataScience-With-Python | 13ebb10cf9083343056d5b782957241de1d595f9 | [
"MIT"
] | null | null | null | 04_Data Manipulation with pandas/02_Aggregating Data/09_Multiple grouped summaries.py | mohd-faizy/DataScience-With-Python | 13ebb10cf9083343056d5b782957241de1d595f9 | [
"MIT"
] | 3 | 2021-02-08T00:31:16.000Z | 2022-03-17T13:52:32.000Z | '''
09 - Multiple grouped summaries
Earlier in this chapter, you saw that the .agg() method is useful to
compute multiple statistics on multiple variables. It also works with
grouped data. NumPy, which is imported as np, has many different summary
statistics functions, including: np.min, np.max, np.mean, and np.median.
Instructions:
- Import numpy with the alias np.
- Get the min, max, mean, and median of weekly_sales for each store type
using .groupby() and .agg(). Store this as sales_stats. Make sure to use
numpy functions!
- Get the min, max, mean, and median of unemployment and fuel_price_usd_per_l
for each store type. Store this as unemp_fuel_stats.
-----------------------------------------------------------------------------------------------------------------
sales.head()
store type department date weekly_sales is_holiday temperature_c fuel_price_usd_per_l unemployment
0 1 A 1 2010-02-05 24924.50 False 5.728 0.679 8.106
1 1 A 1 2010-03-05 21827.90 False 8.056 0.693 8.106
2 1 A 1 2010-04-02 57258.43 False 16.817 0.718 7.808
3 1 A 1 2010-05-07 17413.94 False 22.528 0.749 7.808
4 1 A 1 2010-06-04 17558.09 False 27.050 0.715 7.808
-----------------------------------------------------------------------------------------------------------------
'''
# Import pandas and numpy (numpy was previously imported twice; once is enough)
import numpy as np
import pandas as pd

# Load the Walmart sales data used by this exercise.
sales = pd.read_csv('content/wallmart_sales.csv')

# For each store type, aggregate weekly_sales: get min, max, mean, and median.
# NOTE: the exercise passes the NumPy functions themselves; recent pandas
# versions prefer the string names 'min'/'max'/... and warn about callables.
sales_stats = sales.groupby('type')['weekly_sales'].agg([np.min, np.max, np.mean, np.median])

# Print sales_stats
print(sales_stats)

# For each store type, aggregate unemployment and fuel_price_usd_per_l:
# get min, max, mean, and median.
unemp_fuel_stats = sales.groupby('type')[['fuel_price_usd_per_l', 'unemployment']].agg([np.min, np.max, np.mean, np.median])

# Print unemp_fuel_stats
print(unemp_fuel_stats)
| 45.46 | 124 | 0.571051 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,981 | 0.871535 |
20e09e04ed38a70521575f37bf12b56637794b22 | 2,739 | py | Python | TestGenerator.py | HIAOAIH/MeRGAN | 1191111d53440f0c361162a721e9a0b701243dd4 | [
"MIT"
] | null | null | null | TestGenerator.py | HIAOAIH/MeRGAN | 1191111d53440f0c361162a721e9a0b701243dd4 | [
"MIT"
] | null | null | null | TestGenerator.py | HIAOAIH/MeRGAN | 1191111d53440f0c361162a721e9a0b701243dd4 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
from torchvision import datasets, transforms
from ACGAN import Generator
class TestGenerator(object):
    """Evaluate a trained ACGAN generator with a pretrained ResNet-18 classifier.

    Images are sampled from the generator for random class labels, and the
    classifier's predictions are compared against those labels to estimate a
    generation "accuracy".
    """

    def __init__(self, args):
        """Build the classifier/generator pair and load their checkpoints.

        Args:
            args: parsed command-line namespace providing `dataset` ('MNIST'
                or 'SVHN'), `method`, `task` ('to_4' or 'to_9') and
                `batch_size`.

        Raises:
            ValueError: if `args.dataset` is not a supported dataset.
        """
        self.dataset = args.dataset
        self.method = args.method
        # Architecture only; weights come from the local checkpoint below.
        self.classifier = torch.hub.load('pytorch/vision:v0.5.0', 'resnet18', pretrained=False)
        if args.dataset == 'MNIST':
            channels = 1
            input_size = 28
        elif args.dataset == 'SVHN':
            channels = 3
            input_size = 32
        else:
            # Previously this fell through and crashed later with a NameError.
            raise ValueError('unsupported dataset: {}'.format(args.dataset))
        self.generator = Generator(output_channel=channels, input_size=input_size)
        self.task = args.task
        self.gpu_mode = torch.cuda.is_available()
        self.batch_size = args.batch_size
        self.noise_dim = 100
        self.total_class_num = 10
        if self.dataset == 'MNIST':
            # ResNet-18 expects 3-channel input; swap the stem for 1-channel MNIST.
            self.classifier.conv1 = nn.Conv2d(1, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
        self.classifier.load_state_dict(torch.load('./network/' + self.dataset + '/resnet18.pt'))
        self.generator.load_state_dict(torch.load('./network/' + self.dataset + '/' + self.method + '/generator_' + self.method + '_' + self.task + '.pt'))

    def test(self):
        """Sample labelled images and print running classification accuracy."""
        correct = 0
        self.classifier.eval()
        self.generator.eval()
        if self.gpu_mode:
            self.classifier, self.generator = self.classifier.cuda(), self.generator.cuda()
        for i in range(5000):
            noise = torch.rand(self.batch_size, self.noise_dim)
            # Draw the class conditioning for this batch.
            if self.task == 'to_4':
                one_hot_label = torch.randint(0, 5, [self.batch_size])
            elif self.task == 'to_9':
                one_hot_label = torch.randint(0, 10, [self.batch_size])
            else:
                # Previously an unknown task crashed later with a NameError.
                raise ValueError('unsupported task: {}'.format(self.task))
            # One-hot encode the answer labels (use the configured class count
            # instead of the previously hard-coded 10).
            labels = torch.zeros(self.batch_size, self.total_class_num).scatter_(
                1, one_hot_label.type(torch.LongTensor).unsqueeze(1), 1)
            if self.gpu_mode:
                noise, labels, one_hot_label = noise.cuda(), labels.cuda(), one_hot_label.cuda()
            generated_images = self.generator(noise, labels)
            output = self.classifier(generated_images)
            prediction = output.data.max(1, keepdim=True)[1]
            # Use self.gpu_mode (set once in __init__) instead of re-querying CUDA.
            if self.gpu_mode:
                correct += prediction.eq(one_hot_label.data.view_as(prediction)).long().cpu().sum()
            else:
                correct += prediction.eq(one_hot_label.data.view_as(prediction)).long().sum()
            if (i + 1) % 20 == 0:
                # BUG FIX: the denominator was hard-coded to 64; use the real batch size.
                seen = (i + 1) * self.batch_size
                print("Accuracy: {}/{} ({:.3f}%)".format(correct, seen, 100. * correct / seen))
| 40.880597 | 155 | 0.593282 | 2,628 | 0.959474 | 0 | 0 | 0 | 0 | 0 | 0 | 284 | 0.103687 |
20e1940d3d4620392074c473e2d9cc09298def3a | 4,915 | py | Python | tests/test_rpReaction.py | brsynth/rplibs | 37608cce6335783863455fb5696dbee032d46ad5 | [
"MIT"
] | null | null | null | tests/test_rpReaction.py | brsynth/rplibs | 37608cce6335783863455fb5696dbee032d46ad5 | [
"MIT"
] | null | null | null | tests/test_rpReaction.py | brsynth/rplibs | 37608cce6335783863455fb5696dbee032d46ad5 | [
"MIT"
] | null | null | null | """
Created on May 28 2021
@author: Joan Hérisson
"""
from unittest import TestCase
from copy import deepcopy
from rplibs import rpReaction
class Test_rpReaction(TestCase):
    """Unit tests for rpReaction accessors, serialisation and equality."""

    def setUp(self):
        # Minimal reaction fixture: one transformation, a single EC number
        # and its MIRIAM cross-reference.
        self.reactants = {
            "CMPD_0000000010": 1,
            "MNXM1": 1
        }
        self.products = {
            "CMPD_0000000003": 1,
            "MNXM13": 1
        }
        self.ec_numbers = [
            "4.1.1.63"
        ]
        self.id = "rxn"
        self.miriam = {'ec-code': ['4.1.1.63']}
        self.rxn = rpReaction(
            id=self.id,
            miriam=self.miriam,
            reactants=self.reactants,
            products=self.products
        )
        # RetroPath2 / reaction-rule metadata used by the getter tests below.
        self.rp2_transfo_id = 'TRS_0_0_0'
        self.rule_ids = ['RR-02-a0cc0be463ff412f-16-F']
        self.tmpl_rxn_ids = ['MNXR96458']
        self.rule_score = 0.5982208769718989
        self.selenzy = {
            'UniProtID_1': 65.65,
            'UniProtID_2': 77.77,
        }
        # Fields inherited from the base reaction class.
        self.inherited_dict = {
            'id': self.id,
            'reactants': self.reactants,
            'products': self.products,
            'ec_numbers': self.ec_numbers,
        }
        # Fields specific to rpReaction; selenzy scores are flattened into
        # 'selenzy_<UniProtID>' keys.
        self.specific_dict = {
            'idx_in_path': -1,
            'rp2_transfo_id': self.rp2_transfo_id,
            'rule_ids': self.rule_ids,
            'tmpl_rxn_ids': self.tmpl_rxn_ids,
            'rule_score': self.rule_score,
            **{
                'selenzy_'+id: self.selenzy[id]
                for id in self.selenzy
            }
        }

    ## READ METHODS
    def test_get_rp2_transfo_id(self):
        self.rxn.set_rp2_transfo_id(self.rp2_transfo_id)
        self.assertEqual(
            self.rxn.get_rp2_transfo_id(),
            self.rp2_transfo_id
        )

    def test_get_rule_ids(self):
        self.rxn.set_rule_ids(self.rule_ids)
        self.assertEqual(
            self.rxn.get_rule_ids(),
            self.rule_ids
        )

    def test_get_tmpl_rxn_ids(self):
        self.rxn.set_tmpl_rxn_ids(self.tmpl_rxn_ids)
        self.assertEqual(
            self.rxn.get_tmpl_rxn_ids(),
            self.tmpl_rxn_ids
        )

    def test_get_rule_score(self):
        self.rxn.set_rule_score(self.rule_score)
        self.assertEqual(
            self.rxn.get_rule_score(),
            self.rule_score
        )

    def test__to_dict(self):
        # full=True must merge inherited and specific fields; full=False must
        # expose only the rpReaction-specific ones.
        self.rxn.set_rp2_transfo_id(self.rp2_transfo_id)
        self.rxn.set_rule_ids(self.rule_ids)
        self.rxn.set_tmpl_rxn_ids(self.tmpl_rxn_ids)
        self.rxn.set_rule_score(self.rule_score)
        self.rxn.set_selenzy(self.selenzy)
        self.assertDictEqual(
            self.rxn._to_dict(full=True),
            {
                **self.inherited_dict,
                **self.specific_dict
            }
        )
        self.assertDictEqual(
            self.rxn._to_dict(full=False),
            self.specific_dict
        )

    def test___eq__(self):
        rxn = deepcopy(self.rxn)
        self.assertEqual(
            self.rxn,
            rxn
        )
        # idx_in_path is not part of the equality contract.
        rxn.set_idx_in_path(2)
        self.assertEqual(
            self.rxn,
            rxn
        )
        rxn.add_reactant('reac', 2)
        # objects are not equal
        self.assertNotEqual(
            self.rxn,
            rxn
        )
        # objects are not the same type
        self.assertNotEqual(
            self.rxn,
            'rxn'
        )

    def test_get_default_fbc(self):
        # Round-trip each default flux-bound value through its setter/getter.
        fbc = {
            'units': rpReaction.get_default_fbc_units(),
            'lower': rpReaction.get_default_fbc_lower(),
            'upper': rpReaction.get_default_fbc_upper()
        }
        for id, val in fbc.items():
            with self.subTest(f'Testing get/set fbc_{id}()', id=id, val=val):
                getattr(self.rxn, f'set_fbc_{id}')(val)
                self.assertEqual(
                    getattr(self.rxn, f'get_fbc_{id}')(),
                    val
                )

    def test_reversible(self):
        self.rxn.set_reversible(False)
        self.assertFalse(self.rxn.reversible())
        self.rxn.set_reversible(True)
        self.assertTrue(self.rxn.reversible())

    def test_miriam(self):
        self.assertDictEqual(
            self.rxn.get_miriam(),
            self.miriam
        )

    def test_selenzy(self):
        self.rxn.set_selenzy(self.selenzy)
        self.assertDictEqual(
            self.rxn.get_selenzy(),
            self.selenzy
        )
        id = 'UniProtID_1'
        self.assertEqual(
            self.rxn.get_selenzy_infos_fromID(id),
            self.selenzy[id]
        )
def test_add_miriam(self):
db = 'bigg'
xref = 'bigg_ID'
self.rxn.add_miriam(db, xref)
self.assertDictEqual(
self.rxn.get_miriam(),
{
**self.miriam,
**{
db: xref
}
}
) | 27.305556 | 77 | 0.521872 | 4,771 | 0.970504 | 0 | 0 | 0 | 0 | 0 | 0 | 515 | 0.10476 |
20e33764352d730024b8287c9a46b924f5639e3d | 342 | py | Python | MatrixSqrt.py | aminya/smop | d1ef857dbae08ed071b54685c106d4b5b40747c3 | [
"MIT"
] | null | null | null | MatrixSqrt.py | aminya/smop | d1ef857dbae08ed071b54685c106d4b5b40747c3 | [
"MIT"
] | 1 | 2019-10-16T03:45:08.000Z | 2019-10-16T03:45:08.000Z | MatrixSqrt.py | aminya/smop-for-Julia-Matlab-Python-Benchmark | d1ef857dbae08ed071b54685c106d4b5b40747c3 | [
"MIT"
] | null | null | null | # Generated with SMOP 0.41
from libsmop import *
# MatrixSqrt.m
@function
def MatrixSqrt(matrixSize=None,mX=None,mY=None,*args,**kwargs):
    """Return the matrix square root of mX' * mX.

    Auto-generated by SMOP from MatrixSqrt.m; the parameter list mirrors the
    MATLAB signature, so `matrixSize` and the incoming `mY` are unused here.
    NOTE(review): `dot` and `sqrtm` come from libsmop's star import — confirm.
    """
    # SMOP bookkeeping mirroring MATLAB's varargin/nargin semantics.
    varargin = MatrixSqrt.varargin
    nargin = MatrixSqrt.nargin

    mY=dot(mX.T,mX)
# MatrixSqrt.m:3
    mA=sqrtm(mY)
# MatrixSqrt.m:5
    return mA

if __name__ == '__main__':
    # Generated entry point; the translated script defines no standalone demo.
    pass
| 18 | 63 | 0.660819 | 0 | 0 | 0 | 0 | 225 | 0.657895 | 0 | 0 | 83 | 0.24269 |
20e3e3987464531bc5bd471dbee340a002da3c03 | 45 | py | Python | networks/std/__init__.py | Chappie733/MLPack | 223b142ff22dc35b9122183435afdc473a2c0b47 | [
"MIT"
] | null | null | null | networks/std/__init__.py | Chappie733/MLPack | 223b142ff22dc35b9122183435afdc473a2c0b47 | [
"MIT"
] | null | null | null | networks/std/__init__.py | Chappie733/MLPack | 223b142ff22dc35b9122183435afdc473a2c0b47 | [
"MIT"
] | null | null | null | from .layers import *
from .network import *
| 15 | 22 | 0.733333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
20e3f8bfa43cbc790e7b99c0ea59b40a7436d373 | 4,214 | py | Python | VGG19.py | Ralph-Finn/Deep-Image-Analogy-MXNet | 600b1f724fb58b11a260a80c089283226006bf37 | [
"MIT"
] | 2 | 2018-04-27T16:34:14.000Z | 2021-08-22T14:47:51.000Z | VGG19.py | Ralph-Finn/Deep-Image-Analogy-MXNet | 600b1f724fb58b11a260a80c089283226006bf37 | [
"MIT"
] | null | null | null | VGG19.py | Ralph-Finn/Deep-Image-Analogy-MXNet | 600b1f724fb58b11a260a80c089283226006bf37 | [
"MIT"
] | null | null | null | """
This code is modified from harveyslash's work (https://github.com/harveyslash/Deep-Image-Analogy-PyTorch)
"""
import mxnet as mx
from mxnet.gluon.model_zoo import vision as models
from mxnet.gluon import nn
from mxnet import nd
from mxnet import autograd
from time import time
from mxnet import optimizer
import sys
class VGG19:
    """Truncated VGG-19 feature extractor plus feature "deconvolution".

    Wraps a pretrained MXNet Gluon VGG-19, exposes intermediate feature maps,
    and optimizes a noise image so that a sub-network reproduces a target
    feature map (used by Deep Image Analogy).
    """

    def __init__(self):
        vgg19_model = models.vgg19(pretrained=False)
        vgg19_model.load_params("model/vgg19.params", ctx=mx.cpu(0))  # pre-trained net is in cpu
        self.use_cuda = True
        # ImageNet channel statistics used for input normalisation.
        self.mean = nd.array([0.485, 0.456, 0.406])
        self.std = nd.array([0.229, 0.224, 0.225])
        self.ctx = mx.gpu(0)  # all computation happens on GPU 0
        self.model = self.get_model(vgg19_model)
        self.smooth = 0.5  # weight of the total-variation term in go() at layer 0

    def get_model(self, pretrained_net):
        # We need to redefine a new network because pre-trained structures
        # cannot be read directly as "arrays": copy the first 40 feature
        # layers into an indexable Sequential.
        net = nn.Sequential()
        for i in range(40):
            net.add(pretrained_net.features[i])
        net.collect_params().reset_ctx(ctx=self.ctx)
        return net

    def preprocess(self, img):
        # HWC uint8-style image -> normalised NCHW batch of one.
        img = (nd.array(img).astype('float32') / 255.0 - self.mean) / self.std
        return img.transpose((2, 0, 1)).expand_dims(axis=0)

    def forward_subnet(self, x, start_layer, end_layer):
        # Run x through layers start_layer..end_layer inclusive only.
        for i, layer in enumerate(list(self.model)):
            if start_layer <= i <= end_layer:
                x = layer(x)
        return x

    def get_features(self, img_tensor, layers):
        """Return ([features], [shapes]) for the given layer indices.

        The raw preprocessed input is included as well; both lists are
        returned deepest-layer first.
        """
        img_tensor = self.preprocess(img_tensor)
        img_tensor = nd.array(img_tensor).copyto(self.ctx)
        features = []
        sizes = []
        x = img_tensor
        features.append(img_tensor)
        sizes.append(img_tensor.shape)
        for i in range(len(self.model)):
            x = self.model[i](x)
            if i in layers:
                features.append(x)
                sizes.append(x.shape)
        features.reverse()
        sizes.reverse()
        return features, sizes

    def get_deconvoluted_feat(self, feat, curr_layer, init=None, lr=10, iters=3000):
        # Deconvolution process: deconvolute the feature on one layer
        # (e.g. L4) to the second last layer (e.g. L2) and forward it to the
        # last layer (e.g. L3).
        blob_layers = [29, 20, 11, 6, 1, -1]  # layer indices of the VGG feature "blobs"
        end_layer = blob_layers[curr_layer]
        mid_layer = blob_layers[curr_layer + 1]
        start_layer = blob_layers[curr_layer + 2] + 1

        # make sure the data is in GPU
        noise = init.copyto(self.ctx)
        target = feat.copyto(self.ctx)

        # Build the sub-network spanning start_layer..end_layer.
        net = nn.Sequential()
        for layer_num, layer in enumerate(list(self.model)):
            if start_layer <= layer_num <= end_layer:  # python simplified
                net.add(layer)
        net.collect_params().reset_ctx(ctx=self.ctx)

        def tv_loss(x):
            # Anisotropic total variation: encourages spatial smoothness.
            return (x[:, :, 1:, :] - x[:, :, :-1, :]).abs().sum() + (x[:, :, :, 1:] - x[:, :, :, :-1]).abs().sum()

        def go(x):
            # Reconstruction loss; TV regularisation only at the image layer.
            output = net(x)
            if curr_layer == 0:
                loss = (output - target).square().sum() + self.smooth * tv_loss(x)
            else:
                loss = (output - target).square().sum()
            return loss

        def train(x, lr, iters):
            # Hand-driven Adam loop: sqr/v are the optimizer state passed to
            # optim.update. NOTE(review): assumes optimizer.Adam.update
            # accepts (t, weight, grad, [sqr, v]) — confirm against the
            # installed MXNet version.
            tic = time()
            t = 1
            v = x.zeros_like()
            sqr = x.zeros_like()
            optim = optimizer.Adam(learning_rate=lr)
            for idx in range(iters):
                with autograd.record():
                    loss = go(x)
                loss.backward()
                optim.update(t, x, x.grad, [sqr, v])
                nd.waitall()  # TODO:it is a time cost operation
                t = t + 1
                sys.stdout.write('\r training..........%s%%' % (100 * idx // iters + 1))
                sys.stdout.flush()
            print(" all_train_time:", time() - tic)
            return x

        # begin training,just like style transfer
        noise.attach_grad()
        noise = train(noise, lr, iters)
        # Forward the optimized noise only up to mid_layer for the caller.
        out = self.forward_subnet(noise, start_layer, mid_layer)
        return out
| 36.964912 | 114 | 0.561224 | 3,890 | 0.923113 | 0 | 0 | 0 | 0 | 0 | 0 | 680 | 0.161367 |
20e4d477f0b097ab0df3742373535cd12e0f32f8 | 3,964 | py | Python | backend/indexing/test_fragment_doc.py | rsimmons/massif | d97ee63f618e0d05308722158992f804e2c54fa5 | [
"MIT"
] | 24 | 2021-06-20T16:54:14.000Z | 2022-03-31T03:46:13.000Z | backend/indexing/test_fragment_doc.py | rsimmons/massif | d97ee63f618e0d05308722158992f804e2c54fa5 | [
"MIT"
] | 6 | 2021-07-04T14:49:38.000Z | 2022-03-27T09:19:58.000Z | backend/indexing/test_fragment_doc.py | rsimmons/massif | d97ee63f618e0d05308722158992f804e2c54fa5 | [
"MIT"
] | 1 | 2021-08-25T14:31:22.000Z | 2021-08-25T14:31:22.000Z | import json
from .fragment_doc import fragment_srt, fragment_syosetu, has_unbalanced_quotes, extract_kana_kanji
# (input text, expected kana+kanji extraction) pairs.
EXTRACT_KANA_KANJI_CASES = [
    ['asdf.!ä', ''],
    ['あいうえお', 'あいうえお'],
    ['asdこfdれ', 'これ'],
    ['「ああ、畜生」foo', 'ああ畜生'],
]

for case in EXTRACT_KANA_KANJI_CASES:
    sample, expected = case
    # Report (but do not abort on) any mismatching extraction.
    if extract_kana_kanji(sample) != expected:
        print('FAIL EXTRACT KANA+KANJI')
        print(sample)
# (input text, expected "has unbalanced quotes" flag) pairs.
QUOTE_BALANCE_CASES = [
    ['あいうえお', False],
    ['あい「うえお', True],
    ['「あいうえお', True],
    ['「あいうえお」', False],
    ['あい「うえ」お', False],
    ['「あい」う「えお」', False],
    ['「あいう「えお」」', False],
    ['「あい「うえ」お', True],
    ['あい「うえ」お」', True],
]

for case in QUOTE_BALANCE_CASES:
    sample, expected = case
    # Report (but do not abort on) any mismatching balance check.
    if has_unbalanced_quotes(sample) != expected:
        print('FAIL QUOTE BALANCE')
        print(sample)
FRAG_CASES = [
['S',
'''
1
00:02:17,440 --> 00:02:20,375
Senator, we're making
our final approach into Coruscant.
2
00:02:20,476 --> 00:02:22,501
Very good, Lieutenant.
''',
[
{'text': "Senator, we're making our final approach into Coruscant.", 'loc': 't:137.440-140.375'},
{'text': 'Very good, Lieutenant.', 'loc': 't:140.476-142.501'},
]
],
# no anchor novel
['N', '<div><p>食べる</p></div>', [{'text': "食べる"}]],
# anchor novel
['N', '<div><p id="L123">食べる</p></div>', [{'text': '食べる', 'loc': 'a:L123'}]],
# no splitting
['N', '<div><p>それでは、行ってまいります</p></div>',
[
{'text': 'それでは、行ってまいります'},
]
],
# simple splitting
['N', '<div><p>そのせいだろうか。あの日に見た空の青を、よく覚えている。</p></div>',
[
{'text': 'そのせいだろうか。'},
{'text': 'あの日に見た空の青を、よく覚えている。'},
]
],
# strip leading dashes
['N', '<div><p>――ああ、そうだったのですか。</p></div>',
[
{'text': 'ああ、そうだったのですか。'},
]
],
# strip leading ellipses
['N', '<div><p>……そうか?</p></div>',
[
{'text': 'そうか?'},
]
],
# strip matching quotes
['N', '<div><p>「ああ、畜生」</p></div>',
[
{'text': 'ああ、畜生'},
]
],
# strip just leading open quote
['N', '<div><p>「あっ、大丈夫です!</p></div>',
[
{'text': 'あっ、大丈夫です!'},
]
],
# strip just trailing close quote
['N', '<div><p>王宮に神父がいるかっ」</p></div>',
[
{'text': '王宮に神父がいるかっ'},
]
],
# combo
['N', '<div><p>「……うん」</p></div>',
[
{'text': 'うん'},
]
],
# don't strip trailing ellipses
['N', '<div><p>「……血……血が……………」</p></div>',
[
{'text': '血……血が……………'},
]
],
# ignore fragments that start with close quote
['N', '<div><p>」と見開いた。</p></div>', []],
# handle other quotes
['N', '<div><p>『モルツ、少し休憩する』</p></div>',
[
{'text': 'モルツ、少し休憩する'},
]
],
# remove leading speaker label
['N', '<div><p>【ポルペオ】「なんだ、その目は?</p></div>',
[
{'text': 'なんだ、その目は?'},
]
],
# remove drama-style speaker label
['N', '<div><p>(平次)おい 大変だ。</p></div>',
[
{'text': 'おい 大変だ。'},
]
],
# TODO: can we get rid of the leading dash?
# ['N', '<div><p id="L75">─ 〝城内〟に命ず。騎士団による警備を撤去せよ。</p></div>',
# [
# {'text': '〝城内〟に命ず。', 'loc': 'a:L75'},
# {'text': '騎士団による警備を撤去せよ。', 'loc': 'a:L75'}
# ]
# ],
]
for kind, text, expected_result in FRAG_CASES:
    # Dispatch on the case kind: 'S' = SRT subtitle, 'N' = syosetu novel HTML.
    assert kind in ('S', 'N')
    fragmenter = fragment_srt if kind == 'S' else fragment_syosetu
    result = fragmenter(text, None)

    # Canonical-JSON comparison (original note: "hacky, but should be OK")
    # tolerates key-ordering differences between result dicts.
    actual_json = json.dumps(result, sort_keys=True)
    expected_json = json.dumps(expected_result, sort_keys=True)
    if actual_json != expected_json:
        print('FAIL')
        print('TEXT-----------------')
        print(text)
        print('TARGET RESULT--------')
        print(repr(expected_result))
        print('ACTUAL RESULT--------')
        print(repr(result))
        print()
| 23.046512 | 109 | 0.461403 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,805 | 0.583039 |
20e716c140ceb51fcabaef36632da4cd0336339d | 1,399 | py | Python | tetrapod/session.py | uwekamper/tetrapod | 1782a2e8a0c4cd93ff1499fdb2fead45a2d5cbd2 | [
"MIT"
] | null | null | null | tetrapod/session.py | uwekamper/tetrapod | 1782a2e8a0c4cd93ff1499fdb2fead45a2d5cbd2 | [
"MIT"
] | null | null | null | tetrapod/session.py | uwekamper/tetrapod | 1782a2e8a0c4cd93ff1499fdb2fead45a2d5cbd2 | [
"MIT"
] | null | null | null | import os
import math
import time
import datetime
import logging
from . import podio_auth
log = logging.getLogger(__file__)


def try_environment_token():
    """Build an OAuth2 token dict from environment variables.

    Reads TETRAPOD_CLIENT_ID and TETRAPOD_ACCESS_TOKEN.

    :return: dict with 'access_token', 'client_id' and 'token_type' keys,
        or None when either variable is missing.
    """
    try:
        client_id = os.environ['TETRAPOD_CLIENT_ID']
        access_token = os.environ['TETRAPOD_ACCESS_TOKEN']
    except KeyError:
        # Either variable missing: fall back to the credentials file.
        # (The previously bound-but-unused `as e` was removed.)
        log.info('Environment variables TETRAPOD_CLIENT_ID and TETRAPOD_ACCESS_TOKEN not set.')
        return None
    log.info('Loading OAuth2 token from environment.')
    return {
        "access_token": str(access_token),
        "client_id": str(client_id),
        "token_type": "bearer"
    }
def create_podio_session(credentials_file=None, credentials=None, check=True, robust=False):
    """Build an authenticated Podio client.

    The OAuth2 token is taken from the environment when available, otherwise
    it is loaded from the on-disk credentials file.
    """
    token = try_environment_token()
    if token is None:
        log.info('Loading OAuth2 token from credentials file.')
        loader_args = () if credentials_file is None else (credentials_file,)
        token = podio_auth.load_token(*loader_args)
    return podio_auth.make_client(token['client_id'], token, check=check, enable_robustness=robust)
def create_app_auth_session(client_id:str, client_secret:str, app_id:int, app_token:str):
return podio_auth.make_app_auth_client(client_id, client_secret, app_id, app_token) | 31.795455 | 101 | 0.716226 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 387 | 0.276626 |
20e755a64ee836a08b5fdee412b040a3939be9af | 652 | py | Python | src/minerva_db/sql/models/group.py | labsyspharm/minerva-db | 49c205fc5d9bcc513b4eb21b6493c928ea711fce | [
"MIT"
] | null | null | null | src/minerva_db/sql/models/group.py | labsyspharm/minerva-db | 49c205fc5d9bcc513b4eb21b6493c928ea711fce | [
"MIT"
] | 2 | 2018-06-06T13:29:23.000Z | 2018-07-25T00:36:38.000Z | src/minerva_db/sql/models/group.py | sorgerlab/minerva-db | 49c205fc5d9bcc513b4eb21b6493c928ea711fce | [
"MIT"
] | 1 | 2020-03-06T23:53:42.000Z | 2020-03-06T23:53:42.000Z | from sqlalchemy import Column, ForeignKey, String
from sqlalchemy.orm import relationship
# from sqlalchemy.ext.associationproxy import association_proxy
from .subject import Subject
class Group(Subject):
    """A named group of users; a polymorphic subclass of Subject."""

    __mapper_args__ = {
        'polymorphic_identity': 'group',
    }

    # Shares its primary key with the Subject base row (joined-table inheritance).
    uuid = Column(String(36), ForeignKey(Subject.uuid), primary_key=True)
    name = Column('name', String(64), unique=True, nullable=False)

    # Users reach groups through the membership association table.
    users = relationship('User', viewonly=True, secondary='t_membership')
    memberships = relationship('Membership', back_populates='group')

    def __init__(self, uuid, name):
        self.uuid = uuid
        self.name = name

    def __repr__(self):
        # Added for easier debugging; does not affect persistence behaviour.
        return 'Group(uuid={!r}, name={!r})'.format(self.uuid, self.name)
| 29.636364 | 73 | 0.714724 | 466 | 0.714724 | 0 | 0 | 0 | 0 | 0 | 0 | 137 | 0.210123 |
20e77eb0b4c59c0312f4fc50878035a552c60784 | 364 | py | Python | web-ui/AIOT-635/instance/config.py | kenhutaiwan/learning-practice | 6597a691fd090de718937087f3610971696f4894 | [
"MIT"
] | null | null | null | web-ui/AIOT-635/instance/config.py | kenhutaiwan/learning-practice | 6597a691fd090de718937087f3610971696f4894 | [
"MIT"
] | 10 | 2021-04-16T01:00:49.000Z | 2021-12-09T13:41:20.000Z | web-ui/AIOT-635/instance/config.py | kenhutaiwan/learning-practice | 6597a691fd090de718937087f3610971696f4894 | [
"MIT"
] | null | null | null | JIRA_DOMAIN = {'RD2': ['innodiv-hwacom.atlassian.net','innodiv-hwacom.atlassian.net'],
'RD5': ['srddiv5-hwacom.atlassian.net']}
# Path prefix for all Jira Agile REST API endpoints.
API_PREFIX = 'rest/agile/1.0'

# project_id : column_name
# Maps a Jira project id to the custom field that stores its story points.
STORY_POINT_COL_NAME = {'10005': 'customfield_10027',
                        '10006': 'customfield_10027',
                        '10004': 'customfield_10026'
                        }
| 40.444444 | 86 | 0.604396 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 220 | 0.604396 |
20e8d9128c41d94efd5a36d1c06da32a9be81f31 | 2,922 | py | Python | script/hardware/create_hardware_settings.py | bohanjason/website | 6df863c6a4c8b1c3d263700187d1c5d777b29a3c | [
"Apache-2.0"
] | null | null | null | script/hardware/create_hardware_settings.py | bohanjason/website | 6df863c6a4c8b1c3d263700187d1c5d777b29a3c | [
"Apache-2.0"
] | null | null | null | script/hardware/create_hardware_settings.py | bohanjason/website | 6df863c6a4c8b1c3d263700187d1c5d777b29a3c | [
"Apache-2.0"
] | null | null | null | import sys
import csv
import json
import shutil
from collections import OrderedDict
# NOTE(review): this is a Python 2 script — it relies on dict.iteritems()
# and the Python 2-only `encoding=` keyword of json.dumps/json.dump.

# Parsed EC2 instance entries keyed by instance-type name (insertion order kept).
HW = OrderedDict()

with open('ec2_instance_types.csv', 'r') as f:
    reader = csv.reader(f)
    for i,row in enumerate(reader):
        if i == 0:
            # First row holds the column names; reused for additional_specs.
            header = row
        else:
            entry = {}
            entry['type'] = i+1          # numeric type id; 1 is reserved for GENERIC
            entry['name'] = row[0]
            entry['cpu'] = int(row[1])
            entry['memory'] = float(row[2].replace(',', ''))
            storage_str = row[3]
            storage_type = None
            # Derive the storage technology from the free-form storage column,
            # with special cases for families that do not spell it out.
            if 'EBS' in storage_str:
                storage_type = 'EBS'
            elif 'NVMe' in storage_str:
                storage_type = 'NVMe'
            elif 'SSD' in storage_str:
                storage_type = 'SSD'
            elif entry['name'].startswith('r4'):
                storage_type = 'EBS'
            elif entry['name'].startswith('d2'):
                storage_type = 'HDD'
            elif entry['name'] == 'f1.16xlarge':
                storage_type = 'SSD'
            else:
                raise Exception('Unknown storage type for {}'.format(entry['name']))
            storage_list = None          # NOTE(review): unused leftover variable
            if storage_type == 'EBS':
                # EBS-only instances get two fixed 40 GB volumes.
                entry['storage'] = '40,40'
            elif entry['name'] == 'f1.2xlarge':
                entry['storage'] = storage_str.split(' ')[0]
            else:
                # "N x SIZE ..." format: repeat SIZE per device, capped at 4 devices.
                parts = storage_str.split(' ')
                num_devices = 4 if int(parts[0]) > 4 else int(parts[0])
                size = parts[2].replace(',', '')
                entry['storage'] = ','.join([size for _ in range(num_devices)])
            entry['storage_type'] = storage_type
            # Remaining CSV columns are preserved verbatim as a JSON blob.
            entry['additional_specs'] = json.dumps(OrderedDict(zip(header[4:], row[4:])), encoding='utf-8')
            HW[entry['name']] = entry

# For types.HardwareTypes: (CONSTANT_NAME, numeric id, display name) triples.
hw_consts = [('GENERIC', 1, 'generic')]
for k,v in HW.iteritems():
    hw_consts.append(('EC2_{}'.format(k.replace('.', '').upper()), v['type'], k))
hw_str = ' '.join(['{} = {};'.format(k, v) for k,v,_ in hw_consts])
type_names = ', '.join(['{}: \'{}\''.format(k,n) for k,_,n in hw_consts])

# Emit the constants as copy-pasteable source text.
with open('hardware_types.txt', 'w') as f:
    f.write(hw_str + '\n')
    f.write('TYPE_NAMES = {' + type_names + '}')

# Django-style fixture entries for the website.Hardware model.
entries = []
for k,v in HW.iteritems():
    entries.append({
        "model": "website.Hardware",
        'fields': v
    })

with open('hardware.json', 'w') as f:
    json.dump(entries, f, encoding='utf-8', indent=4)

shutil.copy('hardware.json', '../../preload/hardware.json')
| 34.785714 | 109 | 0.519849 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 921 | 0.315195 |
20e93041941fdeb70e6476bed35d7399ad209e43 | 4,341 | py | Python | export_onnx.py | ak9250/BackgroundMattingV2 | 4e1b2fa429260e50342ef54f53de71962f7babe8 | [
"MIT"
] | 1 | 2020-12-24T01:19:39.000Z | 2020-12-24T01:19:39.000Z | export_onnx.py | dangchaojin/BackgroundMattingV2 | 97e2df124d0fa96eb7f101961a2eb806cdd25049 | [
"MIT"
] | null | null | null | export_onnx.py | dangchaojin/BackgroundMattingV2 | 97e2df124d0fa96eb7f101961a2eb806cdd25049 | [
"MIT"
] | 1 | 2021-11-23T13:57:17.000Z | 2021-11-23T13:57:17.000Z | """
Export MattingRefine as ONNX format
Example:
python export_onnx.py \
--model-type mattingrefine \
--model-checkpoint "PATH_TO_MODEL_CHECKPOINT" \
--model-backbone resnet50 \
--model-backbone-scale 0.25 \
--model-refine-mode sampling \
--model-refine-sample-pixels 80000 \
--onnx-opset-version 11 \
--onnx-constant-folding \
--precision float32 \
--output "model.onnx" \
--validate
"""
import argparse
import torch
from model import MattingBase, MattingRefine
# --------------- Arguments ---------------

parser = argparse.ArgumentParser(description='Export ONNX')
parser.add_argument('--model-type', type=str, required=True, choices=['mattingbase', 'mattingrefine'])
parser.add_argument('--model-backbone', type=str, required=True, choices=['resnet101', 'resnet50', 'mobilenetv2'])
parser.add_argument('--model-backbone-scale', type=float, default=0.25)
parser.add_argument('--model-checkpoint', type=str, required=True)
parser.add_argument('--model-refine-mode', type=str, default='sampling', choices=['full', 'sampling', 'thresholding'])
parser.add_argument('--model-refine-sample-pixels', type=int, default=80_000)
parser.add_argument('--model-refine-threshold', type=float, default=0.7)
parser.add_argument('--model-refine-kernel-size', type=int, default=3)
# NOTE(review): `type=bool` treats any non-empty string as True — argparse
# pitfall; consider action='store_true' / 'store_false' instead.
parser.add_argument('--onnx-verbose', type=bool, default=True)
parser.add_argument('--onnx-opset-version', type=int, default=12)
# NOTE(review): default=True combined with action='store_true' makes this
# flag effectively always True.
parser.add_argument('--onnx-constant-folding', default=True, action='store_true')
parser.add_argument('--device', type=str, default='cpu')
parser.add_argument('--precision', type=str, default='float32', choices=['float32', 'float16'])
parser.add_argument('--validate', action='store_true')
parser.add_argument('--output', type=str, required=True)
args = parser.parse_args()

# --------------- Main ---------------

# Load model (weights loaded non-strictly so refiner-only keys don't fail).
if args.model_type == 'mattingbase':
    model = MattingBase(args.model_backbone)
if args.model_type == 'mattingrefine':
    model = MattingRefine(
        args.model_backbone,
        args.model_backbone_scale,
        args.model_refine_mode,
        args.model_refine_sample_pixels,
        args.model_refine_threshold,
        args.model_refine_kernel_size,
        refine_patch_crop_method='roi_align',
        refine_patch_replace_method='scatter_element')

model.load_state_dict(torch.load(args.model_checkpoint, map_location=args.device), strict=False)
precision = {'float32': torch.float32, 'float16': torch.float16}[args.precision]
model.eval().to(precision).to(args.device)

# Dummy Inputs: batch of two 1080p source/background frames for tracing.
src = torch.randn(2, 3, 1080, 1920).to(precision).to(args.device)
bgr = torch.randn(2, 3, 1080, 1920).to(precision).to(args.device)

# Export ONNX; every input/output gets dynamic batch/height/width axes.
if args.model_type == 'mattingbase':
    input_names=['src', 'bgr']
    output_names = ['pha', 'fgr', 'err', 'hid']
if args.model_type == 'mattingrefine':
    input_names=['src', 'bgr']
    output_names = ['pha', 'fgr', 'pha_sm', 'fgr_sm', 'err_sm', 'ref_sm']

torch.onnx.export(
    model=model,
    args=(src, bgr),
    f=args.output,
    verbose=args.onnx_verbose,
    opset_version=args.onnx_opset_version,
    do_constant_folding=args.onnx_constant_folding,
    input_names=input_names,
    output_names=output_names,
    dynamic_axes={name: {0: 'batch', 2: 'height', 3: 'width'} for name in [*input_names, *output_names]})

print(f'ONNX model saved at: {args.output}')

# Validation: re-run both the PyTorch model and the exported ONNX model on a
# fresh (differently sized) input and compare outputs element-wise.
if args.validate:
    import onnxruntime
    import numpy as np
    print(f'Validating ONNX model.')
    # Test with different inputs.
    src = torch.randn(1, 3, 720, 1280).to(precision).to(args.device)
    bgr = torch.randn(1, 3, 720, 1280).to(precision).to(args.device)
    with torch.no_grad():
        out_torch = model(src, bgr)
    sess = onnxruntime.InferenceSession(args.output)
    out_onnx = sess.run(None, {
        'src': src.cpu().numpy(),
        'bgr': bgr.cpu().numpy()
    })
    e_max = 0
    for a, b, name in zip(out_torch, out_onnx, output_names):
        b = torch.as_tensor(b)
        e = torch.abs(a.cpu() - b).max()
        e_max = max(e_max, e.item())
        print(f'"{name}" output differs by maximum of {e}')
if e_max < 0.001:
print('Validation passed.')
else:
raise 'Validation failed.' | 33.392308 | 118 | 0.668279 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,466 | 0.33771 |
20e9cc261d322667f8ff8f2e1f32fe37565bcb4c | 21,486 | py | Python | htm_rl/htm_rl/modules/empowerment.py | cog-isa/htm-rl | baf5b67a11283d37165bf6a29d6808a234d6d98c | [
"MIT"
] | 1 | 2021-12-09T22:09:24.000Z | 2021-12-09T22:09:24.000Z | htm_rl/htm_rl/modules/empowerment.py | cog-isa/htm-rl | baf5b67a11283d37165bf6a29d6808a234d6d98c | [
"MIT"
] | null | null | null | htm_rl/htm_rl/modules/empowerment.py | cog-isa/htm-rl | baf5b67a11283d37165bf6a29d6808a234d6d98c | [
"MIT"
] | 1 | 2021-11-18T08:54:20.000Z | 2021-11-18T08:54:20.000Z | import numpy as np
import matplotlib.pyplot as plt
from tqdm import trange
from htm.bindings.sdr import SDR
from htm.bindings.algorithms import TemporalMemory
from htm.bindings.algorithms import SpatialPooler
from itertools import product
from copy import deepcopy
import json
EPS = 1e-12
class Memory:
"""
The Memory object saves SDR representations of states and clusterizes them using the similarity measure.
The SDR representation must have fixed sparsity of active cells for correct working.
Parameters
----------
size : int
The size is the size of SDR representations, which are stored
threshold: float
The threshold is used to determine then it's necessary to create a new cluster.
Attributes
----------
size: int
It stores size argument.
kernels : np.array
This is the list of created clusters representations in dence form. It contains information about frequency of
cell's activity (for each cluster) during working. Its shape: (number of clusters, size).
norms: np.array
This is the list of representations amount for each cluster. Its shape: (munber of clusters, 1)
threshold: float
It stores threshold argument.
"""
def __init__(self, size, threshold=0.5):
self.kernels = None
self.norms = None
self.threshold = threshold
self.size = size
@property
def number_of_clusters(self):
if (self.kernels is not None) and (self.kernels.ndim == 2):
return self.kernels.shape[0]
else:
return 0
def add(self, state):
""" Add a new SDR representation (store and clusterize).
Parameters
----------
state: np.array
This is the SDR representation (sparse), that we want to store ande clusterize with other stored SDRs.
Returns
-------
"""
state_dense = np.zeros(self.size)
state_dense[state] = 1
sims = self.similarity(state_dense)
if np.sum(sims > self.threshold) == 0:
if self.kernels is None:
self.kernels = state_dense.reshape((1, -1))
self.norms = np.array([[1]])
else:
self.kernels = np.vstack((self.kernels, state_dense))
self.norms = np.vstack((self.norms, [1]))
else:
self.kernels[np.argmax(sims)] += state_dense
self.norms[np.argmax(sims)] += 1
def similarity(self, state):
"""This function evaluate similarity measure between stored clusters and new state.
Parameters
----------
state: np.array
The sparse representation of the state to be compared.
Returns
-------
similarities: np.array
The similarity measures for given state. If the Memory object don't have any saved clusters, then the empty
array is returned, else returned array contained similarities between the state and each cluster.
Its shape: (number of kernels, 1).
"""
if self.kernels is None:
return np.array([])
else:
normalised_kernels = self.kernels / self.norms
sims = normalised_kernels @ state.T / (
np.sqrt(np.sum(normalised_kernels ** 2, axis=1)) * np.sqrt(state @ state.T))
similarities = sims.T
return similarities
def adopted_kernels(self, sparsity):
"""This function normalises stored representations and cuts them by sparsity threshold.
Parameters
----------
sparsity: float
The sparsity of active cells in stored SDR representations.
Returns
-------
clusters_representations: np.array
Normalised and cutted representations of each cluster. The cutting is done by choosing the most frequent
active cells (their number is defined by sparsity) in kernels attribute. All elements of array are
in [0, 1]. The shape is (number of clusters, 1).
"""
data = np.copy(self.kernels)
data[data < np.quantile(data, 1 - sparsity, axis=1).reshape((-1, 1))] = 0
clusters_representations = data / self.norms
return clusters_representations
class Empowerment:
    """
    The Empowerment object contains all necessary things to evaluate 'empowerment' using the model of environment. This
    model is created and learned here as well.

    Parameters
    ----------
    seed: int
        The seed for random generator.
    encode_size: int
        The size of SDR representations which is taken by model.
    tm_config: dict
        It contains all parameters for initialisation of the TemporalMemory without the columnDimensions.
        columnDimensions is defined inside Empowerment.
    sparsity: float
        The sparsity of SDR representations which are used in the TemporalMemory algorithm.
    sp_config (optional): dict
        It contains all parameters for initialisation of the SpatialPooler without the inputDimensions
        and localAreaDensity. They are defined inside Empowerment. By default sp_config is None which means the absence
        of SpatialPooler.
    memory (optional): bool
        This parameter defines whether the Memory is used for saving and clusterization of state representations.
        By default is False (doesn't use the Memory).
    similarity_threshold (optional): float
        This parameter determines the threshold for cluster creation. It is used when memory is True. By default: 0.6.
    evaluate (optional): bool
        This flag defines the necessity of storing some statistics to evaluate the learning process.
        By default is True.
    filename (optional): str
        When given, precomputed empowerment values are loaded from this json file
        instead of building a TM/SP model.

    Attributes
    ----------
    evaluate: bool
        It stores the same parameter.
    anomalies: list
        It stores the anomaly values of TM for each time step after learning. Only when evaluate is True.
    IoU: list
        It stores the Intersection over Union values of TM predictions and real ones for each time step after learning.
        Only when evaluate is True.
    sparsity: float
        It stores the same parameter.
    sp: SpatialPooler
        It contains the SpatialPooler object if it was defined, else None
    tm: TemporalMemory
        It contains the TemporalMemory object.
    size: int
        Number of TM columns (encoder output size).
    memory: Memory
        It contains the Memory object if memory parameter is True, else None.
    """
    def __init__(self, seed, encode_size, tm_config, sparsity,
                 sp_config=None,
                 memory=False,
                 similarity_threshold=0.6,
                 evaluate=True,
                 filename=None):
        self.filename = filename
        # With no filename the full TM (and optional SP) model is constructed.
        if self.filename is None:
            self.evaluate = evaluate
            if evaluate:
                self.anomalies = []
                self.IoU = []

            self.sdr_0 = SDR(encode_size)
            self.sdr_1 = SDR(encode_size)
            self.sparsity = sparsity

            if sp_config is not None:
                # SP re-encodes the input; TM dimensions follow the SP output.
                self.sp = SpatialPooler(inputDimensions=[encode_size],
                                        seed=seed,
                                        localAreaDensity=sparsity,
                                        **sp_config,
                                        )
                self.tm = TemporalMemory(
                    columnDimensions=self.sp.getColumnDimensions(),
                    seed=seed,
                    **tm_config,
                )
                self.sdr_sp = SDR(self.sp.getColumnDimensions())
                self.size = self.sp.getColumnDimensions()[0]
            else:
                self.sp = None
                self.tm = TemporalMemory(
                    columnDimensions=[encode_size],
                    seed=seed,
                    **tm_config,
                )
                self.size = self.tm.getColumnDimensions()[0]

            if memory:
                self.memory = Memory(self.tm.getColumnDimensions()[0], threshold=similarity_threshold)
            else:
                self.memory = None
        else:
            # Precomputed mode: just load the empowerment table from json.
            with open(self.filename) as json_file:
                self.empowerment_data = json.load(json_file)

    def eval_from_file(self, position):
        """Look up the precomputed empowerment for a (row, col) position
        in the table loaded from ``filename``."""
        return self.empowerment_data[str(position[0])][str(position[1])]

    def eval_state(self, state, horizon, use_segments=False, use_memory=False):
        """This function evaluates empowerment for a given state.

        Parameters
        ----------
        state: np.array
            The SDR representation (sparse) of the state.
        horizon: int
            The horizon of evaluating for given state. A good value is 3.
        use_segments (optional): bool
            The flag determines using of segments instead of cells to evaluate empowerment. By default: False.
        use_memory (optional): bool
            The flag determines using of the Memory object. Useful only if this object was initialised.
            By default: False

        Returns
        -------
        empowerment: float
            The empowerment value (always > 0).
        p: np.array
            The array of probabilities on which the empowerment was calculated.
        start_state: np.array
            The SDR representation of the given state that is used in TM. (Only if sp is defined it differs from
            the parameter state).
        """
        # Optionally re-encode the input through the SP.
        if self.sp is not None:
            self.sdr_0.sparse = state
            self.sp.compute(self.sdr_0, learn=False, output=self.sdr_sp)
            sdr = self.sdr_sp
        else:
            self.sdr_0.sparse = state
            sdr = self.sdr_0

        start_state = np.copy(sdr.sparse)
        data = np.zeros(self.tm.getColumnDimensions()[0])

        # Roll the TM prediction forward `horizon` steps, accumulating how
        # often each column is predicted.
        for actions in range(horizon):
            self.tm.reset()
            self.tm.compute(sdr, learn=False)
            self.tm.activateDendrites(learn=False)
            predictiveCells = self.tm.getPredictiveCells().sparse
            predictedColumnIndices = [self.tm.columnForCell(i) for i in predictiveCells]
            sdr.sparse = np.unique(predictedColumnIndices)
            if use_segments:
                # Count per active segment instead of per predictive cell.
                predictedColumnIndices = map(self.tm.columnForCell,
                                             map(self.tm.connections.cellForSegment, self.tm.getActiveSegments()))
            for i in predictedColumnIndices:
                data[i] += 1

        if self.memory is not None and use_memory:
            if (self.memory.kernels is not None) and (self.memory.kernels.size > 0):
                # Project predicted-column counts onto the stored clusters and
                # compute the entropy over the cluster distribution.
                clusters = self.memory.adopted_kernels(self.sparsity)
                mask = (clusters[:, data!=0].sum(axis=1) / (self.sparsity * self.size)) < self.memory.threshold
                p = np.dot(clusters, data.T) / (self.sparsity * self.size)
                p[mask] = 0
                total_p = p.sum()
                empowerment = np.sum(-p / (total_p + EPS) * np.log(p / (total_p + EPS), where=p != 0), where=p != 0)
                p = p / (total_p + EPS)
                return empowerment, p, start_state
            else:
                return 0, None, start_state

        # Default: entropy over the raw predicted-column distribution.
        empowerment = np.sum(-data / (data.sum() + EPS) * np.log(data / (data.sum() + EPS), where=data != 0), where=data != 0)
        p = data / (data.sum() + EPS)
        return empowerment, p, start_state

    def eval_env(self, environment, horizon, use_segments=False, use_memory=False):
        """This function evaluates empowerment for every state in a gridworld environment.

        Parameters
        ----------
        environment:
            The gridworld environment to be evaluated.
        horizon: int
            The horizon of evaluating for given state. A good value is 3.
        use_segments (optional): bool
            The flag determines using of segments instead of cells to evaluate empowerment. By default: False.
        use_memory (optional): bool
            The flag determines using of the Memory object. Useful only if this object was initialised.
            By default: False

        Returns
        -------
        empowerment_map: np.array
            This is the map of the environment with values of empowerment for each state.
        """
        # Work on a copy so the caller's environment state is not mutated.
        env = deepcopy(environment)
        empowerment_map = np.zeros(env.env.shape)

        for i in range(env.env.shape[0]):
            for j in range(env.env.shape[1]):
                if not env.env.entities['obstacle'].mask[i, j]:
                    env.env.agent.position = (i, j)
                    _, s, _ = env.observe()
                    empowerment_map[i, j] = self.eval_state(s, horizon, use_segments, use_memory)[0]
        # plt.imshow(empowerment_map)
        # plt.show()
        return empowerment_map

    def learn(self, state_0, state_1):
        """One learning step of the TM (and optional SP/Memory) on the
        transition state_0 -> state_1.

        Parameters
        ----------
        state_0: np.array
            The SDR representation of the state (sparse form).
        state_1: np.array
            The SDR representation of the next state (sparse form).
        """
        self.sdr_0.sparse = state_0
        self.sdr_1.sparse = state_1

        self.tm.reset()

        # Feed the first state (optionally through the SP) and store it in Memory.
        if self.sp is not None:
            self.sp.compute(self.sdr_0, learn=True, output=self.sdr_sp)
            if self.memory is not None:
                self.memory.add(self.sdr_sp.sparse)
            self.tm.compute(self.sdr_sp, learn=True)
        else:
            if self.memory is not None:
                self.memory.add(self.sdr_0.sparse)
            self.tm.compute(self.sdr_0, learn=True)

        # Snapshot the prediction made after state_0, used for IoU statistics.
        if self.evaluate:
            self.tm.activateDendrites(learn=False)
            predictiveCells = self.tm.getPredictiveCells().sparse
            predictedColumnIndices = np.unique([self.tm.columnForCell(i) for i in predictiveCells])

        # Feed the second state and compare prediction vs reality.
        if self.sp is not None:
            self.sp.compute(self.sdr_1, learn=True, output=self.sdr_sp)
            self.tm.compute(self.sdr_sp, learn=True)
            if self.evaluate:
                intersection = np.intersect1d(self.sdr_sp.sparse, predictedColumnIndices)
                union = np.union1d(self.sdr_sp.sparse, predictedColumnIndices)
        else:
            self.tm.compute(self.sdr_1, learn=True)
            if self.evaluate:
                intersection = np.intersect1d(self.sdr_1.sparse, predictedColumnIndices)
                union = np.union1d(self.sdr_1.sparse, predictedColumnIndices)

        if self.evaluate:
            self.IoU.append(len(intersection) / len(union))
            self.anomalies.append(self.tm.anomaly)

        self.tm.reset()

    def detailed_evaluate(self, env, horizon, use_segments=False, use_memory=False):
        """This function evaluates TM and real empowerment and a confusion matrix for every state in a gridworld
        environment.

        Parameters
        ----------
        env:
            The gridworld environment to be evaluated.
        horizon: int
            The horizon of evaluating for given state. A good value is 3.
        use_segments (optional): bool
            The flag determines using of segments instead of cells to evaluate empowerment. By default: False.
        use_memory (optional): bool
            The flag determines using of the Memory object. Useful only if this object was initialised.
            By default: False

        Returns
        -------
        Plots normalised maps with TM and real empowerment. Also plots a confusion matrix in map style.
        """
        confusion_data = np.zeros((env.env.shape[0] * env.env.shape[1], self.tm.getColumnDimensions()[0]))
        empowerment_map = np.zeros(env.env.shape)
        real_empowerment_map = np.zeros(env.env.shape)

        for i in trange(env.env.shape[0]):
            for j in range(env.env.shape[1]):
                if not env.env.entities['obstacle'].mask[i, j]:
                    env.env.agent.position = (i, j)
                    _, s, _ = env.observe()
                    emp, _, s = self.eval_state(s, horizon, use_segments, use_memory)
                    empowerment_map[i, j] = emp
                    # One-hot row of the TM-encoded state per grid cell.
                    confusion_data[env.env.shape[1] * i + j, s] = 1
                    real_empowerment_map[i, j] = real_empowerment(env, (i, j), horizon)[0]

        plt.figure(figsize=(10, 5))
        plt.subplot(121)
        # Min-max normalise over non-zero cells only.
        # NOTE(review): `mask != 0` is equivalent to the boolean `mask`; kept as-is.
        mask = empowerment_map != 0
        empowerment_map[mask] = (empowerment_map[mask != 0] - np.min(empowerment_map[mask])) / (
                np.max(empowerment_map) - np.min(empowerment_map[mask]))
        plt.imshow(empowerment_map)
        plt.colorbar()
        plt.title('TM')

        plt.subplot(122)
        mask = real_empowerment_map != 0
        real_empowerment_map[mask] = (real_empowerment_map[mask != 0] - np.min(real_empowerment_map[mask])) / (
                np.max(real_empowerment_map) - np.min(real_empowerment_map[mask]))
        plt.imshow(real_empowerment_map)
        plt.colorbar()
        plt.title('Real')
        plt.show()

        # Pairwise IoU between the encoded states of every pair of grid cells.
        intersection = confusion_data @ confusion_data.T
        inv_mat = ~confusion_data.astype(bool)
        union = inv_mat.shape[1] - inv_mat.astype(float) @ inv_mat.astype(float).T
        iou = np.divide(intersection, union, out=np.zeros_like(intersection), where=union != 0)

        # Lay the 4-D IoU tensor out as a tiled 2-D image for plotting.
        plot_data = iou.reshape(env.env.shape[0], env.env.shape[1], env.env.shape[0], env.env.shape[1])
        image = np.zeros((env.env.shape[0] ** 2, env.env.shape[0] ** 2))
        for i in range(env.env.shape[0]):
            for j in range(env.env.shape[1]):
                image[env.env.shape[0] * i:env.env.shape[0] * (i + 1),
                      env.env.shape[1] * j:env.env.shape[1] * (j + 1)] = \
                    plot_data[i, j]

        plt.figure(figsize=(15, 15))
        plt.imshow(image)
        plt.yticks([-0.5 + env.env.shape[0] * i for i in range(env.env.shape[0])])
        plt.xticks([-0.5 + env.env.shape[1] * i for i in range(env.env.shape[0])])
        plt.grid(linewidth=3)
        plt.colorbar()
        plt.show()
def draw_tm(tm, grid_step):
    """Plot the current TM state: active cells in the red channel and
    predictive cells in the green channel (columns on x, cells on y)."""
    tm.activateDendrites(learn=False)
    activeCells = tm.getActiveCells().dense
    predictedCells = tm.getPredictiveCells().dense
    data = np.zeros((tm.getColumnDimensions()[0], tm.getCellsPerColumn(), 3))
    data[:, :, 0] = activeCells
    data[:, :, 1] = predictedCells
    plt.figure(figsize=(tm.getColumnDimensions()[0] / 10, tm.getCellsPerColumn() * 2))
    # Transpose so columns run along the x axis.
    plt.imshow(np.moveaxis(data, [0, 1, 2], [1, 0, 2]), aspect='auto')
    plt.yticks([-0.5 + i for i in range(tm.getCellsPerColumn())])
    plt.xticks([-0.5 + i * grid_step for i in range(tm.getColumnDimensions()[0] // grid_step)])
    plt.grid(linewidth=2)
    plt.show()
def draw_segments(tm):
    """Plot, for every TM cell, the number of dendritic segments it owns,
    and print the maximum segment count."""
    data = np.zeros(tm.getCellsPerColumn() * tm.getColumnDimensions()[0])
    max_seg = 0
    for cell in trange(tm.getCellsPerColumn() * tm.getColumnDimensions()[0]):
        segs = tm.connections.segmentsForCell(cell)
        data[cell] = len(segs)
        if len(segs) > max_seg:
            max_seg = len(segs)
    plt.figure(figsize=(tm.getColumnDimensions()[0] / 10, tm.getCellsPerColumn() * 2))
    print(f'Number of segments. Max: {max_seg}')
    # order='F' lays cells of the same column into the same image column.
    plt.imshow(data.reshape((tm.getCellsPerColumn(), tm.getColumnDimensions()[0]), order='F'), aspect='auto')
    plt.show()
def draw_active_segments(tm):
    """Plot, for every TM cell, how many of its segments are currently active."""
    data = np.zeros(tm.getCellsPerColumn() * tm.getColumnDimensions()[0])
    for seg in tm.getActiveSegments():
        cell = tm.connections.cellForSegment(seg)
        data[cell] += 1
    plt.figure(figsize=(tm.getColumnDimensions()[0] / 10, tm.getCellsPerColumn() * 2))
    print(f'Number of segments. Max: {data.max()}')
    # order='F' lays cells of the same column into the same image column.
    plt.imshow(data.reshape((tm.getCellsPerColumn(), tm.getColumnDimensions()[0]), order='F'), aspect='auto')
    plt.show()
def moving_average(x, w):
    """Simple moving average of *x* with window size *w* ('valid' mode:
    the result has ``len(x) - w + 1`` points)."""
    window = np.ones(w)
    return np.convolve(x, window, 'valid') / w
def real_empowerment(env, position, horizon):
    """Brute-force empowerment: enumerate every action sequence of length
    *horizon* starting at *position* and compute the entropy of the
    distribution over resulting agent positions.

    Returns (empowerment, end-position count map). Mutates the agent
    position of *env* as a side effect.
    """
    data = np.zeros(env.env.shape)

    # assumes exactly 4 discrete actions — TODO confirm against env.n_actions
    for actions in product(range(4), repeat=horizon):
        env.env.agent.position = position

        for a in actions:
            env.act(a)

        data[env.env.agent.position] += 1
    return np.sum(-data / data.sum() * np.log(data / data.sum(), where=data != 0), where=data != 0), data
def learn(seed,
          empowerment,
          env,
          steps,
          dump_step=None,
          horizon=3,
          use_segments=False,
          use_memory=False,
          ):
    """Drive a uniformly random agent through *env* for *steps* steps,
    training the *empowerment* model on every observed transition, then
    plot visit counts, anomaly, IoU and encoding-size statistics.

    dump_step: if given, re-evaluate (and plot) the empowerment map every
    ``dump_step`` steps.
    """
    np.random.seed(seed)
    visit_map = np.zeros(env.env.shape)
    encode_sizes = []
    for t in trange(steps):
        visit_map[env.env.agent.position] += 1
        # Random policy: sample an action, observe s0 -> act -> observe s1.
        a = np.random.randint(env.n_actions)
        _, s0, _ = env.observe()
        encode_sizes.append(len(s0))
        env.act(a)
        _, s1, _ = env.observe()
        empowerment.learn(s0, s1)

        if dump_step is not None:
            if (t + 1) % dump_step == 0:
                empowerment.eval_env(env, horizon, use_segments, use_memory)

    plt.title('Visit')
    plt.imshow(visit_map)
    plt.colorbar()
    plt.show()

    # Smoothed learning curves (window of 100 steps).
    plt.plot(moving_average(empowerment.anomalies, 100))
    plt.title('Anomaly')
    plt.ylim(0, 1)
    plt.grid()
    plt.show()

    plt.plot(moving_average(empowerment.IoU, 100))
    plt.title('Intersection over union')
    plt.ylim(0, 1)
    plt.grid()
    plt.show()

    plt.plot(moving_average(encode_sizes, 100))
    plt.title('Number of active columns')
    plt.show()
| 39.423853 | 126 | 0.601043 | 17,804 | 0.828633 | 0 | 0 | 187 | 0.008703 | 0 | 0 | 7,675 | 0.357209 |
20ea9a3199f5ede3e9ec9371bbc365bff86fc2fb | 2,550 | py | Python | integration/python/instantiate_docker.py | DomainDrivenArchitecture/dda-smeagol-crate | d2f4a76dde2df416f905a691d2e7b0b80fd282b9 | [
"Apache-2.0"
] | 2 | 2019-01-02T08:59:47.000Z | 2021-08-05T09:13:46.000Z | integration/python/instantiate_docker.py | DomainDrivenArchitecture/dda-smeagol-crate | d2f4a76dde2df416f905a691d2e7b0b80fd282b9 | [
"Apache-2.0"
] | null | null | null | integration/python/instantiate_docker.py | DomainDrivenArchitecture/dda-smeagol-crate | d2f4a76dde2df416f905a691d2e7b0b80fd282b9 | [
"Apache-2.0"
] | 1 | 2018-11-27T11:17:03.000Z | 2018-11-27T11:17:03.000Z | import docker
import sys
import os
import argparse
# Please perform the following steps in order to use this script
# 1) Install pyton 3 and pip3: sudo apt install python3-pip python3
# 2) Install the docker sdk with pip: pip3 install docker
parser = argparse.ArgumentParser()
parser.add_argument("jar", help="relative or absolute path to the dda-serverspec-crate uberjar.")
parser.add_argument("config", help="relative or absolute path to the config file in edn format.")
# TODO: Review jem 2018.11.08: relevant only for debug? If yes, then remove!
parser.add_argument("-c", "--cmd", help="alternative command to execute in the docker container.\
Default is to run the given uberjar with the given config.")
parser.add_argument("-i", "--image", help="image for the docker container. Default image is openjdk:8 (where netstat tests do not work since net-tools is not installed).")
args = parser.parse_args()
docker_logs = os.getcwd() + '/docker-logs/'
if not os.path.exists(docker_logs):
os.makedirs(docker_logs)
edn_file = os.path.abspath(args.config)
jar_file = os.path.abspath(args.jar)
# TODO: Review jem 2018.11.08: Put defaults to the argparse section
execute_command = 'java -jar /app/uberjar.jar /app/config.edn'
if args.cmd:
execute_command = args.cmd
# TODO: Review jem 2018.11.08: Put defaults to the argparse section
image = 'openjdk:8'
if args.image:
image = args.image
# TODO: Review jem 2018.11.08: we curl the serverspec outside - is'nt it a bad idea to do the curl inside of this test-script?
debug_map = {'edn_file':edn_file, 'jar_file':jar_file, 'docker_logs':docker_logs}
client = docker.APIClient()
# docker run --volume $(pwd)/example-serverspec.edn:/app/config.edn --volume $(pwd)/target/dda-serverspec-crate-1.1.4-SNAPSHOT-standalone.jar:/app/uberjar.jar --volume $(pwd)/docker_logs/:/logs/ -it openjdk:8 /bin/bash
container = client.create_container(
image=image,
command=execute_command,
volumes=['/app/config.edn', '/app/uberjar.jar', '/logs'],
host_config=client.create_host_config(binds={
edn_file: {
'bind': '/app/config.edn',
'mode': 'ro',
},
jar_file: {
'bind': '/app/uberjar.jar',
'mode': 'ro',
},
docker_logs: {
'bind': '/logs/',
'mode': 'rw',
}
})
)
response = client.start(container=container)
for log in client.logs(container, stream=True, stdout=True, stderr=True):
print(log)
sys.exit(client.wait(container)['StatusCode'])
| 37.5 | 218 | 0.688627 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,417 | 0.555686 |
20eb14ed9ef59d282c828a88ed96fcb8fa827785 | 2,452 | py | Python | Classdef.py | gregoryng/subradar | fae4b33325732cf08daf1a265d092d9aa8c5aeca | [
"MIT"
] | null | null | null | Classdef.py | gregoryng/subradar | fae4b33325732cf08daf1a265d092d9aa8c5aeca | [
"MIT"
] | null | null | null | Classdef.py | gregoryng/subradar | fae4b33325732cf08daf1a265d092d9aa8c5aeca | [
"MIT"
] | null | null | null | """Various Python classes"""
__author__ = 'Cyril Grima'
from . import roughness, utils
from numpy import inf
nan = float('nan')
class Signal(object):
    """Signal relationships.

    Derives wavelength, wavenumber (and its components) and footprint
    radii from radar signal parameters via helpers in ``utils``.
    """
    def __init__(self, wf=nan, bw=nan, th=0., bmw=nan, h=nan, **kwargs):
        self.wf = wf # Signal central frequency [Hz]
        self.bw = bw # Signal bandwidth [Hz]
        self.th = th # Incident angle [rad]
        self.bmw = bmw # Beamwidth [rad]
        self.h = h # Altitude [m]
        # Defined from above variables
        self.wl = utils.wf2wl(wf) # via utils.wf2wl (frequency -> wavelength)
        self.wk = utils.wf2wk(wf) # via utils.wf2wk (frequency -> wavenumber)
        self.wk_x = utils.wk2vec(self.wk, self.th)['x'] # wavenumber component along x
        self.wk_z = utils.wk2vec(self.wk, self.th)['z'] # wavenumber component along z
        # Footprint radii for the beam-, pulse- and Fresnel-limited cases.
        self.footprint_rad = {'beam':utils.footprint_rad_beam(self.h, self.bmw),
                              'pulse':utils.footprint_rad_pulse(self.h, self.bw),
                              'fresnel':utils.footprint_rad_fresnel(self.h, self.wl)
                              }
class Fresnel(object):
    """Fresnel and Snell's relationships.

    Computes refractive indices plus reflection (R) and transmission (T)
    coefficients at the medium-1/medium-2 interface, per polarization,
    via helpers in ``utils``.
    """
    def __init__(self, ep1=1., ep2=1., mu1=1., mu2=1., th=0, **kwargs):
        self.ep1 = ep1 # Electric permittivity 1
        self.ep2 = ep2 # Electric permittivity 2
        self.ep = ep2/ep1 # Relative electric permittivity
        self.mu1 = mu1 # Magnetic permeability 1
        self.mu2 = mu2 # Magnetic permeability 2
        self.mu = mu2/mu1 # Relative magnetic permeability 2/1
        self.th = th # Incident angle [rad]
        # Refractive indices of each medium (from permittivity/permeability).
        self.n1 = utils.epmu2n(ep1, mu1)
        self.n2 = utils.epmu2n(ep2, mu2)
        # Reflection coefficients keyed by polarization ('vv', 'hh', 'nn').
        self.R = {'vv':utils.R_v(self.ep1, self.ep2, self.mu1, self.mu2, self.th),
                  'hh':utils.R_h(self.ep1, self.ep2, self.mu1, self.mu2, self.th),
                  'nn':utils.R(self.ep1, self.ep2, self.mu1, self.mu2, self.th)
                  }
        # Transmission coefficients keyed by polarization ('vv', 'hh').
        self.T = {'vv':utils.T_v(self.ep1, self.ep2, self.mu1, self.mu2, self.th),
                  'hh':utils.T_h(self.ep1, self.ep2, self.mu1, self.mu2, self.th)
                  }
class Roughness(object):
    """Roughness relationships.

    Scales surface RMS height and correlation length by the signal
    wavenumber to obtain the dimensionless products ks and kl.
    """
    def __init__(self, wf=nan, sh=0, cl=inf, **kwargs):
        self.wf = wf # Signal central frequency [Hz]
        self.sh = sh # RMS height [m]
        self.cl = cl # Correlation Length [m]
        # Defined from above variables
        self.wl = utils.wf2wl(wf) # via utils.wf2wl (frequency -> wavelength)
        self.wk = utils.wf2wk(wf) # via utils.wf2wk (frequency -> wavenumber)
        self.ks = self.wk*self.sh # wavenumber x RMS height (dimensionless)
        self.kl = self.wk*self.cl # wavenumber x correlation length (dimensionless)
| 38.3125 | 83 | 0.569331 | 2,312 | 0.942904 | 0 | 0 | 0 | 0 | 0 | 0 | 617 | 0.251631 |
20eb6f5d85628505190d93e8509c7390562e9bac | 528 | py | Python | tests/test_setup.py | TheCheapestPixels/panda3d-simplepbr | e439dcbab48b1b61cf7e477e51ef7c7507b97f29 | [
"BSD-3-Clause"
] | null | null | null | tests/test_setup.py | TheCheapestPixels/panda3d-simplepbr | e439dcbab48b1b61cf7e477e51ef7c7507b97f29 | [
"BSD-3-Clause"
] | null | null | null | tests/test_setup.py | TheCheapestPixels/panda3d-simplepbr | e439dcbab48b1b61cf7e477e51ef7c7507b97f29 | [
"BSD-3-Clause"
] | null | null | null | import panda3d.core as p3d
import pytest #pylint:disable=wrong-import-order
import simplepbr
#pylint:disable=redefined-outer-name
@pytest.fixture(scope='session')
def showbase():
    """Session-wide panda3d ShowBase configured for offscreen rendering
    without hardware framebuffers (suitable for headless test runs)."""
    from direct.showbase.ShowBase import ShowBase
    p3d.load_prc_file_data(
        '',
        'window-type offscreen\n'
        'framebuffer-hardware false\n'
    )
    return ShowBase()
def test_setup(showbase):
    """Smoke test: simplepbr.init succeeds against an offscreen ShowBase."""
    simplepbr.init(
        render_node=showbase.render,
        window=showbase.win,
        camera_node=showbase.cam,
    )
| 20.307692 | 49 | 0.6875 | 0 | 0 | 0 | 0 | 239 | 0.452652 | 0 | 0 | 136 | 0.257576 |
20ebb89e959fc0a3416f450671154a21d95c196a | 820 | py | Python | hivedata.py | PENGsBIT/DifferentialAnalysis | 0c0262bb22553d98fea209ffa8124346b9335993 | [
"Apache-2.0"
] | 1 | 2019-07-02T07:30:30.000Z | 2019-07-02T07:30:30.000Z | hivedata.py | PENGsBIT/DifferentialAnalysis | 0c0262bb22553d98fea209ffa8124346b9335993 | [
"Apache-2.0"
] | null | null | null | hivedata.py | PENGsBIT/DifferentialAnalysis | 0c0262bb22553d98fea209ffa8124346b9335993 | [
"Apache-2.0"
] | null | null | null | from impala.dbapi import connect
import numpy as np
from different import *
# Connection parameters: [host, port, user, password, database, table].
args = ["10.141.212.155", 10010, "", "", "bigbench_100g", "websales_home_myshop"]

# conn = hive.Connection(host="10.141.212.155", port=10010, database='bigbench_100g')
# Reuse the parameter list instead of repeating the literals, so host/port/db
# are defined in exactly one place.
conn = connect(host=args[0], port=args[1], database=args[4], auth_mechanism='PLAIN')

cur = conn.cursor()

# Probe a single row only to obtain the column metadata.
cur.execute(f'SELECT * FROM {args[5]} LIMIT 1')

# Column names come back qualified as "table.column"; keep the bare column
# name and drop the duplicated "...1" columns.
assign_columns = [
    column[0].split(".")[1]
    for column in cur.description
    if '1' not in column[0].split(".")[1]
]

para = ",".join(assign_columns)

# NOTE: the query text is assembled from introspected column names, not user
# input; switch to parameterized queries if that ever changes.
cur.execute("SELECT " + para + " FROM " + args[5])
data = np.array(cur.fetchall())

cur.close()
conn.close()

differInterval(data, para)
20ec501020c2772aa4720af36c66feb8ad8012c1 | 817 | py | Python | blin/config.py | cyber-chuvash/blinBlinskiyNoviyGod | b4dab1c254194579636aa7db18dd49203e60e381 | [
"MIT"
] | null | null | null | blin/config.py | cyber-chuvash/blinBlinskiyNoviyGod | b4dab1c254194579636aa7db18dd49203e60e381 | [
"MIT"
] | null | null | null | blin/config.py | cyber-chuvash/blinBlinskiyNoviyGod | b4dab1c254194579636aa7db18dd49203e60e381 | [
"MIT"
] | null | null | null | import json
import logging
import os
class _JsonConfig:
def __init__(self):
self._conf = \
json.loads(open(os.path.join(os.path.abspath(os.path.dirname(__file__)), '../config.json'), 'r').read())
@property
def log_level(self):
return logging.getLevelName(self._conf.get('log_level', 'INFO'))
def __getattr__(self, item):
return self._conf[item]
class _EnvConfig:
@property
def log_level(self):
return logging.getLevelName(os.environ.get('LOG_LEVEL', 'INFO'))
@property
def checkpoints(self):
return os.environ.get('CHECKPOINTS', '60,30,10,5,4,3,2,1').split(',')
def __getattr__(self, item):
return os.environ[item.upper()]
# Prefer the file-based configuration; fall back to environment variables
# when config.json does not exist.
try:
    Config = _JsonConfig()
except FileNotFoundError:
    Config = _EnvConfig()
| 20.425 | 116 | 0.640147 | 685 | 0.838433 | 0 | 0 | 328 | 0.401469 | 0 | 0 | 89 | 0.108935 |
20ece0378df7625ec1d27cc3e2aacc6037e534e4 | 18,374 | py | Python | gryphon/core/common_operations.py | vittorfp/labskit_cli | 28e109b4a9f36a03d499eb953e04a4fb787632fe | [
"MIT"
] | null | null | null | gryphon/core/common_operations.py | vittorfp/labskit_cli | 28e109b4a9f36a03d499eb953e04a4fb787632fe | [
"MIT"
] | null | null | null | gryphon/core/common_operations.py | vittorfp/labskit_cli | 28e109b4a9f36a03d499eb953e04a4fb787632fe | [
"MIT"
] | null | null | null | """
File containing operations that are common to the commands.
"""
import os
import sys
import json
import glob
import logging
import platform
import shutil
from datetime import datetime
from pathlib import Path
import git
from .registry.template import Template
from .core_text import Text
from ..constants import (
SUCCESS, VENV_FOLDER, ALWAYS_ASK, GRYPHON_HOME,
GENERATE, INIT, SYSTEM_DEFAULT, CONFIG_FILE, DEFAULT_PYTHON_VERSION
)
logger = logging.getLogger('gryphon')
REQUIREMENTS = "requirements.txt"
# PATH UTILS
def get_destination_path(folder=None) -> Path:
    """Resolve *folder* into an absolute ``Path``.

    ``None`` means the current working directory. A relative path is
    resolved against the current working directory; an absolute path is
    returned unchanged (as a ``Path``).
    """
    if folder is None:
        return Path.cwd()

    path = Path(folder)
    return path if path.is_absolute() else path.resolve()
def quote_windows_path(folder_path):
    """Surround *folder_path* with double quotes so Windows shells treat
    a path containing spaces as one token."""
    return ''.join(('"', folder_path, '"'))
def escape_windows_path(folder_path):
    """Return *folder_path* formatted as a plain string.

    NOTE(review): this performs no actual escaping — the original raw
    f-string was a no-op; kept behavior-identical for compatibility.
    """
    return f'{folder_path}'
# BASH UTILS
def remove_folder(folder: Path):
    """
    Removes a folder (location relative to cwd or absolute).

    Errors are not suppressed (``ignore_errors=False``): a missing folder
    or an undeletable file raises.
    """
    shutil.rmtree(folder, ignore_errors=False)
def create_folder(folder: Path):
    """
    Create a folder in the given path (location relative to cwd or absolute).

    Missing intermediate directories are created as well, and an already
    existing target directory is not an error.
    """
    # parents=True generalizes the previous behavior: creating "a/b" no
    # longer fails when "a" does not exist yet.
    folder.mkdir(parents=True, exist_ok=True)
def copy_project_template(template_source: Path, template_destiny: Path):
    """Copy the ``template`` sub-folder of a template into the destination
    folder, merging with any existing contents."""
    source = template_source / "template"
    source.mkdir(exist_ok=True)

    shutil.copytree(
        src=source,
        dst=str(template_destiny),
        dirs_exist_ok=True
    )
def execute_and_log(command):
    """Run *command* in a shell, logging the command and its full output
    at debug level.

    Returns the value of ``os.popen(...).close()``: ``None`` when the
    command succeeded, otherwise the platform-specific exit status.
    """
    logger.debug(f"command: {command}")
    process = os.popen(command)
    for line in process.read().split('\n'):
        logger.debug(line)
    # None on success, exit status otherwise.
    return process.close()
# GIT
def init_new_git_repo(folder: Path) -> git.Repo:
    """Initialize a new git repository in *folder* and return the Repo handle."""
    return git.Repo.init(folder)
def initial_git_commit(repository: git.Repo):
    """Stage every file in the repository and create the first commit."""
    repository.git.add(A=True)
    repository.index.commit("Initial commit")
# VENV
def create_venv(folder=None, python_version=None):
    """Create a virtual environment inside *folder*.

    Parameters:
        folder: target project folder (relative or absolute); ``None``
            means the current working directory.
        python_version: desired interpreter version. ``None``, ALWAYS_ASK
            or SYSTEM_DEFAULT use the ``python`` found on PATH; any other
            value uses a Conda-provisioned interpreter cached under
            GRYPHON_HOME (installed on first use).

    Raises:
        RuntimeError: if the ``python -m venv`` command fails.
    """
    python_path = "python"
    if python_version and python_version != ALWAYS_ASK:

        if python_version != SYSTEM_DEFAULT:
            env_folder = GRYPHON_HOME / f"reserved_env_python_{python_version}"
            if not env_folder.is_dir():
                # Provision the requested interpreter once and reuse it for
                # every project needing this version.
                logger.info(f"Installing python version with Conda.")
                create_conda_env(
                    folder=GRYPHON_HOME / f"reserved_env_python_{python_version}",
                    python_version=python_version
                )

            if platform.system() == "Windows":
                # On Windows the venv folder structure is different from unix
                python_path = env_folder / "envs" / "python.exe"
            else:
                python_path = env_folder / "envs" / "bin" / "python"

    target_folder = get_destination_path(folder)
    venv_path = target_folder / VENV_FOLDER

    # Create venv
    logger.info(f"Creating virtual environment in {venv_path}")
    return_code = execute_and_log(f"\"{python_path}\" -m venv \"{venv_path}\"")
    if return_code:
        raise RuntimeError("Failed to create virtual environment.")

    logger.log(SUCCESS, "Done creating virtual environment.")
def install_libraries_venv(folder=None):
    """Install the dependencies listed in the project's ``requirements.txt``
    into its virtual environment.

    Raises:
        RuntimeError: when the venv is missing or pip fails.
        FileNotFoundError: when requirements.txt is missing.
    """
    project_dir = get_destination_path(folder)
    requirements_file = project_dir / REQUIREMENTS

    # The venv layout differs between Windows and unix-like systems.
    if platform.system() == "Windows":
        pip_executable = project_dir / VENV_FOLDER / "Scripts" / "pip.exe"
    else:
        pip_executable = project_dir / VENV_FOLDER / "bin" / "pip"

    logger.info("Installing requirements. This may take several minutes ...")

    if not pip_executable.is_file():
        raise RuntimeError(f"Virtual environment not found inside folder. Should be at {pip_executable}")

    if not requirements_file.is_file():
        raise FileNotFoundError("requirements.txt file not found.")

    status = execute_and_log(f'"{pip_executable}" install -r "{requirements_file}" --disable-pip-version-check')
    if status is not None:
        raise RuntimeError(f"Failed on pip install command. Status code: {status}")

    logger.log(SUCCESS, "Installation successful!")
def install_extra_nbextensions_venv(folder_path):
    """Install and enable extra Jupyter notebook extensions in the
    project's virtual environment.

    Ensures ``jupyter_nbextensions_configurator`` and
    ``jupyter_contrib_nbextensions`` are listed in requirements.txt,
    installs both, then enables a fixed set of nbextensions.

    Raises:
        RuntimeError: when the venv is missing or any command fails.
        FileNotFoundError: when requirements.txt is missing.
    """
    target_folder = get_destination_path(folder_path)
    requirements_path = target_folder / REQUIREMENTS

    # Platform-specific venv layout, shell-activation command and the
    # tokens used to run the jupyter commands silently in the background.
    if platform.system() == "Windows":
        # On Windows the venv folder structure is different from unix
        pip_path = target_folder / VENV_FOLDER / "Scripts" / "pip.exe"
        activate_env_command = target_folder / VENV_FOLDER / "Scripts" / "activate.bat"
        silent = "START /B \"\""
        # redirect = ">> .output 2>&1"
        redirect = ">nul 2>&1"
    else:
        pip_path = target_folder / VENV_FOLDER / "bin" / "pip"
        activate_path = target_folder / VENV_FOLDER / "bin" / "activate"
        activate_env_command = str(activate_path)
        os.system(f"chmod 777 \"{activate_path}\"")
        silent = "nohup"
        redirect = ""

    # Install requirements
    logger.info("Installing extra notebook extensions.")
    if not pip_path.is_file():
        raise RuntimeError(f"Virtual environment not found inside folder. Should be at {pip_path}")

    if not requirements_path.is_file():
        raise FileNotFoundError("requirements.txt file not found.")

    with open(requirements_path, "r", encoding="UTF-8") as f1:
        requirements = f1.read()

    # Persist the two extension packages into requirements.txt so future
    # installs pick them up as well.
    for lib in ["jupyter_nbextensions_configurator", "jupyter_contrib_nbextensions"]:
        if lib not in requirements:
            with open(requirements_path, "a", encoding="UTF-8") as f2:
                f2.write(f"\n{lib}")

    return_code = execute_and_log(f'\"{activate_env_command}\" && pip --disable-pip-version-check '
                                  f'install jupyter_contrib_nbextensions jupyter_nbextensions_configurator')
    if return_code is not None:
        raise RuntimeError(f"Failed on pip install command. Return code: {return_code}")

    # The enable/install commands must run from inside the project folder.
    os.chdir(target_folder)
    return_code = execute_and_log(
        f"\"{activate_env_command}\" "
        f"&& ({silent} jupyter nbextensions_configurator enable --user) {redirect}"
        f"&& ({silent} jupyter contrib nbextension install --user) {redirect}"
        f"&& ({silent} jupyter nbextension enable codefolding/main --user) {redirect}"
        f"&& ({silent} jupyter nbextension enable toc2/main --user) {redirect}"
        f"&& ({silent} jupyter nbextension enable collapsible_headings/main --user) {redirect}"
    )
    if return_code is not None:
        raise RuntimeError(f"Failed to install jupyter nbextensions. Return code: {return_code}")

    os.chdir(target_folder.parent)
def change_shell_folder_and_activate_venv(location):
    """Drop the user into the new project folder with its venv active.

    On Windows the running shell cannot be replaced, so activation
    instructions are printed for the user to execute manually; on unix the
    current process is replaced (``os.execl``) by a shell that sources the
    venv's activate script. Skipped entirely when running under pytest.
    """
    if 'pytest' not in sys.modules:
        target_folder = get_destination_path(location)
        if platform.system() == "Windows":
            # On windows the venv folder structure is different from unix
            # activate_path = target_folder / VENV / "Scripts" / "activate.bat"
            # os.system(
            #     f"""start cmd /k "echo Activating virtual environment & """
            #     f"""{activate_path} & """
            #     """echo "Virtual environment activated. Now loading Gryphon" & """
            #     """gryphon" """
            # )
            # Cannot activate in-place on Windows: print manual instructions
            # for both cmd/Anaconda Prompt and Git Bash.
            logger.warning(f"""
                {Text.install_end_message_1}
                ANACONDA PROMPT/COMMAND PROMPT:
                >> cd \"{target_folder}\"
                >> .venv\\Scripts\\activate.bat
                GIT BASH:
                >> cd \"{str(target_folder).replace(chr(92),'/')}\"
                >> source .venv/Scripts/activate
                {Text.install_end_message_2}
            """)
        else:
            logger.info("Opening your new project folder and activating virtual environment.")
            activate_path = target_folder / VENV_FOLDER / "bin" / "activate"
            os.chdir(target_folder)
            # Replace the current process with a fresh shell whose rcfile is
            # the venv activate script — this call does not return.
            shell = os.environ.get('SHELL', '/bin/sh')
            os.execl(shell, shell, "--rcfile", activate_path)
# CONDA
def create_conda_env(folder=None, python_version=None):
    """Create a conda virtual environment under ``<folder>/envs``.

    When ``python_version`` is given (and not the system default) the
    environment is pinned to that interpreter version.
    """
    env_prefix = get_destination_path(folder) / 'envs'
    logger.info(f"Creating Conda virtual environment in {env_prefix}")
    # Quieten conda version nagging and make conda-forge available.
    execute_and_log("conda config --set notify_outdated_conda false")
    execute_and_log("conda config --append channels conda-forge --json >> out.json")
    os.remove("out.json")
    create_parts = [f"conda create --prefix=\"{env_prefix}\" -y -k"]
    if python_version and python_version != SYSTEM_DEFAULT:
        create_parts.append(f"python={python_version}")
    status = execute_and_log(" ".join(create_parts))
    if status is not None:
        raise RuntimeError(f"Failed to create conda environment. Status code: {status}")
    logger.log(SUCCESS, "Done creating virtual environment.")
def install_libraries_conda(folder=None):
    """Install the project's requirements.txt into its local conda env."""
    logger.info("Installing requirements. This may take several minutes ...")
    project_dir = get_destination_path(folder)
    req_file = project_dir / "requirements.txt"
    env_prefix = project_dir / 'envs'
    execute_and_log("conda config --set notify_outdated_conda false")
    status = execute_and_log(f"conda install --prefix \"{env_prefix}\" --file \"{req_file}\" -k -y")
    if status is not None:
        raise RuntimeError(f"Failed to install requirements on conda environment. Status code: {status}")
    logger.log(SUCCESS, "Installation successful!")
def install_extra_nbextensions_conda(folder_path):
    """
    Install and enable extra Jupyter notebook extensions inside the
    project's conda environment.

    Ensures the two nbextension packages are listed in requirements.txt,
    installs them with conda, then enables the individual extensions.

    Raises:
        RuntimeError: if the conda env is missing or any command fails.
        FileNotFoundError: if requirements.txt is missing.
    """
    target_folder = get_destination_path(folder_path)
    conda_path = target_folder / 'envs'
    requirements_path = target_folder / REQUIREMENTS
    # Install requirements
    logger.info("Installing extra notebook extensions.")
    if not conda_path.is_dir():
        raise RuntimeError(f"Conda environment not found inside folder. Should be at {conda_path}")
    if not requirements_path.is_file():
        raise FileNotFoundError("requirements.txt file not found.")
    # Track both extension packages in requirements.txt for reproducibility.
    with open(requirements_path, "r", encoding="UTF-8") as f1:
        requirements = f1.read()
    for lib in ["jupyter_nbextensions_configurator", "jupyter_contrib_nbextensions"]:
        if lib not in requirements:
            with open(requirements_path, "a", encoding="UTF-8") as f2:
                f2.write(f"\n{lib}")
    if platform.system() == "Windows":
        # On Windows the venv folder structure is different from unix
        conda_python = conda_path / "python.exe"
        silent = "START /B \"\""
        redirect = ">nul 2>&1"
    else:
        conda_python = conda_path / "bin" / "python"
        silent = "nohup"
        redirect = ""
    execute_and_log("conda config --set notify_outdated_conda false")
    return_code = execute_and_log(f'conda install jupyter_contrib_nbextensions '
                                  f'jupyter_nbextensions_configurator --prefix=\"{conda_path}\" --yes -k')
    if return_code is not None:
        raise RuntimeError(f"Failed on conda install command. Return code: {return_code}")
    os.chdir(target_folder)
    # Run each enable/install step in order, checking every return code
    # explicitly. The previous `assert return_code is None` checks were
    # stripped under `python -O`, silently hiding failures.
    extension_subcommands = [
        "nbextensions_configurator enable --user",
        "nbextension enable codefolding/main --user",
        "contrib nbextension install --user",
        "nbextension enable toc2/main --user",
        "nbextension enable collapsible_headings/main --user",
    ]
    for subcommand in extension_subcommands:
        return_code = execute_and_log(
            f'({silent} \"{conda_python}\" -m jupyter {subcommand}) {redirect}')
        if return_code is not None:
            raise RuntimeError(f"Failed to install jupyter nbextensions. Return code: {return_code}")
    os.chdir(target_folder.parent)
def change_shell_folder_and_activate_conda_env(location):
    """Print instructions for entering the project folder and activating
    its conda environment.

    Unlike the venv variant this never replaces the running shell; on both
    platforms the user is shown the commands to run. Skipped under pytest.
    """
    if 'pytest' not in sys.modules:
        target_folder = get_destination_path(location)
        if platform.system() == "Windows":
            logger.warning(f"""
                {Text.install_end_message_1}
                >> cd {target_folder}
                >> conda activate \"{target_folder / "envs"}\"
                [Or once in the folder, simply: >> conda activate .\\envs]
                {Text.install_end_message_2}
            """)
        else:
            logger.warning(f"""
                {Text.install_end_message_1}
                >> cd {target_folder}
                >> conda activate --prefix=\"{target_folder / "envs"}\"
                {Text.install_end_message_2}
            """)
def update_conda():
    """Update the conda package manager itself; raise on failure."""
    result = execute_and_log("conda update conda -k")
    if result is not None:
        raise RuntimeError("Failed to update conda.")
# requirements.txt UTILS
def append_requirement(library_name):
    """Appends a given requirement to the requirements.txt file.

    Best-effort: a missing requirements file is logged, not raised.
    """
    requirements_path = get_destination_path() / REQUIREMENTS
    try:
        existing = requirements_path.read_text(encoding='UTF-8')
        if library_name not in existing:
            with open(requirements_path, "a", encoding='UTF-8') as file:
                file.write(f"\n{library_name}")
    except FileNotFoundError:
        logger.error(f"Could not find requirements file at {requirements_path}, "
                     f"It is required in order to run this command.")
def rollback_append_requirement(library_name):
    """Undo `append_requirement`: drop `library_name` from requirements.txt
    if (and only if) it is the last line of the file.

    Raises:
        FileNotFoundError: if the requirements file does not exist.
    """
    current_path = get_destination_path()
    requirements_path = current_path / REQUIREMENTS
    # An `assert` here would be stripped under `python -O`; fail explicitly.
    if not requirements_path.is_file():
        raise FileNotFoundError(f"Requirements file not found at {requirements_path}.")
    with open(requirements_path, "r", encoding='UTF-8') as file:
        requirements = file.read()
    requirements_list = requirements.split('\n')
    # Only roll back when the library really is the most recent entry.
    if library_name == requirements_list[-1]:
        with open(requirements_path, "w", encoding='UTF-8') as file:
            file.write('\n'.join(requirements_list[:-1]))
# RC FILE
def get_rc_file(folder=None):
    """
    Return the path to the .gryphon_history file inside `folder`, creating
    an empty (`{}`) one when it does not exist yet.

    Bug fix: the previous default `folder=Path.cwd()` was evaluated once at
    import time, freezing the working directory; `None` now means "current
    working directory at call time".
    """
    if folder is None:
        folder = Path.cwd()
    path = folder / ".gryphon_history"
    if path.is_file():
        return path
    with open(path, "w", encoding="utf-8") as f:
        f.write("{}")
    return path
def log_new_files(template: Template, performed_action: str, logfile=None):
    """Record every file delivered by `template` in the project history file."""
    assert performed_action in [INIT, GENERATE]
    if logfile is None:
        logfile = Path.cwd() / ".gryphon_history"
    template_root = template.path / "template"
    candidates = map(Path, glob.glob(str(template_root / "**"), recursive=True))
    delivered_files = [entry for entry in candidates if entry.is_file()]
    with open(logfile, "r+", encoding="utf-8") as f:
        history = json.load(f)
        updated = history.copy()
        for delivered in delivered_files:
            updated.setdefault("files", []).append(
                dict(
                    path=str(delivered.relative_to(template_root)),
                    template_name=template.name,
                    version=template.version,
                    action=performed_action,
                    created_at=str(datetime.now())
                )
            )
        # Rewrite the file in place, truncating any leftover bytes.
        f.seek(0)
        f.write(json.dumps(updated))
        f.truncate()
def log_operation(template, performed_action: str, logfile=None):
    """Append a single template operation record to the history logfile."""
    assert performed_action in [INIT, GENERATE]
    if logfile is None:
        logfile = Path.cwd() / ".gryphon_history"
    record = dict(
        template_name=template.name,
        version=template.version,
        action=performed_action,
        created_at=str(datetime.now())
    )
    with open(logfile, "r+", encoding="utf-8") as f:
        history = json.load(f)
        updated = history.copy()
        updated.setdefault("operations", []).append(record)
        # Rewrite in place, truncating any leftover bytes.
        f.seek(0)
        f.write(json.dumps(updated))
        f.truncate()
def log_add_library(libraries, logfile=None):
    """Append each installed library (with a timestamp) to the history file.

    Raises:
        RuntimeError: when the history file is absent, i.e. we are not
            inside a Gryphon project directory.
    """
    if logfile is None:
        logfile = Path.cwd() / ".gryphon_history"
    try:
        with open(logfile, "r+", encoding="utf-8") as f:
            history = json.load(f)
            updated = history.copy()
            for lib in libraries:
                entry = dict(name=lib, added_at=str(datetime.now()))
                updated.setdefault("libraries", []).append(entry)
            # Rewrite in place, truncating any leftover bytes.
            f.seek(0)
            f.write(json.dumps(updated))
            f.truncate()
    except FileNotFoundError:
        raise RuntimeError("The .gryphon_history file was not found, therefore you are not inside a "
                           "Gryphon project directory.")
def get_current_python_version():
    """Read the configured default python version from the config file,
    falling back to the package default when the key is absent."""
    with open(CONFIG_FILE, "r", encoding="UTF-8") as config:
        settings = json.load(config)
    return settings.get("default_python_version", DEFAULT_PYTHON_VERSION)
| 32.928315 | 115 | 0.636606 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,180 | 0.39077 |
20ed6b7104d66706dd28783944068d6c82b7b1c2 | 2,599 | py | Python | src/cho_util/math/transform.py | yycho0108/cho-util | 331efc7aac8cddfb4258620b80cb7fc5f0688d1f | [
"MIT"
] | null | null | null | src/cho_util/math/transform.py | yycho0108/cho-util | 331efc7aac8cddfb4258620b80cb7fc5f0688d1f | [
"MIT"
] | null | null | null | src/cho_util/math/transform.py | yycho0108/cho-util | 331efc7aac8cddfb4258620b80cb7fc5f0688d1f | [
"MIT"
] | null | null | null | import numpy as np
from .common import *
from . import rotation
def to_homogeneous(x):
x = np.asarray(x)
o = np.ones_like(x[..., :1])
return np.concatenate([x, o], axis=-1)
def from_homogeneous(x):
return x[..., :-1] / x[..., -1:]
def compose(r, t, rtype, out=None):
if out is None:
shape = tuple(np.shape(t)[:-1]) + (4, 4)
out = np.zeros(shape, dtype=t.dtype)
rtype.to_matrix(r, out=out[..., :3, :3])
out[..., :3, 3:] = t.reshape(out[...,:3,3:].shape)
return out
def translation_from_matrix(T):
return T[..., :3, 3]
def rotation_from_matrix(T):
return T[..., :3, :3]
def rotation_2d(x, R=None, c=None, s=None):
if R is None:
shape = tuple(np.shape(x)[:-1]) + (2, 2)
R = np.zeros(shape, dtype=x.dtype)
if c is None:
c = np.cos(x)
if s is None:
s = np.sin(x)
R[..., 0, 0] = c
R[..., 0, 1] = -s
R[..., 1, 0] = s
R[..., 1, 1] = c
return R
def Rz(x, T=None, c=None, s=None):
if T is None:
shape = tuple(np.shape(x)[:-1]) + (4, 4)
T = np.zeros(shape, dtype=np.float32)
if c is None:
c = np.cos(x)
if s is None:
s = np.sin(x)
T[..., 0, 0] = c
T[..., 0, 1] = -s
T[..., 1, 0] = s
T[..., 1, 1] = c
T[..., 2, 2] = 1
return T
def invert(T, out=None):
R = T[..., :3, :3]
t = T[..., :3, 3:]
if out is None:
out = np.zeros_like(T)
out[..., :3, :3] = R.swapaxes(-1, -2)
out[..., :3, 3:] = -np.einsum('...ba,...bc->...ac', R, t)
out[..., 3, 3] = 1
return out
def Rti(R, t):
Ri = R.swapaxes(-1, -2)
if np.ndim(t) < np.ndim(Ri):
# case (...,D)
ti = -np.einsum('...ab,...b->...a', Ri, t)
else:
# case (...,D,1)
ti = -np.einsum('...ab,...bc->...ac', Ri, t)
return Ri, ti
def lerp(a, b, w):
return (a * (1.0-w)) + (b*w)
def flerp(a, b, w, f, fi):
return fi(lerp(f(a), f(b), w))
def rlerp(ra, rb, w):
Ra = np.eye(4, dtype=np.float32)
Rb = np.eye(4, dtype=np.float32)
Ra[:3, :3] = ra
Rb[:3, :3] = rb
qa = tx.quaternion_from_matrix(Ra)
qb = tx.quaternion_from_matrix(Rb)
q = tx.quaternion_slerp(q0, q1, w)
R = tx.quaternion_matrix(q)[:3, :3]
return R
def rx3(R, x):
rx = np.einsum('...ab,...b->...a', R[..., :3, :3], x)
return rx
def tx3(T, x):
rx = np.einsum('...ab,...b->...a', T[..., :3, :3], x)
return rx + T[..., :3, 3:].swapaxes(-2, -1)
def rtx3(r, t, x):
return x.dot(r.swapaxes(-2, -1)) + t
def tx4(T, x):
return np.einsum('...ab,...b->...a', T, x)
| 22.405172 | 61 | 0.464025 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 142 | 0.054636 |
20edfabbb18d57e5b8c63c2383e738c3f3549841 | 27,895 | py | Python | LinearRegression-GradientDescent/GradientDescent.py | saurabbhsp/machineLearning | e468854b57c8de6e80bc397bde6f1c71ee50d495 | [
"Apache-2.0"
] | 3 | 2017-12-01T16:57:03.000Z | 2021-01-08T08:30:12.000Z | LinearRegression-GradientDescent/GradientDescent.py | saurabbhsp/machineLearning | e468854b57c8de6e80bc397bde6f1c71ee50d495 | [
"Apache-2.0"
] | null | null | null | LinearRegression-GradientDescent/GradientDescent.py | saurabbhsp/machineLearning | e468854b57c8de6e80bc397bde6f1c71ee50d495 | [
"Apache-2.0"
] | 6 | 2019-07-05T12:27:36.000Z | 2022-01-25T11:52:21.000Z |
# coding: utf-8
# In[1]:
get_ipython().run_cell_magic('javascript', '', '<!-- Ignore this block -->\nIPython.OutputArea.prototype._should_scroll = function(lines) {\n return false;\n}')
# # Data preprocessing
# 1. convert any non-numeric values to numeric values.
# 2. If required drop out the rows with missing values or NA. In next lectures we will handle sparse data, which will allow us to use records with missing values.
# 3. Split the data into a train(80%) and test(20%) .
# In[2]:
get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'retina'")
from __future__ import division
import pandas as pd
import numpy as np
from math import sqrt, isnan
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
"""Set global rcParams for pyplotlib"""
plt.rcParams["figure.figsize"] = "18,25"
# ### TextEncoder
#
# Here the data is mix of numbers and text. Text value cannot be directly used and should be converted to numeric data.<br>
# For this I have created a function text encoder which accepts a pandas series. Text encoder returns a lookUp dictionary for recreating the numeric value for text value.
# In[3]:
def textEncoder(*textVectors):
    """Build a text -> integer lookup table over one or more pandas Series.

    Each distinct value, in first-seen order across all series, gets a
    consecutive integer code starting at 1.
    """
    lookup = {}
    for series in textVectors:
        for label in series.unique():
            if label in lookup:
                continue
            lookup[label] = len(lookup) + 1
    return lookup
# ### SplitDataSet Procedure
# This method splits the dataset into trainset and testset based upon the trainSetSize value. For splitting the dataset, I am using pandas.sample to split the data. This gives me trainset. For testset I am calculating complement of the trainset. This I am doing by droping the index present in training set.
# In[4]:
"""Splits the provided pandas dataframe into training and test dataset"""
def splitDataSet(inputDataframe, trainSetSize):
trainSet = inputDataframe.sample(frac=trainSetSize)
testSet = inputDataframe.drop(trainSet.index)
return trainSet,testSet
# ### generatePearsonCoefficient Procedure
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/f76ccfa7c2ed7f5b085115086107bbe25d329cec">
# For sample:-
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/bd1ccc2979b0fd1c1aec96e386f686ae874f9ec0">
# For selecting some features and for dropping others I am using Pearson's Coefficient. The value of Pearson's coefficient lies between [-1, 1] and tells how two features are related<br>
# <table>
# <tr><td>Strength of Association</td><td>Positive</td><td>Negative</td></tr><tr><td>Small</td><td>.1 to .3 </td><td>-0.1 to -0.3 </td></tr><tr><td>Medium</td><td>.3 to .5 </td><td>-0.3 to -0.5 </td></tr><tr><td>Large</td><td>.5 to 1.0 </td><td>-0.5 to -1.0 </td></tr></table>
#
# In[5]:
"""Generate pearson's coefficient"""
def generatePearsonCoefficient(A, B):
A = A - A.mean()
B = B - B.mean()
return ((A * B).sum())/(sqrt((A * A).sum()) * sqrt((B * B).sum()))
# ### predictLinearRegression Procedure
# This method performs predicts the value for Y given X and model parameters. This method will add bias to X.<br>
# The prediction is given by BX<sup>T</sup>
# In[6]:
"""Method to make prediction for yTest"""
def predictionLinearRegression(X, modelParameters):
X = np.insert(X, 0, 1, axis=1)
yPrediction = np.dot(modelParameters, X.T)
return yPrediction
# ### RMSE procedure
# Will calculate root mean squared error for given Ytrue values and YPrediction.
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/fc187c3557d633423444d4c80a4a50cd6ecc3dd4">
#
# In[7]:
"""Model accuracy estimator RMSE"""
def RMSE(yTrue, yPrediction):
n = yTrue.shape[0]
return sqrt((1.0) * np.sum(np.square((yTrue - yPrediction))))/n
# ### armijoStepLengthController proedure
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/ed6d74a5c23f9034a072125eeb316eee5faeed43">
# In[8]:
"""Uses armijo principle to detect next value of alpha.
Alpha values are rewritten. Passed to function just to maintain uniformity
"""
def armijoStepLengthController(fx, alpha, x, y, beta, gradient, delta, maxIterations = 1000):
alpha = 1.0
gradientSquare = np.dot(gradient, gradient)
for i in range(0, maxIterations):
alpha = alpha/2
residual_alpha_gradient = y - np.dot((beta - (alpha * gradient)), x .T)
fx_alpha_gradient = np.dot(residual_alpha_gradient.T, residual_alpha_gradient)
"""Convergence condition for armijo principle"""
if fx_alpha_gradient < fx - (alpha * delta * gradientSquare):
break;
return alpha
# ### boldDriverStepLengthController procedure
# An extension to armijo steplength controller. Retain alpha values.
# In[9]:
def boldDriverStepLengthController(fx, alpha, x, y, beta, gradient, maxIterations = 1000,
                                  alphaMinus = 0.5, alphaPlus = 1.1):
    """Bold-driver step length control.

    Grows the previous `alpha` by `alphaPlus`, then repeatedly shrinks it
    by `alphaMinus` until the objective actually decreases. Unlike the
    Armijo controller the incoming alpha is retained between calls.
    """
    stepLength = alpha * alphaPlus
    for _ in range(maxIterations):
        stepLength = stepLength * alphaMinus
        trialResidual = y - np.dot(beta - (stepLength * gradient), x.T)
        trialObjective = np.dot(trialResidual.T, trialResidual)
        # Accept the first step that yields any decrease in the objective.
        if fx - trialObjective > 0:
            break
    return stepLength
# ### linearRegressionGradientDescent procedure
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/26a319f33db70a80f8c5373f4348a198a202056c">
# Calculate slope at the given point(gradient) and travel in the negative direction with provided step length.<br/>
# In[10]:
"""If no step length controller is provided then values of alpha will be taken as step length.
Else the step length controller will be used. Additional parameters to the controller are
provided by stepLengthControllerParameters"""
def linearRegressionGradientDescent(x, y, xTest, yTest, alpha, beta,
maxIterations=1000, epsilon=1.1e-20,
stepLengthController = None, stepLengthControllerParameters = None):
x = np.insert(x, 0, 1, axis=1)
x = x * 1.0
y = y * 1.0
if stepLengthController != None:
print("Warning using stepLengthController alpha values will be rewritten")
plotX = []
plotY_diff = []
plotY_RMSE = []
y_prediction = np.dot(beta, x.T)
residual = y_prediction - y
f_x = np.dot(residual.T, residual)
rmse = RMSE(yTest, predictionLinearRegression(xTest, beta))
"""For plotting graph"""
plotY_RMSE.append(rmse)
plotY_diff.append(f_x)
plotX.append(0)
for i in range(1, maxIterations):
gradient = np.dot(x.T, residual) * 2
"""Use step length controller if required"""
if stepLengthController != None:
alpha = stepLengthController(fx = f_x, alpha = alpha, x = x, y = y,
beta = beta, gradient = gradient, **stepLengthControllerParameters)
beta = beta - (alpha * gradient)
y_prediction = np.dot(beta, x.T)
residual = y_prediction - y
f_x_new = np.dot(residual.T, residual)
rmse = RMSE(yTest, predictionLinearRegression(xTest, beta))
"""For plotting graph"""
plotY_RMSE.append(rmse)
plotY_diff.append(abs(f_x_new - f_x))
plotX.append(i)
if abs(f_x - f_x_new) < epsilon:
print("Converged in " + str(i) + " iterations")
return beta, plotX, plotY_diff, plotY_RMSE, f_x, rmse
f_x = f_x_new
print("Warning algorithm failed to converge in " + str(maxIterations) + " interations")
return beta, plotX, plotY_diff, plotY_RMSE, f_x, rmse
# # Gradient descent for airlines fare data
# ### Load the airlines dataset
# In[11]:
""" File path change accordingly"""
directoryPath = "data"
# Whitespace-separated file with no header row; columns named below.
airFareData = pd.read_csv(directoryPath+"/airq402.dat", sep='\s+',header = None)
airFareData.head(10)
"""Adding header"""
airFareData.columns = ["city1", "city2", "avgFare", "distance", "avgWeeklyPassengers",
                       "marketLeadingAirline", "marketShareLA", "averageFare", "lowPriceAirline",
                       "marketShareLPA", "price"]
airFareData.head()
# ### Using textEncoder to convert text data to numeric data
# In[12]:
"""Using lambda functions to replace text values based upon lockup dictionary"""
# Cities share one lookup table so the same city gets the same code in
# either column; likewise for the two airline columns.
cityLookupDictionary = textEncoder(airFareData.city1, airFareData.city2)
airFareData['city1'] = airFareData.city1.apply(lambda cityName:
                                               cityLookupDictionary[cityName])
airFareData['city2'] = airFareData.city2.apply(lambda cityName:
                                               cityLookupDictionary[cityName])
airLineLookupDictionary = textEncoder(airFareData.lowPriceAirline, airFareData.marketLeadingAirline)
airFareData['lowPriceAirline'] = airFareData.lowPriceAirline.apply(lambda cityName:
                                               airLineLookupDictionary[cityName])
airFareData['marketLeadingAirline'] = airFareData.marketLeadingAirline.apply(lambda cityName:
                                               airLineLookupDictionary[cityName])
# ### Check and remove missing data
# In[13]:
airFareData.dropna(inplace = True)
airFareData.head()
# ### Check for correlation between each feature X and the target price
# In[14]:
for column in airFareData:
    if column != "price":
        print("The corelation between " + column +" vs price is " +
              str(generatePearsonCoefficient(airFareData[column], airFareData['price'])))
# ### Visualizing the data
# In[15]:
# One scatter plot per feature against price (5x2 grid).
plt.close()
figure, ((ax1, ax2), (ax3, ax4), (ax5, ax6), (ax7, ax8), (ax9, ax10)) = plt.subplots(5,2,sharey='none')
ax1.plot(airFareData.city1, airFareData.price, "ro")
ax1.grid()
ax1.set_title("city1 vs price")
ax1.set_xlabel("city1")
ax1.set_ylabel("price")
ax2.plot(airFareData.city2, airFareData.price, "ro")
ax2.grid()
ax2.set_title("city2 vs price")
ax2.set_xlabel("city2")
ax2.set_ylabel("price")
ax3.plot(airFareData.avgFare, airFareData.price, "ro")
ax3.grid()
ax3.set_title("avgFare vs price")
ax3.set_xlabel("avgFare")
ax3.set_ylabel("price")
ax4.plot(airFareData.distance, airFareData.price, "ro")
ax4.grid()
ax4.set_title("distance vs price")
ax4.set_xlabel("distance")
ax4.set_ylabel("price")
ax5.plot(airFareData.avgWeeklyPassengers, airFareData.price, "ro")
ax5.grid()
ax5.set_title("avgWeeklyPassengers vs price")
ax5.set_xlabel("avgWeeklyPassengers")
ax5.set_ylabel("price")
ax6.plot(airFareData.marketLeadingAirline, airFareData.price, "ro")
ax6.grid()
ax6.set_title("marketLeadingAirline vs price")
ax6.set_xlabel("marketLeadingAirline")
ax6.set_ylabel("price")
ax7.plot(airFareData.marketShareLA, airFareData.price, "ro")
ax7.grid()
ax7.set_title("marketShareLA vs price")
ax7.set_xlabel("marketShareLA")
ax7.set_ylabel("price")
ax8.plot(airFareData.averageFare, airFareData.price, "ro")
ax8.grid()
ax8.set_title("averageFare vs price")
ax8.set_xlabel("averageFare")
ax8.set_ylabel("price")
ax9.plot(airFareData.lowPriceAirline, airFareData.price, "ro")
ax9.grid()
ax9.set_title("lowPriceAirline vs price")
ax9.set_xlabel("lowPriceAirline")
ax9.set_ylabel("price")
ax10.plot(airFareData.marketShareLPA, airFareData.price, "ro")
ax10.grid()
ax10.set_title("marketShareLPA vs price")
ax10.set_xlabel("marketShareLPA")
ax10.set_ylabel("price")
plt.show()
# By looking at pearson's coefficient we can drop city1, city2, marketLeadingAirline, lowPriceAirline as they do not have any corelation with price.
# ### Selecting the required features and splitting the dataset using splitDataSetProcedure
# In[16]:
# Keep only features with meaningful Pearson correlation to price.
airFareData = airFareData[['avgFare', 'distance', 'avgWeeklyPassengers', 'marketShareLA',
                           'averageFare', 'marketShareLPA', 'price']]
airFareData.head()
# In[17]:
# 80/20 random train/test split.
trainSet, testSet = splitDataSet(airFareData, 0.8)
print(trainSet.shape)
print(testSet.shape)
# In[18]:
trainSet.head()
# ### Running gradient descent with alpha parameter grid search
# In[19]:
"""Setting beta constant as future comparasion will be easy"""
# Fixed seed so the same initial beta is reused across all experiments.
np.random.seed(8)
inputBeta = np.random.random_sample(7)
alpha_parameterGrid = [0.1, 1.7e-9, 1.17e-11]
X_train = trainSet.as_matrix(columns = ['avgFare', 'distance', 'avgWeeklyPassengers', 'marketShareLA',
                           'averageFare', 'marketShareLPA'])
X_test = testSet.as_matrix(columns = ['avgFare', 'distance', 'avgWeeklyPassengers', 'marketShareLA',
                           'averageFare', 'marketShareLPA'])
Y_train = trainSet['price']
Y_test = testSet['price']
figure, ((ax1, ax2), (ax3, ax4), (ax5, ax6)) = plt.subplots(3,2,sharey='none')
axis = ((ax1, ax2), (ax3, ax4), (ax5, ax6))
index = 0
bestModelParameters = None
bestModelX = None
bestModelY = None
leastRMSE = None
leastRSS = None
# Try each fixed step length; keep the model with the lowest test RMSE.
for alpha in alpha_parameterGrid:
    """No step length controller provided so normal gradient descent will be executed"""
    modelParameters, X, Ydiff, Yrmse, rss, rmse = linearRegressionGradientDescent(X_train, Y_train, X_test, Y_test,
                                                           alpha, inputBeta,
                                                           maxIterations = 1000)
    """Selecting the best model with least RMSE"""
    # Divergent runs yield NaN RMSE and are excluded from model selection.
    if not(isnan(rmse)):
        if leastRMSE is None or leastRMSE > rmse:
            leastRMSE = rmse
            bestModelParameters = modelParameters
            leastRSS = rss
            bestModelX = X
            bestModelY = Yrmse
    print("RMSE "+ str(rmse))
    axis[index][0].plot(X, Ydiff)
    axis[index][0].grid()
    axis[index][0].set_title("Iteration vs abs(fx+1 - fx), alpha = " + str(alpha))
    axis[index][0].set_xlabel("Iterations")
    axis[index][0].set_ylabel("abs(fx+1 - fx)")
    axis[index][1].plot(X, Yrmse)
    axis[index][1].grid()
    axis[index][1].set_title("Iteration vs RMSE, alpha = " + str(alpha))
    axis[index][1].set_xlabel("Iterations")
    axis[index][1].set_ylabel("RMSE")
    index = index + 1
plt.show()
plt.close()
# ### Graph description
# <ul><li><b>Alpha = 0.1</b>
# <br>Here the alpha value is very big. Because of this instead of converging we are diverging away. Both abs(fx+1 - fx) and RMSE appear to be diverging.
# </li><li><b>Alpha = 1.7e-9</b><br>Here also the alpha value is too big. The observed effect is still the same</li><li><b>Alpha = 1.17e-11</b><br>Now alpha value is small enough for algorithm to converge. RMSE is also converging</li>
# ### Best model
# In[20]:
print("Best rmse for alpha grid is "+ str(leastRMSE))
print("Best rss for alpha grid is "+ str(leastRSS))
# ### Some sample predictions
# In[21]:
yPrediction = predictionLinearRegression(X_test,bestModelParameters)
df = pd.DataFrame({"Actual":Y_test, "Prediction":yPrediction})
df.head(25)
# ### Armijo Step Length Controller
# In[22]:
plt.close()
figure, ((ax1, ax2)) = plt.subplots(1, 2, sharey='none')
"""Set steplengthController to armijoStepLengthController and
stepLengthControllerParameters as any additional model parameters"""
# alpha=None: the Armijo controller derives the step length itself.
modelParameters, xArmijo, ydiffArmijo, yRMSEArmijo, rss, rmse = linearRegressionGradientDescent(X_train, Y_train,
                                                           X_test, Y_test,
                                                           None, inputBeta, maxIterations = 1000,
                                                           stepLengthController = armijoStepLengthController,
                                                           stepLengthControllerParameters = {"delta":0.2})
figure.set_figheight(8)
print("RMSE "+ str(rmse))
ax1.plot(xArmijo, ydiffArmijo)
ax1.grid()
ax1.set_title("Iteration vs abs(fx+1 - fx)")
ax1.set_xlabel("Iterations")
ax1.set_ylabel("(fx+1 - fx)")
ax2.plot(xArmijo, yRMSEArmijo)
ax2.grid()
ax2.set_title("Iteration vs RMSE")
ax2.set_xlabel("Iterations")
ax2.set_ylabel("RMSE")
plt.show()
# ### Some sample predictions
# In[23]:
yPrediction = predictionLinearRegression(X_test,modelParameters)
df = pd.DataFrame({"Actual":Y_test, "Prediction":yPrediction})
df.head(25)
# ### Bold Driver Step Length Controller
# In[24]:
figure, ((ax1, ax2)) = plt.subplots(1, 2, sharey='none')
"""Set steplengthController to boldDriverStepLengthController and
stepLengthControllerParameters as any additional model parameters"""
# alpha=1.0 is the starting step; bold driver adapts it between iterations.
modelParameters, xBold, yDiffBold, yRMSEBold, rss, rmse = linearRegressionGradientDescent(X_train, Y_train,
                                                           X_test, Y_test,
                                                           1.0, inputBeta, maxIterations = 1000,
                                                           stepLengthController = boldDriverStepLengthController,
                                                           stepLengthControllerParameters =
                                                           {"alphaMinus" : 0.9, "alphaPlus" : 1.5})
figure.set_figheight(8)
print("RMSE "+ str(rmse))
ax1.plot(xBold, yDiffBold)
ax1.grid()
ax1.set_title("Iteration vs abs(fx+1 - fx)")
ax1.set_xlabel("Iterations")
ax1.set_ylabel("(fx+1 - fx)")
ax2.plot(xBold, yRMSEBold)
ax2.grid()
ax2.set_title("Iteration vs RMSE")
ax2.set_xlabel("Iterations")
ax2.set_ylabel("RMSE")
plt.show()
# ### Sample predictions
# In[25]:
yPrediction = predictionLinearRegression(X_test,modelParameters)
df = pd.DataFrame({"Actual":Y_test, "Prediction":yPrediction})
df.head(25)
# ### Comparison
# In[26]:
# Overlay the RMSE curves of all three step-length strategies.
plt.close()
plt.figure(figsize=(9,7))
plt.plot(bestModelX, bestModelY, label = "Gradient Descent")
plt.plot(xArmijo, yRMSEArmijo, label = "Gradient Descent with Armijo step length controller")
plt.plot(xBold, yRMSEBold, label = "Gradient Descent with Bold driver length controller")
plt.grid()
plt.xlabel("Iteration")
plt.ylabel("RMSE")
plt.title("Comparasion of constant steplength and variable steplength with controller")
plt.legend()
plt.show()
# # Gradient descent for wine data
# ## Load data
# I am combining both red wine and white wine data in a single dataframe
# In[27]:
"""Load redwine data and add a new feature type
type = 0 => RedWine
type = 1 => WhiteWine """
# The UCI wine-quality CSVs use ';' as the field separator.
tmpFrame = pd.read_csv(directoryPath+"/winequality-red.csv", sep=";")
tmpFrame['type'] = 0
wineData = tmpFrame
tmpFrame = pd.read_csv(directoryPath+"/winequality-white.csv", sep=";")
tmpFrame['type'] = 1
# NOTE(review): concat without ignore_index=True keeps the two files'
# overlapping row labels -- presumably harmless downstream, but verify.
wineData = pd.concat([wineData, tmpFrame])
wineData.head()
# ## All data is numeric. Checking for NA data
# In[28]:
wineData.dropna(inplace = True)
wineData.head()
# ### Check for correlation between different X and Y
# #### For red wine
# In[29]:
# Pearson correlation of every feature against the quality score, computed
# separately for red wine, white wine, and the combined frame.
# (Fixed the "corelation" typo in the printed output and headers.)
redWine = wineData.loc[wineData['type'] == 0]
for column in redWine:
    if column != "quality":
        print("The correlation between " + column + " vs quality is " +
              str(generatePearsonCoefficient(redWine[column], redWine['quality'])))
# #### For white wine
# In[30]:
whiteWine = wineData.loc[wineData['type'] == 1]
for column in whiteWine:
    if column != "quality":
        print("The correlation between " + column + " vs quality is " +
              str(generatePearsonCoefficient(whiteWine[column], whiteWine['quality'])))
# #### Combined
# In[31]:
for column in wineData:
    if column != "quality":
        print("The correlation between " + column + " vs quality is " +
              str(generatePearsonCoefficient(wineData[column], wineData['quality'])))
# ### Visualizing the data
# In[32]:
# Scatter-plot every candidate feature against the quality score.  The
# twelve panels were originally written out by hand (ax1 .. ax12); looping
# over the feature names produces the identical figure without the
# duplication.  Panel order (row-major) matches the original layout.
featurePlotColumns = ['fixed acidity', 'volatile acidity', 'citric acid',
                      'residual sugar', 'chlorides', 'free sulfur dioxide',
                      'total sulfur dioxide', 'density', 'pH', 'sulphates',
                      'alcohol', 'type']
figure, axes = plt.subplots(6, 2, sharey='none')
figure.tight_layout()
figure.set_figheight(40)
for axisPanel, column in zip(axes.flat, featurePlotColumns):
    axisPanel.plot(wineData[column], wineData.quality, "ro")
    axisPanel.grid()
    axisPanel.set_title(column + " vs quality")
    axisPanel.set_xlabel(column)
    axisPanel.set_ylabel("quality")
plt.show()
# Selected features are volatile acidity, chlorides, density, alcohol and type
# ### Split data into trainSet and testSet
# In[33]:
# 80/20 train/test split; the shapes are printed as a sanity check.
trainSet, testSet = splitDataSet(wineData, 0.8)
print(trainSet.shape)
print(testSet.shape)
# ### Gradient descent no step length controller
# In[34]:
# Fixed seed so the random initial beta (5 features + intercept) is
# reproducible across runs.
np.random.seed(8)
inputBeta = np.random.random_sample(6)
# Candidate constant step lengths, from far too large down to small enough.
alpha_parameterGrid = [0.1, 0.007, 1.34e-7]
# DataFrame.as_matrix() was deprecated and later removed from pandas;
# selecting the columns and taking .values is the portable equivalent.
featureColumns = ['volatile acidity', 'chlorides', 'density', 'alcohol', 'type']
X_train = trainSet[featureColumns].values
X_test = testSet[featureColumns].values
Y_train = trainSet['quality']
Y_test = testSet['quality']
# One row of (diff, RMSE) panels per alpha value.
figure, ((ax1, ax2), (ax3, ax4), (ax5, ax6)) = plt.subplots(3,2,sharey='none')
axis = ((ax1, ax2), (ax3, ax4), (ax5, ax6))
bestModelParameters = None
bestModelX = None
bestModelY = None
leastRMSE = None
leastRSS = None
for index, alpha in enumerate(alpha_parameterGrid):
    modelParameters, X, Ydiff, Yrmse, rss, rmse = linearRegressionGradientDescent(X_train, Y_train, X_test, Y_test,
                                                                                  alpha, inputBeta,
                                                                                  maxIterations = 1000)
    # Diverged runs produce NaN; track the best converged model only.
    if not(isnan(rmse)):
        if leastRMSE is None or leastRMSE > rmse:
            leastRMSE = rmse
            bestModelParameters = modelParameters
            leastRSS = rss
            bestModelX = X
            bestModelY = Yrmse
    print("RMSE "+ str(rmse))
    axis[index][0].plot(X, Ydiff)
    axis[index][0].grid()
    axis[index][0].set_title("Iteration vs abs(fx+1 - fx), alpha = " + str(alpha))
    axis[index][0].set_xlabel("Iterations")
    axis[index][0].set_ylabel("abs(fx+1 - fx)")
    axis[index][1].plot(X, Yrmse)
    axis[index][1].grid()
    axis[index][1].set_title("Iteration vs RMSE, alpha = " + str(alpha))
    # Fixed: these two calls used to target axis[index][0] again, clobbering
    # the left panel's labels and leaving the RMSE panel unlabeled.
    axis[index][1].set_xlabel("Iterations")
    axis[index][1].set_ylabel("RMSE")
plt.show()
plt.close()
# ### Graph description
# <ul><li><b>Alpha = 0.1</b>
# <br>Here the alpha value is very big. Because of this instead of converging we are diverging away. Both abs(fx+1 - fx) and RMSE appear to be diverging.
# </li><li><b>Alpha = 0.007</b><br>Here also the alpha value is too big. The observed effect is still the same</li><li><b>Alpha = 1.34e-7</b>Now alpha value is small enough for algorithm to converge. RMSE is also converging</li>
# ### Best model
# In[35]:
# Report the best (lowest-RMSE) converged model found on the alpha grid.
print("Best rmse for alpha grid is "+ str(leastRMSE))
print("Best rss for alpha grid is "+ str(leastRSS))
# ### Sample Predictions
# In[36]:
# Actual vs predicted quality for the best constant-step model.
yPrediction = predictionLinearRegression(X_test,bestModelParameters)
df = pd.DataFrame({"Actual":Y_test, "Prediction":yPrediction})
df.head(25)
# ### Armijo Step Length Controller
# In[37]:
figure, ((ax1, ax2)) = plt.subplots(1, 2, sharey='none')
# NOTE(review): `alpha` here is whatever value the grid loop above left
# behind (its last entry, 1.34e-7); presumably the Armijo controller
# overrides the step length anyway, but confirm the initial value is
# intentional.
modelParameters, xArmijo, ydiffArmijo, yRMSEArmijo, rss, rmse = linearRegressionGradientDescent(X_train, Y_train,
                                                                                                X_test, Y_test,
                                                                                                alpha, inputBeta,
                                                                                                maxIterations = 1000,
                                                                                                stepLengthController = armijoStepLengthController,
                                                                                                stepLengthControllerParameters = {"delta" : 0.2})
figure.set_figheight(8)
print("RMSE "+ str(rmse))
ax1.plot(xArmijo, ydiffArmijo)
ax1.grid()
ax1.set_title("Iteration vs abs(fx+1 - fx)")
ax1.set_xlabel("Iterations")
ax1.set_ylabel("(fx+1 - fx)")
ax2.plot(xArmijo, yRMSEArmijo)
ax2.grid()
ax2.set_title("Iteration vs RMSE")
ax2.set_xlabel("Iterations")
ax2.set_ylabel("RMSE")
plt.show()
# ### Sample predictions
# In[38]:
# Actual vs predicted values for the Armijo-controlled model.
yPrediction = predictionLinearRegression(X_test,modelParameters)
df = pd.DataFrame({"Actual":Y_test, "Prediction":yPrediction})
df.head(25)
# ### Bold Driver Step Length Controller
# In[39]:
figure, ((ax1, ax2)) = plt.subplots(1, 2, sharey='none')
modelParameters, xBold, yDiffBold, yRMSEBold, rss, rmse = linearRegressionGradientDescent(X_train, Y_train,
                                                                                          X_test, Y_test,
                                                                                          1.0, inputBeta, maxIterations = 1000,
                                                                                          stepLengthController = boldDriverStepLengthController,
                                                                                          stepLengthControllerParameters =
                                                                                          {"alphaMinus" : 0.9, "alphaPlus" : 1.5})
figure.set_figheight(8)
print("RMSE "+ str(rmse))
# Fixed: this cell used to plot the stale X/Ydiff/Yrmse arrays left over
# from the alpha-grid loop instead of this run's bold-driver results
# (the equivalent boston-data cell above plots xBold/yDiffBold correctly).
ax1.plot(xBold, yDiffBold)
ax1.grid()
ax1.set_title("Iteration vs abs(fx+1 - fx)")
ax1.set_xlabel("Iterations")
ax1.set_ylabel("(fx+1 - fx)")
ax2.plot(xBold, yRMSEBold)
ax2.grid()
ax2.set_title("Iteration vs RMSE")
ax2.set_xlabel("Iterations")
ax2.set_ylabel("RMSE")
plt.show()
# ### Sample predictions
# In[40]:
yPrediction = predictionLinearRegression(X_test,modelParameters)
df = pd.DataFrame({"Actual":Y_test, "Prediction":yPrediction})
df.head(25)
# ### Comparison
# In[41]:
# Overlay the RMSE curves of all three wine-data training strategies.
# (Fixed the "Comparasion" typo in the section header and chart title.)
plt.close()
plt.figure(figsize=(9,7))
plt.plot(bestModelX, bestModelY, label = "Gradient Descent")
plt.plot(xArmijo, yRMSEArmijo, label = "Gradient Descent with Armijo step length controller")
plt.plot(xBold, yRMSEBold, label = "Gradient Descent with Bold driver length controller")
plt.grid()
plt.xlabel("Iteration")
plt.ylabel("RMSE")
plt.title("Comparison of constant steplength and variable steplength with controller")
plt.legend()
plt.show()
| 29.834225 | 307 | 0.650511 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10,662 | 0.382219 |
20ee1c58d70a8362de03cea2a5994ea2c4f42549 | 626 | py | Python | src/pagure/validators.py | yifengyou/learn-pagure | e54ba955368918c92ad2be6347b53bb2c24a228c | [
"Unlicense"
] | null | null | null | src/pagure/validators.py | yifengyou/learn-pagure | e54ba955368918c92ad2be6347b53bb2c24a228c | [
"Unlicense"
] | null | null | null | src/pagure/validators.py | yifengyou/learn-pagure | e54ba955368918c92ad2be6347b53bb2c24a228c | [
"Unlicense"
] | null | null | null | import re
import six
from wtforms import validators
class EmailValidator(object):
    """WTForms-style validator checking that a field holds an email address."""

    def __init__(self, message="The field data was not an email"):
        # Error text raised when the value does not look like an email.
        self._message = message

    def __call__(self, form, email):
        data = email.data
        if not isinstance(data, six.text_type):
            detail = "Email fields should be of text type. Found {0}".format(
                type(data)
            )
            raise validators.ValidationError(detail)
        # Deliberately loose pattern: something@something.something
        if re.match(r"[^@]+@[^@]+\.[^@]+", data) is None:
            raise validators.ValidationError(self._message)
| 28.454545 | 72 | 0.591054 | 570 | 0.910543 | 0 | 0 | 0 | 0 | 0 | 0 | 136 | 0.217252 |
20ef0d8c7ddd5cae1764648c947491c9f4c7cb34 | 10,524 | py | Python | code/vendor/node_js2c.py | thorium-cfx/fivem | 587eb7c12066a2ebf8631bde7bb39ee2df1b5a0c | [
"MIT"
] | 5,411 | 2017-04-14T08:57:56.000Z | 2022-03-30T19:35:15.000Z | code/vendor/node_js2c.py | thorium-cfx/fivem | 587eb7c12066a2ebf8631bde7bb39ee2df1b5a0c | [
"MIT"
] | 802 | 2017-04-21T14:18:36.000Z | 2022-03-31T21:20:48.000Z | code/vendor/node_js2c.py | thorium-cfx/fivem | 587eb7c12066a2ebf8631bde7bb39ee2df1b5a0c | [
"MIT"
] | 2,011 | 2017-04-14T09:44:15.000Z | 2022-03-31T15:40:39.000Z | import os
import subprocess
import sys
inputs = [
'lib/assert/strict.js',
'lib/assert.js',
'lib/async_hooks.js',
'lib/buffer.js',
'lib/child_process.js',
'lib/cluster.js',
'lib/console.js',
'lib/constants.js',
'lib/crypto.js',
'lib/dgram.js',
'lib/diagnostics_channel.js',
'lib/dns/promises.js',
'lib/dns.js',
'lib/domain.js',
'lib/events.js',
'lib/fs/promises.js',
'lib/fs.js',
'lib/http.js',
'lib/http2.js',
'lib/https.js',
'lib/inspector.js',
'lib/internal/abort_controller.js',
'lib/internal/assert/assertion_error.js',
'lib/internal/assert/calltracker.js',
'lib/internal/assert.js',
'lib/internal/async_hooks.js',
'lib/internal/blob.js',
'lib/internal/blocklist.js',
'lib/internal/bootstrap/environment.js',
'lib/internal/bootstrap/loaders.js',
'lib/internal/bootstrap/node.js',
'lib/internal/bootstrap/pre_execution.js',
'lib/internal/bootstrap/switches/does_not_own_process_state.js',
'lib/internal/bootstrap/switches/does_own_process_state.js',
'lib/internal/bootstrap/switches/is_main_thread.js',
'lib/internal/bootstrap/switches/is_not_main_thread.js',
'lib/internal/buffer.js',
'lib/internal/child_process/serialization.js',
'lib/internal/child_process.js',
'lib/internal/cli_table.js',
'lib/internal/cluster/child.js',
'lib/internal/cluster/primary.js',
'lib/internal/cluster/round_robin_handle.js',
'lib/internal/cluster/shared_handle.js',
'lib/internal/cluster/utils.js',
'lib/internal/cluster/worker.js',
'lib/internal/console/constructor.js',
'lib/internal/console/global.js',
'lib/internal/constants.js',
'lib/internal/crypto/aes.js',
'lib/internal/crypto/certificate.js',
'lib/internal/crypto/cipher.js',
'lib/internal/crypto/diffiehellman.js',
'lib/internal/crypto/dsa.js',
'lib/internal/crypto/ec.js',
'lib/internal/crypto/hash.js',
'lib/internal/crypto/hashnames.js',
'lib/internal/crypto/hkdf.js',
'lib/internal/crypto/keygen.js',
'lib/internal/crypto/keys.js',
'lib/internal/crypto/mac.js',
'lib/internal/crypto/pbkdf2.js',
'lib/internal/crypto/random.js',
'lib/internal/crypto/rsa.js',
'lib/internal/crypto/scrypt.js',
'lib/internal/crypto/sig.js',
'lib/internal/crypto/util.js',
'lib/internal/crypto/webcrypto.js',
'lib/internal/crypto/x509.js',
'lib/internal/debugger/inspect.js',
'lib/internal/debugger/inspect_client.js',
'lib/internal/debugger/inspect_repl.js',
'lib/internal/dgram.js',
'lib/internal/dns/promises.js',
'lib/internal/dns/utils.js',
'lib/internal/dtrace.js',
'lib/internal/encoding.js',
'lib/internal/errors.js',
'lib/internal/error_serdes.js',
'lib/internal/event_target.js',
'lib/internal/fixed_queue.js',
'lib/internal/freelist.js',
'lib/internal/freeze_intrinsics.js',
'lib/internal/fs/cp/cp-sync.js',
'lib/internal/fs/cp/cp.js',
'lib/internal/fs/dir.js',
'lib/internal/fs/promises.js',
'lib/internal/fs/read_file_context.js',
'lib/internal/fs/rimraf.js',
'lib/internal/fs/streams.js',
'lib/internal/fs/sync_write_stream.js',
'lib/internal/fs/utils.js',
'lib/internal/fs/watchers.js',
'lib/internal/heap_utils.js',
'lib/internal/histogram.js',
'lib/internal/http.js',
'lib/internal/http2/compat.js',
'lib/internal/http2/core.js',
'lib/internal/http2/util.js',
'lib/internal/idna.js',
'lib/internal/inspector_async_hook.js',
'lib/internal/js_stream_socket.js',
'lib/internal/legacy/processbinding.js',
'lib/internal/linkedlist.js',
'lib/internal/main/check_syntax.js',
'lib/internal/main/eval_stdin.js',
'lib/internal/main/eval_string.js',
'lib/internal/main/inspect.js',
'lib/internal/main/print_help.js',
'lib/internal/main/prof_process.js',
'lib/internal/main/repl.js',
'lib/internal/main/run_main_module.js',
'lib/internal/main/worker_thread.js',
'lib/internal/modules/cjs/helpers.js',
'lib/internal/modules/cjs/loader.js',
'lib/internal/modules/esm/create_dynamic_module.js',
'lib/internal/modules/esm/get_format.js',
'lib/internal/modules/esm/get_source.js',
'lib/internal/modules/esm/loader.js',
'lib/internal/modules/esm/module_job.js',
'lib/internal/modules/esm/module_map.js',
'lib/internal/modules/esm/resolve.js',
'lib/internal/modules/esm/transform_source.js',
'lib/internal/modules/esm/translators.js',
'lib/internal/modules/package_json_reader.js',
'lib/internal/modules/run_main.js',
'lib/internal/net.js',
'lib/internal/options.js',
'lib/internal/perf/event_loop_delay.js',
'lib/internal/perf/event_loop_utilization.js',
'lib/internal/perf/nodetiming.js',
'lib/internal/perf/observe.js',
'lib/internal/perf/performance.js',
'lib/internal/perf/performance_entry.js',
'lib/internal/perf/timerify.js',
'lib/internal/perf/usertiming.js',
'lib/internal/perf/utils.js',
'lib/internal/per_context/domexception.js',
'lib/internal/per_context/messageport.js',
'lib/internal/per_context/primordials.js',
'lib/internal/policy/manifest.js',
'lib/internal/policy/sri.js',
'lib/internal/priority_queue.js',
'lib/internal/process/esm_loader.js',
'lib/internal/process/execution.js',
'lib/internal/process/per_thread.js',
'lib/internal/process/policy.js',
'lib/internal/process/promises.js',
'lib/internal/process/report.js',
'lib/internal/process/signal.js',
'lib/internal/process/task_queues.js',
'lib/internal/process/warning.js',
'lib/internal/process/worker_thread_only.js',
'lib/internal/querystring.js',
'lib/internal/readline/callbacks.js',
'lib/internal/readline/emitKeypressEvents.js',
'lib/internal/readline/utils.js',
'lib/internal/repl/await.js',
'lib/internal/repl/history.js',
'lib/internal/repl/utils.js',
'lib/internal/repl.js',
'lib/internal/socketaddress.js',
'lib/internal/socket_list.js',
'lib/internal/source_map/prepare_stack_trace.js',
'lib/internal/source_map/source_map.js',
'lib/internal/source_map/source_map_cache.js',
'lib/internal/streams/add-abort-signal.js',
'lib/internal/streams/buffer_list.js',
'lib/internal/streams/compose.js',
'lib/internal/streams/destroy.js',
'lib/internal/streams/duplex.js',
'lib/internal/streams/duplexify.js',
'lib/internal/streams/end-of-stream.js',
'lib/internal/streams/from.js',
'lib/internal/streams/lazy_transform.js',
'lib/internal/streams/legacy.js',
'lib/internal/streams/passthrough.js',
'lib/internal/streams/pipeline.js',
'lib/internal/streams/readable.js',
'lib/internal/streams/state.js',
'lib/internal/streams/transform.js',
'lib/internal/streams/utils.js',
'lib/internal/streams/writable.js',
'lib/internal/stream_base_commons.js',
'lib/internal/test/binding.js',
'lib/internal/test/transfer.js',
'lib/internal/timers.js',
'lib/internal/tls/parse-cert-string.js',
'lib/internal/tls/secure-context.js',
'lib/internal/tls/secure-pair.js',
'lib/internal/trace_events_async_hooks.js',
'lib/internal/tty.js',
'lib/internal/url.js',
'lib/internal/util/comparisons.js',
'lib/internal/util/debuglog.js',
'lib/internal/util/inspect.js',
'lib/internal/util/inspector.js',
'lib/internal/util/iterable_weak_map.js',
'lib/internal/util/types.js',
'lib/internal/util.js',
'lib/internal/v8_prof_polyfill.js',
'lib/internal/v8_prof_processor.js',
'lib/internal/validators.js',
'lib/internal/vm/module.js',
'lib/internal/watchdog.js',
'lib/internal/webstreams/encoding.js',
'lib/internal/webstreams/queuingstrategies.js',
'lib/internal/webstreams/readablestream.js',
'lib/internal/webstreams/transfer.js',
'lib/internal/webstreams/transformstream.js',
'lib/internal/webstreams/util.js',
'lib/internal/webstreams/writablestream.js',
'lib/internal/worker/io.js',
'lib/internal/worker/js_transferable.js',
'lib/internal/worker.js',
'lib/module.js',
'lib/net.js',
'lib/os.js',
'lib/path/posix.js',
'lib/path/win32.js',
'lib/path.js',
'lib/perf_hooks.js',
'lib/process.js',
'lib/punycode.js',
'lib/querystring.js',
'lib/readline.js',
'lib/repl.js',
'lib/stream/consumers.js',
'lib/stream/promises.js',
'lib/stream/web.js',
'lib/stream.js',
'lib/string_decoder.js',
'lib/sys.js',
'lib/timers/promises.js',
'lib/timers.js',
'lib/tls.js',
'lib/trace_events.js',
'lib/tty.js',
'lib/url.js',
'lib/util/types.js',
'lib/util.js',
'lib/v8.js',
'lib/vm.js',
'lib/wasi.js',
'lib/worker_threads.js',
'lib/zlib.js',
'lib/_http_agent.js',
'lib/_http_client.js',
'lib/_http_common.js',
'lib/_http_incoming.js',
'lib/_http_outgoing.js',
'lib/_http_server.js',
'lib/_stream_duplex.js',
'lib/_stream_passthrough.js',
'lib/_stream_readable.js',
'lib/_stream_transform.js',
'lib/_stream_wrap.js',
'lib/_stream_writable.js',
'lib/_tls_common.js',
'lib/_tls_wrap.js',
'deps/v8/tools/splaytree.mjs',
'deps/v8/tools/codemap.mjs',
'deps/v8/tools/consarray.mjs',
'deps/v8/tools/csvparser.mjs',
'deps/v8/tools/profile.mjs',
'deps/v8/tools/profile_view.mjs',
'deps/v8/tools/logreader.mjs',
'deps/v8/tools/arguments.mjs',
'deps/v8/tools/tickprocessor.mjs',
'deps/v8/tools/sourcemap.mjs',
'deps/v8/tools/tickprocessor-driver.mjs',
'deps/acorn/acorn/dist/acorn.js',
'deps/acorn/acorn-walk/dist/walk.js',
'deps/cjs-module-lexer/lexer.js',
'deps/cjs-module-lexer/dist/lexer.js',
'lib/_third_party_main.js',
'config.gypi',
]
deps = [
'deps/v8/tools/splaytree.mjs',
'deps/v8/tools/codemap.mjs',
'deps/v8/tools/consarray.mjs',
'deps/v8/tools/csvparser.mjs',
'deps/v8/tools/profile.mjs',
'deps/v8/tools/profile_view.mjs',
'deps/v8/tools/logreader.mjs',
'deps/v8/tools/arguments.mjs',
'deps/v8/tools/tickprocessor.mjs',
'deps/v8/tools/sourcemap.mjs',
'deps/v8/tools/tickprocessor-driver.mjs',
'deps/acorn/acorn/dist/acorn.js',
'deps/acorn/acorn-walk/dist/walk.js',
'deps/cjs-module-lexer/lexer.js',
'deps/cjs-module-lexer/dist/lexer.js',
]
noderoot = sys.argv[1]
mtimes = []
for inFile in deps:
mtimes = mtimes + [ os.path.getmtime(os.path.join(noderoot, inFile)) ]
mtimes = mtimes + [ os.path.getmtime(sys.argv[0]) ]
mtimes.sort()
mtimes.reverse()
minputs = []
for inFile in deps:
minputs = minputs + [ inFile.replace('/', os.path.sep) ]
outFile = os.path.join(noderoot, 'src/node_javascript.cc')
if not os.path.exists(outFile) or os.path.getmtime(outFile) < mtimes[0]:
subprocess.check_call([sys.executable, 'tools/js2c.py', '--directory', 'lib', '--target', 'src/node_javascript.cc', 'config.gypi'] + deps, cwd = noderoot)
| 32.481481 | 155 | 0.711612 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8,767 | 0.833048 |
45465c3ae216f2ba2ed2de99dd3ca07f116d36fd | 1,982 | py | Python | docs/util.py | simplebutneeded/python-measurement | b14f954fc74424ed3b1c9780e089cd91c19be45c | [
"MIT"
] | null | null | null | docs/util.py | simplebutneeded/python-measurement | b14f954fc74424ed3b1c9780e089cd91c19be45c | [
"MIT"
] | null | null | null | docs/util.py | simplebutneeded/python-measurement | b14f954fc74424ed3b1c9780e089cd91c19be45c | [
"MIT"
] | 2 | 2016-09-15T18:36:33.000Z | 2019-07-17T17:39:23.000Z | from __future__ import print_function
from measurement.base import MeasureBase, BidimensionalMeasure
from measurement.utils import get_all_measures
for measure in get_all_measures():
    # Emit one reST section per measure class: the class name underlined,
    # followed by the units/aliases it accepts.  Exact spacing of the
    # printed strings matters -- the output is pasted into the docs.
    classname = measure.__name__
    print(classname)
    print('-' * len(classname))
    print()
    if issubclass(measure, MeasureBase):
        # Simple measures: list canonical unit names and their aliases.
        units = measure.get_units()
        aliases = measure.get_aliases()
        print(
            '* *Acceptable as Arguments or Attributes*: %s' % (
                ', '.join(sorted(['``%s``' % unit for unit in units]))
            )
        )
        print(
            '* *Acceptable as Arguments*: %s' % (
                ', '.join(sorted(['``%s``' % alias for alias in aliases]))
            )
        )
    elif issubclass(measure, BidimensionalMeasure):
        # Bi-dimensional measures (e.g. Speed) get a canned explanation
        # plus their primary/reference measurement class names.
        print(".. note::")
        print("    This is a bi-dimensional measurement; bi-dimensional")
        print("    measures are created by finding an appropriate unit in the")
        print("    measure's primary measurement class, and an appropriate")
        print("    in the measure's reference class, and using them as a")
        print("    double-underscore-separated keyword argument (or, if")
        print("    converting to another unit, as an attribute).")
        print()
        print("    For example, to create an object representing 24 miles-per")
        print("    hour::")
        print()
        print("        >>> from measurement.measure import Speed")
        print("        >>> my_speed = Speed(mile__hour=24)")
        print("        >>> print my_speed")
        print("        24.0 mi/hr")
        print("        >>> print my_speed.km__hr")
        print("        38.624256")
        print()
        print(
            "* *Primary Measurement*: %s" % (
                measure.PRIMARY_DIMENSION.__name__
            )
        )
        print(
            "* *Reference Measurement*: %s" % (
                measure.REFERENCE_DIMENSION.__name__
            )
        )
    print()
| 36.703704 | 78 | 0.55449 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 783 | 0.395055 |
45472a59e3945cb5ab443cb81e44f1ea2ad25391 | 3,006 | py | Python | gibson2/core/render/mesh_renderer/mesh_renderer_tensor.py | myalfred03/GibsonEnvV2 | 9dfc340d85e167983df7abfd7425d4ba78ab8524 | [
"MIT"
] | null | null | null | gibson2/core/render/mesh_renderer/mesh_renderer_tensor.py | myalfred03/GibsonEnvV2 | 9dfc340d85e167983df7abfd7425d4ba78ab8524 | [
"MIT"
] | null | null | null | gibson2/core/render/mesh_renderer/mesh_renderer_tensor.py | myalfred03/GibsonEnvV2 | 9dfc340d85e167983df7abfd7425d4ba78ab8524 | [
"MIT"
] | null | null | null | import cv2
import sys
import numpy as np
from gibson2.core.render.mesh_renderer.mesh_renderer_cpu import MeshRenderer, GL
import torch
from gibson2.core.render.mesh_renderer.get_available_devices import get_cuda_device
class MeshRendererG2G(MeshRenderer):
    """
    Similar to MeshRenderer, but allows rendering to pytorch tensor, note that
    pytorch installation is required.
    """
    def __init__(self, width=512, height=512, fov=90, device_idx=0, use_fisheye=False):
        super(MeshRendererG2G, self).__init__(width, height, fov, device_idx, use_fisheye)
        self.cuda_idx = get_cuda_device(self.device_minor)
        print("Using cuda device {}".format(self.cuda_idx))
        # Pre-allocate one CUDA tensor per output modality so each frame can
        # be copied from the GL textures without per-frame allocation.
        with torch.cuda.device(self.cuda_idx):
            self.image_tensor = torch.cuda.ByteTensor(height, width, 4).cuda()
            self.normal_tensor = torch.cuda.ByteTensor(height, width, 4).cuda()
            self.seg_tensor = torch.cuda.ByteTensor(height, width, 4).cuda()
            self.pc_tensor = torch.cuda.FloatTensor(height, width, 4).cuda()
    def readbuffer_to_tensor(self, modes=('rgb', 'normal', 'seg', '3d')):
        """Copy the framebuffer textures selected by *modes* into the
        pre-allocated CUDA tensors and return clones of them.

        The returned list is always ordered rgb, normal, seg, 3d,
        regardless of the ordering inside *modes*.
        """
        results = []
        with torch.cuda.device(self.cuda_idx):
            if 'rgb' in modes:
                self.r.map_tensor(int(self.color_tex_rgb), int(self.width), int(self.height),
                                  self.image_tensor.data_ptr())
                results.append(self.image_tensor.clone())
            if 'normal' in modes:
                self.r.map_tensor(int(self.color_tex_normal), int(self.width), int(self.height),
                                  self.normal_tensor.data_ptr())
                results.append(self.normal_tensor.clone())
            if 'seg' in modes:
                self.r.map_tensor(int(self.color_tex_semantics), int(self.width), int(self.height),
                                  self.seg_tensor.data_ptr())
                results.append(self.seg_tensor.clone())
            if '3d' in modes:
                self.r.map_tensor_float(int(self.color_tex_3d), int(self.width), int(self.height),
                                        self.pc_tensor.data_ptr())
                results.append(self.pc_tensor.clone())
        return results
    def render(self, modes=('rgb', 'normal', 'seg', '3d'), hidden=()):
        """
        A function to render all the instances in the renderer and read the output from framebuffer into pytorch tensor.
        :param modes: it should be a tuple consisting of a subset of ('rgb', 'normal', 'seg', '3d').
        :param hidden: Hidden instances to skip. When rendering from a robot's perspective, it's own body can be
        hidden
        """
        GL.glClearColor(0, 0, 0, 1)
        GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
        GL.glEnable(GL.GL_DEPTH_TEST)
        for instance in self.instances:
            # Idiom fix: `instance not in hidden` (was `not instance in hidden`).
            if instance not in hidden:
                instance.render()
        GL.glDisable(GL.GL_DEPTH_TEST)
        return self.readbuffer_to_tensor(modes)
4547e6bcdb29f652198235c9fc8271e10d92d051 | 117 | py | Python | BasicConcepts/SyntaxErrors/Volume1_Chapter6_SyntaxErrors_Fixed.py | jpike/PythonProgrammingForKids | 79a36d6db525d39f78e33b6f7b2d0da0d65a073c | [
"Unlicense"
] | null | null | null | BasicConcepts/SyntaxErrors/Volume1_Chapter6_SyntaxErrors_Fixed.py | jpike/PythonProgrammingForKids | 79a36d6db525d39f78e33b6f7b2d0da0d65a073c | [
"Unlicense"
] | null | null | null | BasicConcepts/SyntaxErrors/Volume1_Chapter6_SyntaxErrors_Fixed.py | jpike/PythonProgrammingForKids | 79a36d6db525d39f78e33b6f7b2d0da0d65a073c | [
"Unlicense"
] | null | null | null | # Commenting syntax errors.
print("Printing out a message!") # This is a comment.
floating_point_number = 1.5 * 2.7
| 29.25 | 54 | 0.726496 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 72 | 0.615385 |
454862ed0335535e0622f4cd4d4b3040600274f8 | 254 | py | Python | setup.py | piotrmaslanka/python-cassandra-jaeger | 961a33955e54a84348d97a5e0b0985a3850ee1ad | [
"MIT"
] | null | null | null | setup.py | piotrmaslanka/python-cassandra-jaeger | 961a33955e54a84348d97a5e0b0985a3850ee1ad | [
"MIT"
] | null | null | null | setup.py | piotrmaslanka/python-cassandra-jaeger | 961a33955e54a84348d97a5e0b0985a3850ee1ad | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Import setup() from setuptools rather than the deprecated distutils
# (PEP 632: distutils is removed in Python 3.12); mixing distutils'
# setup() with setuptools' find_packages() also confuses modern tooling.
from setuptools import find_packages, setup

import python_cassandra_jaeger

setup(version=python_cassandra_jaeger.__version__,
      packages=find_packages(include=['python_cassandra_jaeger']),
      )
| 19.538462 | 66 | 0.795276 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 46 | 0.181102 |
4549bd855bffd1eea443aa41d7d9077471e5b107 | 6,544 | py | Python | demultiplexer/demultiplexing.py | DominikBuchner/demultiplexer | f707bf4deaff9929a5778a5a4501966e8fd5a0b8 | [
"MIT"
] | null | null | null | demultiplexer/demultiplexing.py | DominikBuchner/demultiplexer | f707bf4deaff9929a5778a5a4501966e8fd5a0b8 | [
"MIT"
] | 1 | 2021-01-05T14:28:26.000Z | 2021-01-05T14:28:26.000Z | demultiplexer/demultiplexing.py | DominikBuchner/demultiplexer | f707bf4deaff9929a5778a5a4501966e8fd5a0b8 | [
"MIT"
] | null | null | null | import openpyxl, gzip, datetime, psutil, time
from Bio.Data.IUPACData import ambiguous_dna_values
from itertools import product
from Bio.SeqIO.QualityIO import FastqGeneralIterator
from pathlib import Path
from joblib import Parallel, delayed
def extend_ambiguous_dna(seq):
    """Return every concrete DNA sequence an ambiguous input can encode.

    Each IUPAC ambiguity code in *seq* is looked up in Biopython's
    ``ambiguous_dna_values`` table and the Cartesian product over all
    positions is returned as a list of plain strings.
    """
    per_base_options = map(ambiguous_dna_values.get, seq)
    return ["".join(variant) for variant in product(*per_base_options)]
## Expands a tag-to-site dict: every ambiguous (fwd_tag, rev_tag) key is
## replaced by all concrete tag combinations it can produce, each mapping
## to the same site, i.e. the result is again {(fwd_tag, rev_tag): site}.
def extend_tag_to_site_dict(dict):
    """Expand ambiguous tag pairs into every concrete (fwd, rev) -> site entry.

    NOTE(review): the parameter shadows the builtin ``dict``; the name is
    kept unchanged for interface compatibility.
    """
    expanded = {}
    for key, site in dict.items():
        fwd_variants = extend_ambiguous_dna(key[0])
        rev_variants = extend_ambiguous_dna(key[1])
        for tag_pair in product(fwd_variants, rev_variants):
            expanded[tag_pair] = site
    return expanded
## The core demultiplexing routine. It processes a single row of the
## tagging scheme, which keeps it easy to run the rows in parallel.
## primerset is the {name: sequence} dict built by main();
## tagging_scheme_line is one row of the tagging scheme file.
def demultiplex(primerset, tagging_scheme_header, tagging_scheme_line, output_folder, tag_removal):
    """Demultiplex one forward/reverse gzipped FASTQ pair into per-sample files.

    primerset -- {primer name: tag sequence} mapping built by main()
    tagging_scheme_header -- header row of the tagging scheme
    tagging_scheme_line -- one data row: [fwd path, rev path, fwd name,
        rev name, sample per tag combination, ...]
    output_folder -- destination for the per-sample fastq.gz files
    tag_removal -- if True, the matched tags are cut off the reads
    """
    ## paired iterators over the two gzipped input files
    in_handles = {'fwd': FastqGeneralIterator(gzip.open(Path(tagging_scheme_line[0]), 'rt')),
                  'rev': FastqGeneralIterator(gzip.open(Path(tagging_scheme_line[1]), 'rt'))}
    ## read the tag combinations from the header, replacing names with sequences
    combinations = [tuple(cell.split(' - ')) for cell in tagging_scheme_header[4:]]
    combinations = [(primerset[primername[0]], primerset[primername[1]]) for primername in combinations]
    ## connect combinations and sites, skipping unused combinations
    tag_to_site = {tag: site for tag, site in zip(combinations, tagging_scheme_line[4:]) if site}
    ## blow up the tag-to-site dict by taking ambiguities into account;
    ## this slows the matching loop down a lot, so it is a good idea not
    ## to have ambiguous bases in the tags
    tag_to_site = extend_tag_to_site_dict(tag_to_site)
    ## generate all output handles; the no-match handles are added last and
    ## receive everything that does not match any tag
    out_handles = {}
    for sample in tagging_scheme_line[4:]:
        if sample:
            fwd_path = Path(output_folder).joinpath('{}_r1.fastq.gz'.format(sample))
            rev_path = Path(output_folder).joinpath('{}_r2.fastq.gz'.format(sample))
            out_handles[sample] = (gzip.open(fwd_path, 'wt'), gzip.open(rev_path, 'wt'))
    nomatch_fwd = Path(output_folder).joinpath('no_match_{}_r1.fastq.gz'.format(Path(tagging_scheme_line[2]).with_suffix('').with_suffix('')))
    nomatch_rev = Path(output_folder).joinpath('no_match_{}_r2.fastq.gz'.format(Path(tagging_scheme_line[3]).with_suffix('').with_suffix('')))
    out_handles['nomatch'] = (gzip.open(nomatch_fwd, 'wt'), gzip.open(nomatch_rev, 'wt'))
    ## core loop: check every read pair against all tag combinations and
    ## route it to the matching sample file, optionally cutting the tags
    count = 0
    for (title_f, seq_f, qual_f), (title_r, seq_r, qual_r) in zip(in_handles['fwd'], in_handles['rev']):
        no_match = False
        ## check all combinations for a match
        for combination in tag_to_site.keys():
            if seq_f.startswith(combination[0]) and seq_r.startswith(combination[1]):
                ## tag lengths are only needed when the tags are cut off
                if tag_removal:
                    fwd_tag_len, rev_tag_len = len(combination[0]), len(combination[1])
                else:
                    fwd_tag_len, rev_tag_len = 0, 0
                out_handles[tag_to_site[combination]][0].write('@{}\n{}\n+\n{}\n'.format(title_f, seq_f[fwd_tag_len:], qual_f[fwd_tag_len:]))
                out_handles[tag_to_site[combination]][1].write('@{}\n{}\n+\n{}\n'.format(title_r, seq_r[rev_tag_len:], qual_r[rev_tag_len:]))
                count += 1
                ## Fixed: no_match must be cleared on a hit.  Previously a
                ## read matching any combination other than the first one
                ## tried was ALSO written to the no-match files (and counted
                ## twice), because earlier misses had set no_match = True.
                no_match = False
                break
            else:
                no_match = True
        ## reads that matched no combination go to the no-match files
        if no_match:
            out_handles['nomatch'][0].write('@{}\n{}\n+\n{}\n'.format(title_f, seq_f, qual_f))
            out_handles['nomatch'][1].write('@{}\n{}\n+\n{}\n'.format(title_r, seq_r, qual_r))
            count += 1
    ## close all output files once the inputs are exhausted
    for sample in out_handles.keys():
        out_handles[sample][0].close()
        out_handles[sample][1].close()
    print('{}: {} - {}: {} reads demultiplexed.'.format(datetime.datetime.now().strftime("%H:%M:%S"), tagging_scheme_line[2], tagging_scheme_line[3], count))
## Main function controlling the demultiplexing process. It accepts the
## primerset, tagging scheme and output folder from the main script, plus
## the optional tag-removal flag and GUI handles for pretty output.
def main(primerset, tagging_scheme, output_folder, tag_removal, print_handle, window):
    """Drive the demultiplexing of every file pair listed in the tagging scheme.

    Args:
        primerset: path to a CSV file mapping primer names to sequences.
        tagging_scheme: path to an Excel (.xlsx) tagging scheme workbook.
        output_folder: folder receiving the demultiplexed output files.
        tag_removal: bool, if True the tag is cut off during demultiplexing.
        print_handle: GUI print handle for user-facing status output.
        window: GUI window object; refreshed so the status message is shown
            before the long-running parallel step freezes the UI.
    """
    ## creates a dict where primer names are associated with the corresponding sequence
    ## use a context manager so the file handle is closed again (it leaked before)
    with open(primerset, 'r') as primer_file:
        # NOTE(review): the value keeps everything after the first comma including
        # a trailing newline if the CSV has exactly two columns -- confirm the
        # downstream tag matching copes with that.
        primerset = {line.split(',')[0]: line.split(',')[1] for line in primer_file}
    ## load the tagging scheme
    wb = openpyxl.load_workbook(tagging_scheme)
    ws = wb.active
    ## collect all rows from the tagging scheme; rows[0] is the header row
    rows = [[cell.value for cell in row] for row in ws.iter_rows()]
    ## compute physical cores - 1 to use for demutliplexing; psutil may return
    ## None and a 1-core machine would otherwise yield 0 workers, so clamp to >= 1
    cores_to_use = max(1, (psutil.cpu_count(logical = False) or 1) - 1)
    ## run the demultiplex function on every line in the tagging scheme in parallel
    print_handle.print('{}: Starting to demultiplex {} file pairs. Output will be routed to the terminal. The window will freeze during this process'.format(datetime.datetime.now().strftime("%H:%M:%S"), len(rows) - 1))
    window.Refresh()
    Parallel(n_jobs = cores_to_use)(delayed(demultiplex)(primerset, rows[0], rows[i], output_folder, tag_removal) for i in range(1, len(rows)))
    print_handle.print('{}: Done'.format(datetime.datetime.now().strftime("%H:%M:%S")))
| 49.203008 | 218 | 0.687347 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,549 | 0.389517 |
454e58f16798ed0377b73359904d93ec690b6f7f | 1,908 | py | Python | test.py | rapidrabbit76/PaintsTensorFlow | a26de880dc43c915f3552b58e01a4314847fd58a | [
"MIT"
] | 63 | 2019-01-03T02:00:33.000Z | 2022-03-23T03:56:24.000Z | test.py | rapidrabbit76/PaintsTensorFlow | a26de880dc43c915f3552b58e01a4314847fd58a | [
"MIT"
] | 24 | 2019-02-13T09:04:19.000Z | 2022-03-26T16:57:07.000Z | test.py | rapidrabbit76/PaintsTensorFlow | a26de880dc43c915f3552b58e01a4314847fd58a | [
"MIT"
] | 10 | 2019-01-14T13:11:52.000Z | 2021-07-07T13:17:59.000Z | import tensorflow as tf
import numpy as np
import cv2
from dataset.Datasets import Datasets_512
import utils
__DRAFT_MODEL_PATH__ = "./GUI/src/saved_model/PaintsTensorFlowDraftModel"
__MODEL_PATH__ = "./GUI//src/saved_model/PaintsTensorFlowModel"
class PaintsTensorflowTest:
    """Visual smoke test for the two-stage PaintsTensorFlow pipeline.

    Runs the 128px draft generator and the 512px refinement generator on
    training samples and shows each prediction next to the ground-truth
    image with OpenCV (press a key to advance).
    """

    def __init__(self, log=False):
        # allow_growth avoids grabbing all GPU memory up front
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        self.sess = tf.Session(config=config)
        # stage 1 (draft) and stage 2 (refinement) saved Keras models
        self.__pre_generator = tf.contrib.saved_model.load_keras_model(__DRAFT_MODEL_PATH__)
        self.__generator = tf.contrib.saved_model.load_keras_model(__MODEL_PATH__)
        # NOTE(review): this writes the graph of a *new* tf.Session, not
        # self.sess -- confirm that is intended.
        if log: tf.summary.FileWriter("./log", tf.Session().graph)

    def test(self, itr=100, zero_hint=False):
        """Display `itr` predictions from the training set.

        With zero_hint=True the real color hints are replaced by a constant
        array filled with 2s (presumably the "no hint" sentinel value --
        TODO confirm against the training code).
        """
        sess = self.sess
        dataSets = Datasets_512(batch_size=1)
        train_sets, test_sets = dataSets.buildDataSets()
        train_sets = train_sets.make_initializable_iterator()
        sess.run(train_sets.initializer)
        train_next = train_sets.get_next()
        for _ in range(itr):
            line_128, hint_128, image, line, _ = sess.run(train_next)
            # constant hint filled with 2s, used when zero_hint is requested
            hint = np.ones_like(hint_128)
            hint += 1
            if zero_hint:
                draft = self.__pre_generator.predict([line_128, hint])
            else:
                draft = self.__pre_generator.predict([line_128, hint_128])
            # upscale the 128px draft to the refinement model's 512px input
            # NOTE(review): this adds a new resize op to the graph on every
            # iteration, so the graph grows with itr.
            draft = tf.image.resize_images(draft, size=(512, 512),
                                           method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
            draft = sess.run(draft)
            outputs = self.__generator.predict([line, draft])
            # show prediction and ground truth side by side
            outputs = np.concatenate([outputs, image], 2)
            outputs = utils.convert2uint8(outputs)[0]
            cv2.imshow("", cv2.cvtColor(outputs, cv2.COLOR_RGB2BGR))
            # wait for a key press before showing the next sample
            cv2.waitKey(0)
if __name__ == '__main__':
    # quick manual smoke test: show three predictions
    test = PaintsTensorflowTest()
    test.test(3)
| 34.071429 | 92 | 0.645178 | 1,577 | 0.82652 | 0 | 0 | 0 | 0 | 0 | 0 | 115 | 0.060273 |
454f908f7e41a06c5a4c9549749b050cd2e1d924 | 8,027 | py | Python | ipython/attachments/Weave/iterators_example.py | cassiasamp/scipy-cookbook | 67c120be33302554edfd7fe7962f3e2773109021 | [
"BSD-3-Clause"
] | 408 | 2016-05-26T04:17:59.000Z | 2022-03-18T09:19:59.000Z | ipython/attachments/Weave/iterators_example.py | cassiasamp/scipy-cookbook | 67c120be33302554edfd7fe7962f3e2773109021 | [
"BSD-3-Clause"
] | 25 | 2016-08-28T22:20:53.000Z | 2021-11-08T16:37:00.000Z | ipython/attachments/Weave/iterators_example.py | cassiasamp/scipy-cookbook | 67c120be33302554edfd7fe7962f3e2773109021 | [
"BSD-3-Clause"
] | 185 | 2016-06-05T03:27:49.000Z | 2022-01-28T21:14:02.000Z | #!/usr/bin/env python
import sys
import numpy as npy
import pylab as P
from scipy.weave import inline, converters, blitz
from scipy.testing import measure
# Blitz conversion is terrific, but sometimes you don't have fixed array sizes
# in your problem. Fortunately numpy iterators still make writing inline
# weave code very, very simple.
def multi_iter_example():
    """Demonstrate NumPy multi-iterator broadcasting from inline C++ code.

    The same dimension-agnostic C++ snippet computes a *= b in place; shaping
    b as (1, 4) scales the columns of a, while (4, 1) scales the rows.
    (Python 2 / scipy.weave era code.)
    """
    # This is a very simple example of multi dimensional iterators, and
    # their power to "broadcast" arrays of compatible shapes. It shows that
    # the very same code that is entirely ignorant of dimensionality can
    # achieve completely different computations based on the rules of
    # broadcasting.

    # it is important to know that the weave array conversion of "a"
    # gives you access in C++ to:
    # py_a -- PyObject *
    # a_array -- PyArrayObject *
    # a -- py_array->data
    a = npy.ones((4,4), npy.float64)
    # for the sake of driving home the "dynamic code" approach...
    # (map NumPy dtypes to the matching C type name spliced into the code)
    dtype2ctype = {
        npy.dtype(npy.float64): 'double',
        npy.dtype(npy.float32): 'float',
        npy.dtype(npy.int32): 'int',
        npy.dtype(npy.int16): 'short',
    }
    dt = dtype2ctype.get(a.dtype)

    # this code does a = a*b inplace, broadcasting b to fit the shape of a
    code = \
"""
%s *p1, *p2;
PyObject *itr;
itr = PyArray_MultiIterNew(2, a_array, b_array);
while(PyArray_MultiIter_NOTDONE(itr)) {
  p1 = (%s *) PyArray_MultiIter_DATA(itr, 0);
  p2 = (%s *) PyArray_MultiIter_DATA(itr, 1);
  *p1 = (*p1) * (*p2);
  PyArray_MultiIter_NEXT(itr);
}
""" % (dt, dt, dt)

    b = npy.arange(4, dtype=a.dtype)
    print '\n A B '
    print a, b
    # this reshaping is redundant, it would be the default broadcast
    b.shape = (1,4)
    inline(code, ['a', 'b'])
    print "\ninline version of a*b[None,:],"
    print a
    # reset a and broadcast b down the other axis instead
    a = npy.ones((4,4), npy.float64)
    b = npy.arange(4, dtype=a.dtype)
    b.shape = (4,1)
    inline(code, ['a', 'b'])
    print "\ninline version of a*b[:,None],"
    print a
def data_casting_test():
    """Benchmark three strategies for casting raw big-endian integer MR data
    into complex64 volumes, across three on-disk block layouts, and plot the
    timings (as a percentage of the slowest) with pylab.
    """
    # In my MR application, raw data is stored as a file with one or more
    # (block-hdr, block-data) pairs. Block data is one or more
    # rows of Npt complex samples in big-endian integer pairs (real, imag).
    #
    # At the block level, I encounter three different raw data layouts--
    # 1) one plane, or slice: Y rows by 2*Npt samples
    # 2) one volume: Z slices * Y rows by 2*Npt samples
    # 3) one row sliced across the z-axis: Z slices by 2*Npt samples
    #
    # The task is to tease out one volume at a time from any given layout,
    # and cast the integer precision data into a complex64 array.
    # Given that contiguity is not guaranteed, and the number of dimensions
    # can vary, Numpy iterators are useful to provide a single code that can
    # carry out the conversion.
    #
    # Other solutions include:
    # 1) working entirely with the string data from file.read() with string
    #    manipulations (simulated below).
    # 2) letting numpy handle automatic byteorder/dtype conversion
    nsl, nline, npt = (20,64,64)
    # opaque 28-byte block header, modeled as a big-endian void dtype
    hdr_dt = npy.dtype('>V28')

    # example 1: a block is one slice of complex samples in short integer pairs
    blk_dt1 = npy.dtype(('>i2', nline*npt*2))
    dat_dt = npy.dtype({'names': ['hdr', 'data'], 'formats': [hdr_dt, blk_dt1]})
    # create an empty volume-- nsl contiguous blocks
    vol = npy.empty((nsl,), dat_dt)
    t = time_casting(vol[:]['data'])
    P.plot(100*t/t.max(), 'b--', label='vol=20 contiguous blocks')
    P.plot(100*t/t.max(), 'bo')

    # example 2: a block is one entire volume
    blk_dt2 = npy.dtype(('>i2', nsl*nline*npt*2))
    dat_dt = npy.dtype({'names': ['hdr', 'data'], 'formats': [hdr_dt, blk_dt2]})
    # create an empty volume-- 1 block
    vol = npy.empty((1,), dat_dt)
    t = time_casting(vol[0]['data'])
    P.plot(100*t/t.max(), 'g--', label='vol=1 contiguous block')
    P.plot(100*t/t.max(), 'go')

    # example 3: a block slices across the z dimension, long integer precision
    # ALSO--a given volume is sliced discontiguously
    blk_dt3 = npy.dtype(('>i4', nsl*npt*2))
    dat_dt = npy.dtype({'names': ['hdr', 'data'], 'formats': [hdr_dt, blk_dt3]})
    # a real data set has volumes interleaved, so create two volumes here
    vols = npy.empty((2*nline,), dat_dt)
    # and work on casting the first volume (every second block)
    t = time_casting(vols[0::2]['data'])
    P.plot(100*t/t.max(), 'r--', label='vol=64 discontiguous blocks')
    P.plot(100*t/t.max(), 'ro')
    # label the three strategies on the x axis and tidy up the plot
    P.xticks([0,1,2], ('strings', 'numpy auto', 'inline'))
    P.gca().set_xlim((-0.25, 2.25))
    P.gca().set_ylim((0, 110))
    P.gca().set_ylabel(r"% of slowest time")
    P.legend(loc=8)
    P.title('Casting raw file data to an MR volume')
    P.show()
def time_casting(int_data):
    """Time three ways of casting `int_data` (interleaved big-endian integers)
    into a complex64 array of half the trailing length.

    Returns npy.array([t_string, t_numpy, t_inline]) of measured times.

    NOTE: measure() (scipy.testing) compiles and executes its first argument
    as a code string, so the local names referenced there (float_data, dstr,
    dt, sl, int_data, bias) must keep exactly these spellings.
    """
    # one bias value per block (or a single one for 1-D input)
    nblk = 1 if len(int_data.shape) < 2 else int_data.shape[0]
    bias = (npy.random.rand(nblk) + \
            1j*npy.random.rand(nblk)).astype(npy.complex64)
    dstr = int_data.tostring()
    dt = npy.int16 if int_data.dtype.itemsize == 2 else npy.int32
    # output has half as many samples as integers (real/imag pairs)
    fshape = list(int_data.shape)
    fshape[-1] = fshape[-1]/2
    float_data = npy.empty(fshape, npy.complex64)

    # method 1: string conversion (flatten, fill, restore the shape)
    float_data.shape = (npy.product(fshape),)
    tstr = measure("float_data[:] = complex_fromstring(dstr, dt)", times=25)
    float_data.shape = fshape
    print "to-/from- string: ", tstr, "shape=",float_data.shape

    # method 2: numpy dtype magic
    sl = [None, slice(None)] if len(fshape)<2 else [slice(None)]*len(fshape)
    # need to loop since int_data need not be contiguous
    tnpy = measure("""
for fline, iline, b in zip(float_data[sl], int_data[sl], bias):
    cast_to_complex_npy(fline, iline, bias=b)""", times=25)
    print"numpy automagic: ", tnpy

    # method 3: plain inline brute force!
    twv = measure("cast_to_complex(float_data, int_data, bias=bias)",
                  times=25)
    print"inline casting: ", twv
    return npy.array([tstr, tnpy, twv], npy.float64)
def complex_fromstring(data, numtype):
    """Convert big-endian interleaved (real, imag) integer bytes to complex64.

    Args:
        data: byte string of big-endian integers of dtype `numtype`,
            interleaved as real, imag, real, imag, ...
        numtype: the (native) NumPy integer dtype of the samples,
            e.g. npy.int16 or npy.int32.

    Returns:
        A writable 1-D npy.complex64 array, one element per sample pair.
    """
    # np.fromstring / ndarray.tostring are deprecated; frombuffer / tobytes
    # are the supported equivalents.  frombuffer returns a read-only view,
    # so copy the final result to preserve the original writable semantics.
    ints = npy.frombuffer(data, numtype)
    if sys.byteorder == "little":
        # samples are big-endian on disk: swap before interpreting the values
        ints = ints.byteswap()
    floats = ints.astype(npy.float32)
    # reinterpret each (real, imag) float32 pair as one complex64 value
    return npy.frombuffer(floats.tobytes(), npy.complex64).copy()
def cast_to_complex(cplx_float, cplx_integer, bias=None):
    """Cast interleaved big-endian integer samples into `cplx_float` in place
    using inline C++ (scipy.weave), optionally subtracting a complex bias.

    Args:
        cplx_float: preallocated complex64 output array, filled in place.
        cplx_integer: array of interleaved (real, imag) big-endian integers,
            2 or 4 bytes per sample.
        bias: optional complex bias subtracted in place afterwards; for
            multi-dimensional outputs it is broadcast over the last axis.
    """
    # Pick the C pointer name / C type / byte-swap macro for 16- vs 32-bit
    # samples.  Build this as a *list*: the big-endian branch below mutates
    # two entries, and the original code used a tuple here, which raises
    # TypeError on big-endian hosts because tuples are immutable.
    if cplx_integer.dtype.itemsize == 4:
        replacements = ["l", "long", "SWAPLONG", "l"]*2
    else:
        replacements = ["s", "short", "SWAPSHORT", "s"]*2
    if sys.byteorder == "big":
        # data is already big-endian on a big-endian host: disable the swaps
        replacements[-2] = replacements[-6] = "NOP"
    cast_code = """
#define SWAPSHORT(x) ((short) ((x >> 8) | (x << 8)) )
#define SWAPLONG(x) ((long) ((x >> 24) | (x << 24) | ((x & 0x00ff0000) >> 8) | ((x & 0x0000ff00) << 8)) )
#define NOP(x) x

unsigned short *s;
unsigned long *l;
float repart, impart;
PyObject *itr;
itr = PyArray_IterNew(py_cplx_integer);
while(PyArray_ITER_NOTDONE(itr)) {

  // get real part
  %s = (unsigned %s *) PyArray_ITER_DATA(itr);
  repart = %s(*%s);
  PyArray_ITER_NEXT(itr);

  // get imag part
  %s = (unsigned %s *) PyArray_ITER_DATA(itr);
  impart = %s(*%s);
  PyArray_ITER_NEXT(itr);

  *(cplx_float++) = std::complex<float>(repart, impart);

}
""" % tuple(replacements)
    inline(cast_code, ['cplx_float', 'cplx_integer'])
    if bias is not None:
        # broadcast the per-block bias across the trailing axis for >1-D data
        if len(cplx_float.shape) > 1:
            bsl = [slice(None)]*(len(cplx_float.shape)-1) + [None]
        else:
            bsl = slice(None)
        npy.subtract(cplx_float, bias[bsl], cplx_float)
def cast_to_complex_npy(cplx_float, cplx_integer, bias=None):
    """Fill `cplx_float` in place from interleaved (real, imag) integers.

    Even-indexed samples of `cplx_integer` become the real parts and
    odd-indexed samples the imaginary parts; when `bias` is given it is
    subtracted from the result in place.
    """
    cplx_float.real[:] = cplx_integer[::2]
    cplx_float.imag[:] = cplx_integer[1::2]
    if bias is not None:
        cplx_float -= bias
if __name__=="__main__":
    # run the plotting benchmark first, then the broadcasting demo
    data_casting_test()
    multi_iter_example()
| 38.22381 | 109 | 0.628005 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,161 | 0.518375 |
4550a583d75dd1b434a2910f8dcc72c96ed5f8f1 | 16,579 | py | Python | object_detection2/data/transforms/autoaugment.py | vghost2008/wml | d0c5a1da6c228e321ae59a563e9ac84aa66266ff | [
"MIT"
] | 6 | 2019-12-10T17:18:56.000Z | 2022-03-01T01:00:35.000Z | object_detection2/data/transforms/autoaugment.py | vghost2008/wml | d0c5a1da6c228e321ae59a563e9ac84aa66266ff | [
"MIT"
] | 2 | 2021-08-25T16:16:01.000Z | 2022-02-10T05:21:19.000Z | object_detection2/data/transforms/autoaugment.py | vghost2008/wml | d0c5a1da6c228e321ae59a563e9ac84aa66266ff | [
"MIT"
] | 2 | 2019-12-07T09:57:35.000Z | 2021-09-06T04:58:10.000Z | #coding=utf-8
import tensorflow as tf
import tfop
contrib_image = tf.contrib.image
def blend(image1, image2, factor):
  """Blend image1 and image2 using 'factor'.

  Factor can be above 0.0.  A value of 0.0 means only image1 is used.
  A value of 1.0 means only image2 is used.  A value between 0.0 and
  1.0 linearly interpolates the pixel values between the two images.
  A value greater than 1.0 "extrapolates" the difference between the
  two pixel values, and the result is clipped to [0, 255].

  Args:
    image1: An image Tensor of type uint8.
    image2: An image Tensor of type uint8.
    factor: A floating point value above 0.0.

  Returns:
    A blended image Tensor with the same dtype as image1.
  """
  dtype = image1.dtype
  if factor == 0.0:
    return tf.convert_to_tensor(image1)
  if factor == 1.0:
    return tf.convert_to_tensor(image2)

  # Do the arithmetic in float.
  image1 = tf.to_float(image1)
  image2 = tf.to_float(image2)
  temp = image1 + factor * (image2 - image1)

  # Interpolation (0 < factor < 1) of in-range inputs always stays within
  # [0, 255], so no clipping is needed.  Cast back to the input dtype;
  # the previous hard-coded tf.uint8 cast disagreed with the extrapolation
  # branch below for non-uint8 inputs.
  if factor > 0.0 and factor < 1.0:
    return tf.cast(temp, dtype)

  # Extrapolation can overshoot the valid range: clip, then cast.
  return tf.cast(tf.clip_by_value(temp, 0.0, 255.0), dtype)
def color(image, factor):
  """Equivalent of PIL Color: blends a grayscale copy with the original.

  factor=0.0 yields the grayscale image, 1.0 the original, values in
  between a partial desaturation (see blend()).
  """
  grayscale = tf.image.rgb_to_grayscale(image)
  degenerate = tf.image.grayscale_to_rgb(grayscale)
  return blend(degenerate, image, factor)
def unwrap(image, replace):
  """Unwraps an image produced by wrap.

  Where there is a 0 in the last (validity) channel for a spatial position,
  the remaining channels at that position are replaced with `replace`.
  Operations like translate and shear on a wrapped Tensor leave 0s in empty
  locations; some transformations look at the intensity of values to do
  preprocessing, and we want these empty pixels to assume the fill value
  rather than pure black.

  Args:
    image: A 3D Image Tensor with 4 channels.
    replace: A one or three value 1D tensor to fill empty pixels.

  Returns:
    image: A 3D image Tensor with 3 channels.
  """
  image_shape = tf.shape(image)
  # Flatten the spatial dimensions.
  flattened_image = tf.reshape(image, [-1, image_shape[2]])

  # Find all pixels where the last channel is zero.
  alpha_channel = flattened_image[:, -1]

  # Append an alpha of 1 to `replace` so filled pixels stay marked valid.
  replace = tf.concat([replace, tf.ones([1], image.dtype)], 0)

  # Where they are zero, fill them in with 'replace'.
  flattened_image = tf.where(
      tf.equal(alpha_channel, 0),
      tf.ones_like(flattened_image, dtype=image.dtype) * replace,
      flattened_image)

  image = tf.reshape(flattened_image, image_shape)
  # Drop the validity channel, keeping only the first 3 (RGB) channels.
  image = tf.slice(image, [0, 0, 0], [image_shape[0], image_shape[1], 3])
  return image
def wrap(image):
  """Appends a fourth, all-ones "validity" channel to `image`.

  The extra channel lets unwrap() later detect pixels that a transform has
  filled with zeros.
  """
  dims = tf.shape(image)
  validity = tf.ones([dims[0], dims[1], 1], image.dtype)
  return tf.concat([image, validity], 2)
def translate_x(image, pixels, replace):
  """Translates `image` by -`pixels` along the X axis (PIL-style).

  Pixels left empty by the shift are filled with `replace`.  A None image
  is passed through unchanged.
  """
  if image is None:
    return None
  shifted = contrib_image.translate(wrap(image), [-pixels, 0])
  return unwrap(shifted, replace)
def translate_y(image, pixels, replace):
  """Translates `image` by -`pixels` along the Y axis (PIL-style).

  Pixels left empty by the shift are filled with `replace`.  A None image
  is passed through unchanged.
  """
  if image is None:
    return None
  shifted = contrib_image.translate(wrap(image), [0, -pixels])
  return unwrap(shifted, replace)
def _clip_bbox(min_y, min_x, max_y, max_x):
  """Clamp normalized bbox coordinates into the valid [0, 1] range.

  Args:
    min_y, min_x, max_y, max_x: scalar float tensors holding normalized
      bbox coordinates.

  Returns:
    The four coordinates, in the same order, each clipped to [0, 1].
  """
  clipped = [tf.clip_by_value(coordinate, 0.0, 1.0)
             for coordinate in (min_y, min_x, max_y, max_x)]
  return tuple(clipped)
def _check_bbox_area(min_y, min_x, max_y, max_x, delta=0.05):
  """Adjusts bbox coordinates to make sure the area is > 0.

  Args:
    min_y: Normalized bbox coordinate of type float between 0 and 1.
    min_x: Normalized bbox coordinate of type float between 0 and 1.
    max_y: Normalized bbox coordinate of type float between 0 and 1.
    max_x: Normalized bbox coordinate of type float between 0 and 1.
    delta: Float, this is used to create a gap of size 2 * delta between
      bbox min/max coordinates that are the same on the boundary.
      This prevents the bbox from having an area of zero.

  Returns:
    Tuple of new bbox coordinates between 0 and 1 that will now have a
    guaranteed area > 0.
  """
  height = max_y - min_y
  width = max_x - min_x
  def _adjust_bbox_boundaries(min_coord, max_coord):
    # Make sure max is never 0 and min is never 1.
    max_coord = tf.maximum(max_coord, 0.0 + delta)
    min_coord = tf.minimum(min_coord, 1.0 - delta)
    return min_coord, max_coord
  # Only degenerate (zero-height / zero-width) boxes are adjusted; valid
  # boxes pass through unchanged via the false branch of tf.cond.
  min_y, max_y = tf.cond(tf.equal(height, 0.0),
                         lambda: _adjust_bbox_boundaries(min_y, max_y),
                         lambda: (min_y, max_y))
  min_x, max_x = tf.cond(tf.equal(width, 0.0),
                         lambda: _adjust_bbox_boundaries(min_x, max_x),
                         lambda: (min_x, max_x))
  return min_y, min_x, max_y, max_x
def _shift_bbox(bbox, image_height, image_width, pixels, shift_horizontal):
  """Shifts the bbox coordinates by pixels.

  Args:
    bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x)
      of type float that represents the normalized coordinates between 0 and 1.
    image_height: Int, height of the image.
    image_width: Int, width of the image.
    pixels: An int. How many pixels to shift the bbox.
    shift_horizontal: Boolean. If true then shift in X dimension else shift in
      Y dimension.

  Returns:
    A tensor of the same shape as bbox, but now with the shifted coordinates.
  """
  pixels = tf.to_int32(pixels)
  # Convert bbox to integer pixel locations.
  min_y = tf.to_int32(tf.to_float(image_height) * bbox[0])
  min_x = tf.to_int32(tf.to_float(image_width) * bbox[1])
  max_y = tf.to_int32(tf.to_float(image_height) * bbox[2])
  max_x = tf.to_int32(tf.to_float(image_width) * bbox[3])

  if shift_horizontal:
    # Subtracting `pixels` mirrors contrib_image.translate(..., [-pixels, 0])
    # applied to the image in translate_x, so the box tracks the content.
    min_x = tf.maximum(0, min_x - pixels)
    max_x = tf.minimum(image_width, max_x - pixels)
  else:
    min_y = tf.maximum(0, min_y - pixels)
    max_y = tf.minimum(image_height, max_y - pixels)

  # Convert bbox back to floats.
  min_y = tf.to_float(min_y) / tf.to_float(image_height)
  min_x = tf.to_float(min_x) / tf.to_float(image_width)
  max_y = tf.to_float(max_y) / tf.to_float(image_height)
  max_x = tf.to_float(max_x) / tf.to_float(image_width)

  # Clip the bboxes to be sure the fall between [0, 1], and pad away any
  # zero-area box that results from the shift.
  min_y, min_x, max_y, max_x = _clip_bbox(min_y, min_x, max_y, max_x)
  min_y, min_x, max_y, max_x = _check_bbox_area(min_y, min_x, max_y, max_x)

  return tf.stack([min_y, min_x, max_y, max_x])
def translate_bbox(image, mask, bboxes, pixels, replace, shift_horizontal):
  """Equivalent of PIL Translate in X/Y that shifts image, mask and bboxes.

  Args:
    image: 3D uint8 Tensor.
    mask: segmentation mask Tensor translated alongside the image with empty
      pixels filled with 0; may be None (translate_x/_y pass None through).
    bboxes: 2D Tensor that is a list of the bboxes in the image. Each bbox
      has 4 elements (min_y, min_x, max_y, max_x) of type float with values
      between [0, 1].
    pixels: An int. How many pixels to shift the image and bboxes
    replace: A one or three value 1D tensor to fill empty pixels.
    shift_horizontal: Boolean. If true then shift in X dimension else shift in
      Y dimension.

  Returns:
    A tuple (image, mask, bboxes): the translated image and mask, and the
    bboxes with coordinates shifted to reflect the translated image.
  """
  if shift_horizontal:
    image = translate_x(image, pixels, replace)
    mask = translate_x(mask, pixels, 0)
  else:
    image = translate_y(image, pixels, replace)
    mask = translate_y(mask, pixels, 0)

  # Convert bbox coordinates to pixel values.
  image_height = tf.shape(image)[0]
  image_width = tf.shape(image)[1]
  # pylint:disable=g-long-lambda
  wrapped_shift_bbox = lambda bbox: _shift_bbox(
      bbox, image_height, image_width, pixels, shift_horizontal)
  # pylint:enable=g-long-lambda
  bboxes = tf.map_fn(wrapped_shift_bbox, bboxes)
  return image, mask, bboxes
def equalize(image):
  """Implements Equalize function from PIL using TF ops.

  Histogram-equalizes each channel of a uint8 RGB image independently.
  """
  dtype = image.dtype
  def scale_channel(im, c):
    """Scale the data in the channel to implement equalize."""
    im = tf.cast(im[:, :, c], tf.int32)
    # Compute the histogram of the image channel.
    histo = tf.histogram_fixed_width(im, [0, 255], nbins=256)

    # For the purposes of computing the step, filter out the nonzeros.
    nonzero = tf.where(tf.not_equal(histo, 0))
    nonzero_histo = tf.reshape(tf.gather(histo, nonzero), [-1])
    # Step is the (occupied) pixel count per output gray level, excluding
    # the last occupied bin, as PIL does.
    step = (tf.reduce_sum(nonzero_histo) - nonzero_histo[-1]) // 255

    def build_lut(histo, step):
      # Compute the cumulative sum, shifting by step // 2
      # and then normalization by step.
      lut = (tf.cumsum(histo) + (step // 2)) // step
      # Shift lut, prepending with 0.
      lut = tf.concat([[0], lut[:-1]], 0)
      # Clip the counts to be in range.  This is done
      # in the C code for image.point.
      return tf.clip_by_value(lut, 0, 255)

    # If step is zero, return the original image.  Otherwise, build
    # lut from the full histogram and step and then index from it.
    result = tf.cond(tf.equal(step, 0),
                     lambda: im,
                     lambda: tf.gather(build_lut(histo, step), im))

    return tf.cast(result, dtype)

  # Assumes RGB for now.  Scales each channel independently
  # and then stacks the result.
  s1 = scale_channel(image, 0)
  s2 = scale_channel(image, 1)
  s3 = scale_channel(image, 2)
  image = tf.stack([s1, s2, s3], 2)
  return image
def cutout(image, pad_size, replace=0):
  """Apply cutout (https://arxiv.org/abs/1708.04552) to image.

  This operation applies a (2*pad_size x 2*pad_size) mask of zeros to
  a random location within `image`.  The pixel values filled in will be of
  the value `replace`.  The location where the mask will be applied is
  randomly chosen uniformly over the whole image.

  Args:
    image: An image Tensor of type uint8.
    pad_size: Specifies how big the zero mask that will be generated is that
      is applied to the image. The mask will be of size
      (2*pad_size x 2*pad_size).
    replace: What pixel value to fill in the image in the area that has
      the cutout mask applied to it.

  Returns:
    An image Tensor that is of type uint8.
  """
  image_height = tf.shape(image)[0]
  image_width = tf.shape(image)[1]

  # Sample the center location in the image where the zero mask will be applied.
  cutout_center_height = tf.random_uniform(
      shape=[], minval=0, maxval=image_height,
      dtype=tf.int32)

  cutout_center_width = tf.random_uniform(
      shape=[], minval=0, maxval=image_width,
      dtype=tf.int32)

  # Pad amounts describe how much of the image lies outside the (possibly
  # border-truncated) cutout rectangle on each side.
  lower_pad = tf.maximum(0, cutout_center_height - pad_size)
  upper_pad = tf.maximum(0, image_height - cutout_center_height - pad_size)
  left_pad = tf.maximum(0, cutout_center_width - pad_size)
  right_pad = tf.maximum(0, image_width - cutout_center_width - pad_size)

  cutout_shape = [image_height - (lower_pad + upper_pad),
                  image_width - (left_pad + right_pad)]
  padding_dims = [[lower_pad, upper_pad], [left_pad, right_pad]]
  # Build a full-image mask: 0 inside the cutout rectangle, 1 elsewhere,
  # then broadcast it across the 3 color channels.
  mask = tf.pad(
      tf.zeros(cutout_shape, dtype=image.dtype),
      padding_dims, constant_values=1)
  mask = tf.expand_dims(mask, -1)
  mask = tf.tile(mask, [1, 1, 3])

  image = tf.where(
      tf.equal(mask, 0),
      tf.ones_like(image, dtype=image.dtype) * replace,
      image)
  return image
def shear_x(image, level, replace):
  """Equivalent of PIL shearing parallel to the X axis.

  Uses the projective transform with matrix [1 level; 0 1].  When
  `replace` is given, empty pixels created by the shear are filled with
  it (via wrap/unwrap); with replace=None the raw sheared tensor is
  returned untouched.
  """
  transform = [1., level, 0., 0., 1., 0., 0., 0.]
  if replace is None:
    return contrib_image.transform(image, transform)
  sheared = contrib_image.transform(wrap(image), transform)
  return unwrap(sheared, replace)
def shear_y(image, level, replace):
  """Equivalent of PIL shearing parallel to the Y axis.

  Uses the projective transform with matrix [1 0; level 1].  When
  `replace` is given, empty pixels created by the shear are filled with
  it (via wrap/unwrap); with replace=None the raw sheared tensor is
  returned untouched.
  """
  transform = [1., 0., 0., level, 1., 0., 0., 0.]
  if replace is None:
    return contrib_image.transform(image, transform)
  sheared = contrib_image.transform(wrap(image), transform)
  return unwrap(sheared, replace)
def _shear_bbox(bbox, image_height, image_width, level, shear_horizontal):
  """Shifts the bbox according to how the image was sheared.

  Args:
    bbox: 1D Tensor that has 4 elements (min_y, min_x, max_y, max_x) in
      absolute (pixel) coordinates.
    image_height: Int, height of the image.
    image_width: Int, width of the image.
    level: Float. How much to shear the image.
    shear_horizontal: If true then shear in X dimension else shear in
      the Y dimension.

  Returns:
    A tensor of the same shape as bbox, but now with the shifted coordinates.
  """
  image_height, image_width = (
      tf.to_float(image_height), tf.to_float(image_width))

  # The four corner points of the box, in pixels.
  min_y = bbox[0]
  min_x = bbox[1]
  max_y = bbox[2]
  max_x = bbox[3]
  coordinates = tf.stack(
      [[min_y, min_x], [min_y, max_x], [max_y, min_x], [max_y, max_x]])

  # Shear the coordinates according to the translation matrix.
  if shear_horizontal:
    translation_matrix = tf.stack(
        [[1, 0], [-level, 1]])
  else:
    translation_matrix = tf.stack(
        [[1, -level], [0, 1]])
  translation_matrix = tf.cast(translation_matrix, tf.float32)
  new_coords = tf.cast(
      tf.matmul(translation_matrix, tf.transpose(coordinates)), tf.int32)

  # Find min/max values and convert them back to floats.
  min_y = tf.to_float(tf.reduce_min(new_coords[0, :]))
  min_x = tf.to_float(tf.reduce_min(new_coords[1, :]))
  max_y = tf.to_float(tf.reduce_max(new_coords[0, :]))
  max_x = tf.to_float(tf.reduce_max(new_coords[1, :]))

  # Clip the box to stay inside the image.  (This works in pixels; the old
  # "[0, 1]" comment was stale, inherited from the normalized variant.)
  min_y = tf.clip_by_value(min_y, clip_value_min=0, clip_value_max=image_height-1)
  max_y = tf.clip_by_value(max_y, clip_value_min=0, clip_value_max=image_height-1)
  min_x = tf.clip_by_value(min_x, clip_value_min=0, clip_value_max=image_width-1)
  max_x = tf.clip_by_value(max_x, clip_value_min=0, clip_value_max=image_width-1)

  return tf.stack([min_y, min_x, max_y, max_x])
def shear_with_bboxes(image, bboxes, mask, level, replace, shear_horizontal):
  """Applies Shear Transformation to the image and shifts the bboxes.

  Args:
    image: 3D uint8 Tensor.
    bboxes: 2D Tensor that is a list of the bboxes in the image, in absolute
      pixel coordinates (see _shear_bbox).  When `mask` is given the boxes
      are recomputed from the sheared mask instead of sheared analytically.
    mask: stacked per-instance masks, or None.  The transposes below imply
      shape (num_instances, H, W) -- TODO confirm against callers.
    level: Float. How much to shear the image. This value will be between
      -0.3 to 0.3.
    replace: A one or three value 1D tensor to fill empty pixels.
    shear_horizontal: Boolean. If true then shear in X dimension else shear in
      the Y dimension.

  Returns:
    A tuple (image, bboxes, mask): the sheared image and mask, and bboxes
    whose coordinates reflect the sheared image.
  """
  if shear_horizontal:
    image = shear_x(image, level, replace)
  else:
    image = shear_y(image, level, replace)
  if mask is not None:
    # shear_x/shear_y expect an HWC layout, so move the instance axis last,
    # shear (fill value None = raw transform), then move it back to front.
    mask = tf.transpose(mask, [1, 2, 0])
    if shear_horizontal:
      mask = shear_x(mask, level, None)
    else:
      mask = shear_y(mask, level, None)
    mask = tf.transpose(mask, [2, 0, 1])
    # Project-specific op; by its name it is expected to recompute tight
    # bboxes from the sheared masks.
    bboxes = tfop.get_bboxes_from_mask(mask, stride=4)
  else:
    # Convert bbox coordinates to pixel values.
    image_height = tf.shape(image)[0]
    image_width = tf.shape(image)[1]
    # pylint:disable=g-long-lambda
    wrapped_shear_bbox = lambda bbox: _shear_bbox(
        bbox, image_height, image_width, level, shear_horizontal)
    # pylint:enable=g-long-lambda
    bboxes = tf.map_fn(wrapped_shear_bbox, bboxes)
  return image, bboxes, mask
| 36.598234 | 89 | 0.690633 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,589 | 0.457748 |
4552fee979b85fce4dbf6d9c63c69fdf894b4a54 | 1,693 | py | Python | selco/selco/doctype/cash_payment_entry/cash_payment_entry.py | codingCoffee/selco_v2 | 276b62ac3f904bdc3b4ddf92882fd0bb318c5a35 | [
"MIT"
] | null | null | null | selco/selco/doctype/cash_payment_entry/cash_payment_entry.py | codingCoffee/selco_v2 | 276b62ac3f904bdc3b4ddf92882fd0bb318c5a35 | [
"MIT"
] | 111 | 2018-04-26T13:14:09.000Z | 2018-08-04T05:54:48.000Z | selco/selco/doctype/cash_payment_entry/cash_payment_entry.py | codingCoffee/selco_v2 | 276b62ac3f904bdc3b4ddf92882fd0bb318c5a35 | [
"MIT"
] | 5 | 2018-02-08T13:34:03.000Z | 2021-07-20T10:03:06.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2017, SELCO and contributors
# For license information, please see license.txt
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class CashPaymentEntry(Document):
    """Cash payment voucher that mirrors itself into a Journal Entry on submit."""

    def before_insert(self):
        # Use the branch-specific naming series configured on the Branch doctype.
        self.naming_series = frappe.db.get_value("Branch",self.selco_branch,"selco_cash_payment_naming_series")

    def on_submit(self):
        # Build a Journal Entry that mirrors this cash payment voucher.
        je = frappe.new_doc('Journal Entry')
        je.selco_branch = self.selco_branch
        je.voucher_type = self.voucher_type
        je.selco_use_different_cost_center = self.use_different_cost_center
        je.posting_date = self.posting_date
        je.cheque_no = self.cheque_no
        je.cheque_date = self.cheque_date
        je.user_remark = self.user_remark
        # NOTE(review): assigning je.name directly bypasses the Journal Entry
        # naming series -- confirm this is intended.
        je.name = "J" + self.name
        # NOTE(review): looks like leftover debug output shown to the user.
        frappe.msgprint("je.name is" + str(je.name))
        je.company = self.company
        # Copy every accounts row of this voucher onto the Journal Entry.
        for d in self.get('accounts'):
            je.append("accounts",{
                "account":d.account,
                "party_type":d.party_type,
                "party":d.party,
                "reference_type":d.reference_type,
                "reference_name":d.reference_name,
                "is_advance":d.is_advance,
                "cost_center":d.cost_center,
                "account_currency":d.account_currency,
                "debit_in_account_currency":d.debit_in_account_currency,
                "credit_in_account_currency":d.credit_in_account_currency
            })
        je.save()
        je.submit()
| 38.477273 | 111 | 0.642056 | 1,330 | 0.785588 | 0 | 0 | 0 | 0 | 0 | 0 | 506 | 0.298878 |
4555051cbfcd1d15b946d4d5c08a4cc547ba96e4 | 1,755 | py | Python | measure.py | mtenenholtz/vehicle_footprint | d4500fcc127ed2c0400537b661a964b1d7fcef51 | [
"MIT"
] | null | null | null | measure.py | mtenenholtz/vehicle_footprint | d4500fcc127ed2c0400537b661a964b1d7fcef51 | [
"MIT"
] | null | null | null | measure.py | mtenenholtz/vehicle_footprint | d4500fcc127ed2c0400537b661a964b1d7fcef51 | [
"MIT"
class PixelMeasurer:
    """Converts pixel coordinates from a coordinate store into a real-world
    wheel-to-wheel distance.

    The conversion is calibrated against a reference object of known physical
    length; when ``is_one_calib_block`` is False a second (side) calibration
    block is used to estimate and correct perspective distortion, weighted by
    ``correction_factor``.
    """

    def __init__(self, coordinate_store, is_one_calib_block, correction_factor):
        self.coordinate_store = coordinate_store
        self.is_one_calib_block = is_one_calib_block
        self.correction_factor = correction_factor

    def get_distance(self, calibration_length):
        """Return the wheel-to-wheel distance in real-world units, given the
        known physical length of the middle calibration object."""
        distance_per_pixel = calibration_length / self.pixel_distance_calibration()
        if self.is_one_calib_block:
            return self.pixel_distance_between_wheels() * distance_per_pixel
        # With two calibration blocks, the side/middle pixel-length ratio
        # estimates the perspective distortion across the image.
        calibration_difference = float(self.pixel_distance_calibration_side()) / \
                                 float(self.pixel_distance_calibration())
        distance_correction = 1 - self.correction_factor*(1 - calibration_difference)
        return self.pixel_distance_between_wheels() * distance_per_pixel * distance_correction

    @staticmethod
    def _x_midpoint(points):
        # X midpoint of the two points marked on one wheel.
        return int(abs(points[0][0] + points[1][0]) / 2)

    @staticmethod
    def _x_span(points):
        # Horizontal pixel span between two calibration points.
        return abs(points[0][0] - points[1][0])

    def get_left_wheel_midpoint(self):
        return self._x_midpoint(self.coordinate_store.get_left_wheel_points())

    def get_right_wheel_midpoint(self):
        right_points = self.coordinate_store.get_right_wheel_points(
            is_one_calib_block=self.is_one_calib_block)
        return self._x_midpoint(right_points)

    def pixel_distance_between_wheels(self):
        return abs(self.get_right_wheel_midpoint() - self.get_left_wheel_midpoint())

    def pixel_distance_calibration(self):
        return self._x_span(self.coordinate_store.get_middle_calib_points())

    def pixel_distance_calibration_side(self):
        return self._x_span(self.coordinate_store.get_side_calib_points())
| 50.142857 | 105 | 0.723077 | 1,754 | 0.99943 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
45559cb2010dc718a647583ca7727ca07bebe64f | 2,494 | py | Python | feedback-Python/models.py | gforghieri/code-demo | 7e4faf9d1411d073af360c78387db3f58ea45cf1 | [
"MIT"
] | null | null | null | feedback-Python/models.py | gforghieri/code-demo | 7e4faf9d1411d073af360c78387db3f58ea45cf1 | [
"MIT"
] | null | null | null | feedback-Python/models.py | gforghieri/code-demo | 7e4faf9d1411d073af360c78387db3f58ea45cf1 | [
"MIT"
] | null | null | null | from django.db import models
from django.utils.translation import ugettext_lazy as _
class Feature(models.Model):
    """A single product feature users can be asked to give feedback on."""
    # NOTE(review): blank=True together with unique=True permits an empty
    # name, but only on one row -- confirm this is intended.
    name = models.CharField('Feature Name', max_length=50, blank=True, unique=True)
    description = models.CharField('Feature Description', max_length=150, blank=True)
    info_link = models.CharField('Feature Demo Link', max_length=100, blank=True)

    class Meta:
        verbose_name = _('feature')
        verbose_name_plural = _('features')
        # stable insertion-order listing
        ordering = ['id']
class Version(models.Model):
    """A unique version identifier; presented to admins as a 'tag'."""
    tag = models.CharField('Tag', max_length=50, unique=True)
    class Meta:
        verbose_name = _('tag')
        verbose_name_plural = _('tags')
class Release(models.Model):
    """A release: one Version plus the set of Features shipped in it."""
    version = models.ForeignKey(Version, on_delete=models.CASCADE)
    features = models.ManyToManyField(Feature, blank=True)
    class Meta:
        verbose_name = _('release')
        verbose_name_plural = _('releases')
class FeedbackResult(models.Model):
    """One user's feedback (text, liked/skipped flags) for a single feature."""
    user_email = models.EmailField('Email', blank=False, null=False)
    # Nullable and survives service deletion (SET_NULL keeps the feedback row).
    service = models.ForeignKey('organizations.service', null=True, on_delete=models.SET_NULL)
    feature = models.ForeignKey(Feature, on_delete=models.CASCADE)
    feedback = models.CharField('Feature Feedback', max_length=512, blank=True, null=True)
    # NOTE(review): NullBooleanField is deprecated in newer Django releases
    # (use BooleanField(null=True)) -- confirm the targeted Django version.
    liked = models.NullBooleanField('Feature Liked')
    skipped = models.NullBooleanField('Feature Skipped')
    class Meta:
        verbose_name = _('feedback-result')
        verbose_name_plural = _('feedback-results')
class FeedbackActivity(models.Model):
    """Tracks whether a user engaged with (or declined) feedback for a release/service."""
    user_email = models.EmailField('Email', blank=False)
    # NOTE(review): NullBooleanField is deprecated in newer Django releases
    # (use BooleanField(null=True)) -- confirm the targeted Django version.
    declined = models.NullBooleanField('Declined', null=True, blank=True)
    release = models.ForeignKey(Release, null=True, blank=True, on_delete=models.CASCADE)
    service = models.ForeignKey('organizations.service', null=True, blank=True, on_delete=models.CASCADE)
    has_given_feedback = models.NullBooleanField('Given Feedback', blank=True)
    hours_used_release = models.FloatField(null=True, blank=True)
    class Meta:
        verbose_name = _('feedback-activity')
        verbose_name_plural = _('feedback-activities')
class UserSession(models.Model):
    """One user session window (start/end timestamps) with an optional tag string."""
    user_email = models.EmailField('Email', blank=False)
    session_start = models.DateTimeField(null=True)
    session_end = models.DateTimeField(null=True)
    tag = models.CharField(null=True, max_length=30)
    class Meta:
        verbose_name = _('user-session')
        verbose_name_plural = _('user-sessions')
| 36.676471 | 105 | 0.716119 | 2,391 | 0.958701 | 0 | 0 | 0 | 0 | 0 | 0 | 359 | 0.143945 |
455976b188c8965e37899317a939f416893472f4 | 727 | py | Python | jupyterhub_client/tests/test_async.py | minrk/jupyterhub-client | 99b856e81925690d5546fd8457a6e333fb709513 | [
"BSD-2-Clause"
] | 2 | 2017-04-06T20:28:31.000Z | 2020-03-20T06:51:21.000Z | jupyterhub_client/tests/test_async.py | minrk/jupyterhub-client | 99b856e81925690d5546fd8457a6e333fb709513 | [
"BSD-2-Clause"
] | 2 | 2017-04-10T09:30:20.000Z | 2018-11-22T16:30:03.000Z | jupyterhub_client/tests/test_async.py | minrk/jupyterhub-client | 99b856e81925690d5546fd8457a6e333fb709513 | [
"BSD-2-Clause"
] | 8 | 2017-04-06T20:28:38.000Z | 2021-04-01T13:39:13.000Z | from pytest import fixture
from tornado import gen
from tornado.ioloop import IOLoop
from .conftest import TOKEN
from ..async import AsyncJupyterHubClient
@fixture
def client(app, io_loop):
    """Async JupyterHub API client pointed at the test app's hub URL.

    The io_loop fixture is requested so clear_instance calls don't reset
    the current loop.
    """
    hub_api_url = app.hub.server.url + 'api'
    return AsyncJupyterHubClient(TOKEN, url=hub_api_url)
def _run(_test, timeout=10):
    """Drive coroutine ``_test`` to completion on the current IOLoop.

    Fails with a timeout error if it takes longer than ``timeout`` seconds.
    """
    loop = IOLoop.current()
    deadline = loop.time() + timeout
    loop.run_sync(lambda: gen.with_timeout(deadline, _test()))
def test_list_users(app, io_loop, client):
    """list_users should return exactly the 'admin' and 'user' accounts."""
    @gen.coroutine
    def _check():
        users = yield client.list_users()
        assert sorted(u['name'] for u in users) == ['admin', 'user']
    _run(_check)
| 24.233333 | 78 | 0.696011 | 0 | 0 | 205 | 0.281981 | 327 | 0.449794 | 0 | 0 | 98 | 0.134801 |
4559e964e50001ba3247b35781b62b682a586366 | 2,247 | py | Python | app/ml.py | JeffreyAsuncion/fastapi_test | 384a4bbb5fd283f55f34cd036905d9fb8d3314ab | [
"MIT"
] | null | null | null | app/ml.py | JeffreyAsuncion/fastapi_test | 384a4bbb5fd283f55f34cd036905d9fb8d3314ab | [
"MIT"
] | null | null | null | app/ml.py | JeffreyAsuncion/fastapi_test | 384a4bbb5fd283f55f34cd036905d9fb8d3314ab | [
"MIT"
] | null | null | null | """Machine learning functions"""
from fastapi import APIRouter
from sklearn.neighbors import NearestNeighbors
import pandas as pd
from joblib import load
import pickle
from app.city_state_json import city_state_2_id_num
router = APIRouter()
# filename = 'app/recommend/recommendation_model.sav'
# model_file = open(filename, 'rb')
# loaded_model = pickle.load(model_file)
# states_pkl = pd.read_pickle('app/recommend/states_dataset.pkl')
# @router.get('/recommend')
# async def suggest_state_ids(city_state : str):
# '''Returns the list of 10 city_states with features
# for a given city_state, i.e., "Newark, New Jersey"
# This is a sample response of 2 recommended city_states
# [
# [
# {
# "city_state": "Newark, New Jersey",
# "id_num": 17089,
# "population": 283945,
# "crime_rate": 27.4,
# "rental_rate": 1466.89,
# "walk_score": 79
# }
# ],
# [
# {
# "city_state": "Chula Vista, California",
# "id_num": 3151,
# "population": 280863,
# "crime_rate": 16.2,
# "rental_rate": 2477.6,
# "walk_score": 43
# }
# ]
# ]
# NOTE: This route will return 10 recommmended city_states
# '''
# # use dictionary to find state_id
# state_id = city_state_2_id_num[city_state]
# # pass state_id to model
# state_index = states_pkl.index[states_pkl['id_num'] == state_id]
# # use 'state_id' to find state_features
# state_features = states_pkl.iloc[state_index, 2:].to_numpy()
# # load pkl NearestNeighbors Model
# dist, indices = loaded_model.kneighbors(state_features)
# # list of 10 recommended state_id
# recommended_list = list(states_pkl.loc[indices[0], 'id_num'])
# # list of state_id with respective state feature
# results = []
# for i in range(len(recommended_list)):
# r_list = states_pkl[states_pkl['id_num']==recommended_list[i]]
# r = r_list.to_dict('records')
# results.append(r)
# return results
| 29.565789 | 82 | 0.581219 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,939 | 0.862928 |
455a32504d4c9286e9e3a194024362339d3c0c0a | 2,431 | py | Python | worker/server.py | OmarZOS/remote-extraction-proxy-and-worker | 739466a0df588d7eb5b1dae9666ceb8c7a25e928 | [
"MIT"
] | null | null | null | worker/server.py | OmarZOS/remote-extraction-proxy-and-worker | 739466a0df588d7eb5b1dae9666ceb8c7a25e928 | [
"MIT"
] | 10 | 2022-03-17T23:23:18.000Z | 2022-03-18T00:15:11.000Z | worker/server.py | OmarZOS/remote-extraction-proxy-and-worker | 739466a0df588d7eb5b1dae9666ceb8c7a25e928 | [
"MIT"
] | 1 | 2022-03-24T23:56:46.000Z | 2022-03-24T23:56:46.000Z | import json
from xmlrpc.server import SimpleXMLRPCServer
from xmlrpc.server import SimpleXMLRPCRequestHandler
from multiprocessing import Process
import xmlrpc.client
from locator import locator
from constants import *
from functions import *
import os
import constants
# In-memory registry of tasks handed to this worker (one entry pre-seeded).
current_tasks = [{"id": 1}]
print("Serving somewhere")

# Restrict XML-RPC requests to a particular path.
class RequestHandler(SimpleXMLRPCRequestHandler):
    rpc_paths = ('/RPC2',)

print(f"Serving at {SERVING_HOST}:{SERVING_PORT}")
print("binding to ")
# Create the XML-RPC server and register this worker's remote API on it.
with SimpleXMLRPCServer((SERVING_HOST, int(SERVING_PORT)),
                        requestHandler=RequestHandler) as server:
    server.register_introspection_functions()

    @server.register_function(name='subscribe_in_proxy')
    def subscribe_in_proxy():
        """(Re-)announce this worker and its available services to the proxy."""
        get_proxy().register_worker(SERVING_HOST, SERVING_PORT, locator.availableServices)
        return True

    @server.register_function(name='change_proxy')
    def change_proxy(proxy_scheme, proxy_host, proxy_port):
        """Repoint this worker at a different proxy (updates env vars and constants)."""
        os.environ["PROXY_SCHEME"] = str(proxy_scheme)
        constants.PROXY_SCHEME = proxy_scheme
        os.environ["PROXY_HOST"] = str(proxy_host)
        constants.PROXY_HOST = proxy_host
        os.environ["PROXY_PORT"] = str(proxy_port)
        constants.PROXY_PORT = proxy_port
        return True

    def setVariable(varname, value):
        """Set a variable on the shared execution context."""
        return get_context().set(varname, value)
    server.register_function(setVariable, 'set')

    def subscribe_service(api, service_name, instance, json_info):
        """Register a service instance and its metadata with the locator."""
        locator.availableServices[api][service_name] = json_info
        locator.setService(f"{api}", f"{service_name}", instance)
        return True
    # BUG FIX: this previously registered setVariable under the name
    # 'subscribe_service', leaving subscribe_service itself unreachable
    # over RPC; register the actual subscribe_service function.
    server.register_function(subscribe_service, 'subscribe_service')

    @server.register_function(name='start_harvesting_data')
    def start_harvesting_data(api_name, service_name, model):
        """Spawn a harvesting process for the given service; return its pid or False."""
        try:
            p = Process(target=locator.getService(api_name, service_name),
                        args=(get_context(), model, locator.getPublisher(),))
            p.start()
            return p.pid
        except Exception:
            return False

    # current_tasks is involved; don't forget to pass the get_context() object.
    get_proxy().register_worker(SERVING_HOST, SERVING_PORT, locator.availableServices)
    # Run the server's main loop.
    server.serve_forever()
| 34.239437 | 88 | 0.712875 | 76 | 0.031263 | 0 | 0 | 1,105 | 0.454545 | 0 | 0 | 446 | 0.183464 |
455a510ca3f33288b1e82aa99f9b204ab68b6807 | 3,972 | py | Python | get_player.py | jason-sa/baseball_lin_regression | 936535693f00b28d17b2b901144dcba8bce45ab9 | [
"MIT"
] | null | null | null | get_player.py | jason-sa/baseball_lin_regression | 936535693f00b28d17b2b901144dcba8bce45ab9 | [
"MIT"
] | null | null | null | get_player.py | jason-sa/baseball_lin_regression | 936535693f00b28d17b2b901144dcba8bce45ab9 | [
"MIT"
] | null | null | null | '''
Script to loop through all baseballrefernce.com pages and store the HTML in data frames
'''
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import TimeoutException
import requests
import time
import os
from selenium.webdriver.common.by import By
import pickle
import re
import time
def get_rookie_player_pages_html(rookie_player_pages, driver, stop=None):
    """Fetch and store page HTML for every player row that has none yet.

    Progress is saved to data/rookie_player_pages.csv every 100 pages and
    once more at the end; ``stop`` caps how many pages are fetched.
    Returns the (mutated) rookie_player_pages frame.
    """
    pending = rookie_player_pages[rookie_player_pages.html.isnull()]
    for count, url in enumerate(pending.link.values):
        fetch_start = time.time()
        driver.get(url)  # Could add try/except to save on error, so can restart from last write.
        print((time.time() - fetch_start), url)
        rookie_player_pages.loc[rookie_player_pages.link == url, 'html'] = driver.page_source
        if count != 0 and count % 100 == 0:
            print('Rows completed', rookie_player_pages[~rookie_player_pages.html.isnull()].shape[0])
            rookie_player_pages.to_csv('data/rookie_player_pages.csv')
        if count == stop:
            break
    rookie_player_pages.to_csv('data/rookie_player_pages.csv')
    return rookie_player_pages
def build_rookie_pages(start, end, driver):
    """Scrape baseball-reference rookie pages for years start..end (inclusive).

    Resumes from data/rookie_pages.csv / data/rookie_player_pages.csv when
    present, saves after each year, and returns both DataFrames.
    """
    rookie_pages = pd.DataFrame(columns=['year','link','html'])
    rookie_player_pages = pd.DataFrame(columns=['year','name','link','html'])
    #attempt to load from csv
    try:
        rookie_pages = pd.read_csv('data/rookie_pages.csv', index_col=0)
    except FileNotFoundError:
        pass
    print(rookie_pages.shape)
    try:
        rookie_player_pages = pd.read_csv('data/rookie_player_pages.csv', index_col=0)
    except FileNotFoundError:
        pass
    print(rookie_player_pages.shape)
    for i in range(start, end+1):
        links_list = []
        names_list = []
        #if year == i, then move onto link loop
        if not (rookie_pages.year == i).any():
            url = 'https://www.baseball-reference.com/leagues/MLB/'+str(i)+'-rookies.shtml'
            # NOTE(review): `start`/`end` below shadow the function parameters;
            # harmless because range() was computed above, but renaming the
            # timing variables would be clearer.
            start = time.time()
            driver.get(url)
            end = time.time()
            print(end-start, i)
            rookie_pages.loc[i] = [i, url, driver.page_source]
            # scrape the rookie batters (includes pitchers if PA)
            batting = driver.find_element_by_id('misc_batting') ## HTML tables
            links = batting.find_elements_by_xpath('.//tbody/tr/td/a') ## player pages
            # add these to the DF to save
            links_list = [a.get_attribute('href') for a in links if re.search(r'players/.', a.get_attribute('href'))]
            names_list = [a.text for a in links if re.search(r'players/.', a.get_attribute('href'))]
        if len(links_list) != 0: # add new data
            year_l = [i] * len(links_list)
            new_df = pd.DataFrame({'year': year_l, 'name': names_list, 'link': links_list})
            rookie_player_pages = rookie_player_pages.append(new_df, sort=True)
        # Persist progress after every year so an interrupted run can resume.
        rookie_pages.to_csv('data/rookie_pages.csv')
        rookie_player_pages.to_csv('data/rookie_player_pages.csv')
    return rookie_pages, rookie_player_pages
chromedriver = "chromedriver" # path to the chromedriver executable
os.environ["webdriver.chrome.driver"] = chromedriver
driver = webdriver.Chrome(chromedriver)
# Retry building the year pages indefinitely until no Selenium timeout occurs.
while True:
    try:
        rookie_pages, rookie_player_pages = build_rookie_pages(1985, 2017, driver)
    except TimeoutException:
        pass
    else:
        break
# Then fetch player pages, allowing up to three attempts on timeouts.
tries = 0
while tries <= 2:
    try:
        rookie_player_pages = pd.read_csv('data/rookie_player_pages.csv',index_col=0)
        print('Try:', tries)
        print(rookie_player_pages.shape)
        rookie_player_pages = get_rookie_player_pages_html(rookie_player_pages, driver, stop=6000)
    except TimeoutException:
        tries += 1
        pass
    else:
        break
driver.close()
455a83cc696af95c9057d25a2138c7ac1f9ece53 | 1,621 | py | Python | src/puzzle_1_you_will_all_conform/site/line-exercise1.py | foryourselfand/mit_6_S095_programming_for_the_puzzled | 88371bd8461709011acbed6066ac4f40c5cde29e | [
"MIT"
] | null | null | null | src/puzzle_1_you_will_all_conform/site/line-exercise1.py | foryourselfand/mit_6_S095_programming_for_the_puzzled | 88371bd8461709011acbed6066ac4f40c5cde29e | [
"MIT"
] | null | null | null | src/puzzle_1_you_will_all_conform/site/line-exercise1.py | foryourselfand/mit_6_S095_programming_for_the_puzzled | 88371bd8461709011acbed6066ac4f40c5cde29e | [
"MIT"
] | null | null | null | # Programming for the Puzzled -- Srini Devadas
# You Will All Conform
# Input is a vector of F's and B's, in terms of forwards and backwards caps
# Output is a set of commands (printed out) to get either all F's or all B's
# Fewest commands are the goal
# Sample inputs: cap2 differs from caps only at position 11 ('B' -> 'F').
caps = ['F', 'F', 'B', 'B', 'B', 'F', 'B', 'B', 'B', 'F', 'F', 'B', 'F']
cap2 = ['F', 'F', 'B', 'B', 'B', 'F', 'B', 'B', 'B', 'F', 'F', 'F', 'F']
def pleaseConform(caps):
    """Print the fewest flip commands needed to make all caps face one way.

    caps: list of 'F'/'B' values (forwards/backwards caps).  The caps are
    grouped into runs of the same direction; whichever direction has fewer
    runs is told to flip, one printed command per run.  Prints nothing for
    an empty line of people.
    """
    # BUG FIX: the original raised IndexError on an empty input list.
    if not caps:
        return
    start = 0
    forward = 0
    backward = 0
    intervals = []
    # Determine intervals where caps point in the same direction.
    # Each interval is a tuple (first index, last index, direction).
    for i in range(len(caps)):
        if caps[start] != caps[i]:
            intervals.append((start, i - 1, caps[start]))
            if caps[start] == 'F':
                forward += 1
            else:
                backward += 1
            start = i
    # Close out the final run after the loop completes.
    intervals.append((start, len(caps) - 1, caps[start]))
    if caps[start] == 'F':
        forward += 1
    else:
        backward += 1
    # Flip whichever direction has fewer runs (ties flip 'B').
    flip = 'F' if forward < backward else 'B'
    for first, last, direction in intervals:
        if direction == flip:
            # Single-person runs get a singular message.
            if first == last:
                print('Person at position', first, 'flip your cap!')
            else:
                print('People in positions', first, 'through', last, 'flip your caps!')
# Run the demo on the first sample; uncomment to try the second.
pleaseConform(caps)
##pleaseConform(cap2)
| 30.018519 | 86 | 0.526218 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 749 | 0.46206 |
455b9271ec975980f2d05b9d2d018c5d0532f369 | 515 | py | Python | python3/python-tk/tk_busybar_demo.py | cgoldberg/corey-projects | 5e6ca9fd02f00a8e44692eed68234a12cf2d5bca | [
"WTFPL"
] | null | null | null | python3/python-tk/tk_busybar_demo.py | cgoldberg/corey-projects | 5e6ca9fd02f00a8e44692eed68234a12cf2d5bca | [
"WTFPL"
] | null | null | null | python3/python-tk/tk_busybar_demo.py | cgoldberg/corey-projects | 5e6ca9fd02f00a8e44692eed68234a12cf2d5bca | [
"WTFPL"
] | null | null | null | #!/usr/bin/env python
# Python 3
# Corey Goldberg - 2009
import tkinter
from tkinter import ttk
import BusyBar
class Application:
    """Minimal Tk window demonstrating an animated BusyBar widget."""
    def __init__(self, root):
        self.root = root
        self.root.title('BusyBar Demo')
        ttk.Frame(self.root, width=300, height=100).pack()
        busy_bar = BusyBar.BusyBar(self.root, width=200)
        busy_bar.place(x=40, y=20)
        busy_bar.on()
if __name__ == '__main__':
    # Create the Tk root, build the demo window, and enter the event loop.
    root = tkinter.Tk()
    Application(root)
    root.mainloop()
455c8ff0c603475739ee650d803fb990973e2d71 | 115 | py | Python | abc110_c.py | hythof/atc | 12cb94ebe693e1f469ce0d982bc2924b586552cd | [
"CC0-1.0"
] | null | null | null | abc110_c.py | hythof/atc | 12cb94ebe693e1f469ce0d982bc2924b586552cd | [
"CC0-1.0"
] | null | null | null | abc110_c.py | hythof/atc | 12cb94ebe693e1f469ce0d982bc2924b586552cd | [
"CC0-1.0"
] | null | null | null | s=input()
t=input()
ss=sorted(map(s.count,set(s)))
tt=sorted(map(t.count,set(t)))
print('Yes' if ss==tt else 'No')
| 19.166667 | 32 | 0.643478 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 | 0.078261 |
455c9467c943ec1ae7bba780b8f13a898da0c074 | 1,547 | py | Python | app/schema/item.py | qateam123/eq | 704757952323647d659c49a71975c56406ff4047 | [
"MIT"
] | null | null | null | app/schema/item.py | qateam123/eq | 704757952323647d659c49a71975c56406ff4047 | [
"MIT"
] | 8 | 2020-03-24T15:24:18.000Z | 2022-03-02T04:32:56.000Z | app/schema/item.py | qateam123/eq | 704757952323647d659c49a71975c56406ff4047 | [
"MIT"
] | null | null | null | from abc import ABCMeta, abstractmethod
from app.questionnaire_state.exceptions import StateException
class Item(metaclass=ABCMeta):
    """Abstract base for all items in a schema.

    Subclasses must provide an id and redefine State accordingly.
    """
    def __init__(self, item_id):
        """id, children and questionnaire must be set by derived classes."""
        self.id = item_id
        self.children = None
        self.questionnaire = None

    def construct_state(self):
        """Recursively build this item's state tree; None when there is no state class."""
        state_class = self.get_state_class()
        if not state_class:
            return None
        state = state_class(self.id, self)
        for child in self.children:
            child_state = child.construct_state()
            child_state.parent = state
            state.children.append(child_state)
        return state

    @abstractmethod
    def get_state_class(self):
        """Return the questionnaire-state class used for this item."""
        raise NotImplementedError

    def validate(self, state, skip_mandatory_validation):
        """Validate `state` and every child state; raise on a mismatched state class."""
        if not isinstance(state, self.get_state_class()):
            raise StateException('Cannot validate - incorrect state class')
        is_valid = True
        for child_state in state.children:
            child_schema = self.questionnaire.get_item_by_id(child_state.id)
            child_valid = child_schema.validate(child_state, skip_mandatory_validation)
            if child_valid is False:
                is_valid = False
        return is_valid
| 32.914894 | 106 | 0.630252 | 1,441 | 0.93148 | 0 | 0 | 80 | 0.051713 | 0 | 0 | 244 | 0.157725 |
455d83d3700365dc3ad6599f1d91ada671e96df4 | 378 | py | Python | scripts/get_alignments.py | kant/transition-amr-parser | b450b8cf0ddb7cc475f70d0bf46d80ab186a2f7d | [
"Apache-2.0"
] | 1 | 2021-07-08T08:24:21.000Z | 2021-07-08T08:24:21.000Z | scripts/get_alignments.py | kant/transition-amr-parser | b450b8cf0ddb7cc475f70d0bf46d80ab186a2f7d | [
"Apache-2.0"
] | null | null | null | scripts/get_alignments.py | kant/transition-amr-parser | b450b8cf0ddb7cc475f70d0bf46d80ab186a2f7d | [
"Apache-2.0"
] | 1 | 2020-07-30T10:12:33.000Z | 2020-07-30T10:12:33.000Z | import sys
from amr import JAMR_CorpusReader
if __name__ == '__main__':
args = sys.argv
infile = args[1]
cr = JAMR_CorpusReader()
cr.load_amrs(infile)
gold_amrs = cr.amrs
for sentidx, amr in enumerate(gold_amrs):
for n in amr.alignments:
print(str(sentidx)+'\t'+n+'\t'+','.join(str(s) for s in amr.alignments[n]))
print()
| 22.235294 | 87 | 0.616402 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 21 | 0.055556 |
455ec14366cbca1ad699f01e8d919c9468857541 | 126 | py | Python | setup.py | HDembinski/bibtools | 1dcc9b2ad19c11e01848a2d9a26bcad7f20e9141 | [
"BSD-3-Clause"
] | null | null | null | setup.py | HDembinski/bibtools | 1dcc9b2ad19c11e01848a2d9a26bcad7f20e9141 | [
"BSD-3-Clause"
] | null | null | null | setup.py | HDembinski/bibtools | 1dcc9b2ad19c11e01848a2d9a26bcad7f20e9141 | [
"BSD-3-Clause"
] | null | null | null | from setuptools import setup
from glob import glob
setup(
name='bibtools',
version='0.0.1',
scripts=glob('src/*.py'),
) | 15.75 | 28 | 0.68254 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 27 | 0.214286 |
455ec713befd7532c83b45583b4e08a6cd8de1bc | 70 | py | Python | src/prodis/packets/handshaking/__init__.py | blubberdiblub/prodis | c70d31b7df0358edd8969e9a94341b3771ee2e0f | [
"MIT"
] | null | null | null | src/prodis/packets/handshaking/__init__.py | blubberdiblub/prodis | c70d31b7df0358edd8969e9a94341b3771ee2e0f | [
"MIT"
] | null | null | null | src/prodis/packets/handshaking/__init__.py | blubberdiblub/prodis | c70d31b7df0358edd8969e9a94341b3771ee2e0f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from .serverbound import Packet as ServerBound
| 17.5 | 46 | 0.785714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 21 | 0.3 |
45615865d28df52f0bed3733b2d9feada286ab25 | 572 | py | Python | send.py | EmelyanenkoK/freeton_scheduler | 8fd88d09e95e7ce48923efc48e22f2068705a72a | [
"MIT"
] | null | null | null | send.py | EmelyanenkoK/freeton_scheduler | 8fd88d09e95e7ce48923efc48e22f2068705a72a | [
"MIT"
] | null | null | null | send.py | EmelyanenkoK/freeton_scheduler | 8fd88d09e95e7ce48923efc48e22f2068705a72a | [
"MIT"
] | 2 | 2021-08-30T15:05:53.000Z | 2022-02-20T20:11:12.000Z | import sys
from graphqlclient import GraphQLClient
from random import randint
import codecs, json
client = GraphQLClient("https://net.ton.dev/graphql")
mutation_template = '''
mutation {
  postRequests(requests:[{id:"%(request_id)s",body:"%(base64_boc)s",expireAt:2e12}])
}
'''
def send_boc(client, boc):
    """Post a base64-encoded BOC to the GraphQL endpoint under a random request id."""
    encoded = codecs.decode(codecs.encode(boc, 'base64'), 'utf8').replace('\n', '')
    data = {'request_id': str(randint(0, 2**32)), 'base64_boc': encoded}
    response = json.loads(client.execute(mutation_template % data))
    print(response)
# Read the serialized BOC from the file named on the command line and post it.
# NOTE(review): mode "rb+" requests write access although the file is only
# read here; plain "rb" would suffice -- confirm before changing.
with open(sys.argv[1], "rb+") as f:
    send_boc(client, f.read())
| 27.238095 | 127 | 0.702797 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 183 | 0.31993 |
456204f6871f6eb8dedf973fadca394db0e5504c | 87 | py | Python | scripts/exercicios/ex025.py | RuanBarretodosSantos/python | 4142ccd71c4ffb4bb6a10d61c85f612758f5bb41 | [
"MIT"
] | null | null | null | scripts/exercicios/ex025.py | RuanBarretodosSantos/python | 4142ccd71c4ffb4bb6a10d61c85f612758f5bb41 | [
"MIT"
] | null | null | null | scripts/exercicios/ex025.py | RuanBarretodosSantos/python | 4142ccd71c4ffb4bb6a10d61c85f612758f5bb41 | [
"MIT"
] | null | null | null | nome = str(input('Digite seu nome: ')).strip()
cap = nome.title()
print('Silva' in cap) | 29 | 46 | 0.655172 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.298851 |
45633fd4e855086655acb3712ae36101ef73aca3 | 1,097 | py | Python | tests/test_tokenizer.py | AlessandroVol23/DEMorphy | a19532a21ac0ce7334ba3203274dc28ba95075d3 | [
"MIT"
] | 43 | 2018-03-07T11:05:00.000Z | 2022-01-28T06:44:01.000Z | tests/test_tokenizer.py | AlessandroVol23/DEMorphy | a19532a21ac0ce7334ba3203274dc28ba95075d3 | [
"MIT"
] | 6 | 2018-04-06T09:40:32.000Z | 2019-12-30T08:08:57.000Z | tests/test_tokenizer.py | AlessandroVol23/DEMorphy | a19532a21ac0ce7334ba3203274dc28ba95075d3 | [
"MIT"
] | 13 | 2018-03-20T09:22:53.000Z | 2021-11-12T13:33:23.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import pytest
from demorphy.tokenizer import tokenize
class TestTokenizer:
    """Sanity checks for demorphy.tokenizer.tokenize on German text."""
    def test_split_simple(self):
        assert tokenize(u"Ich bin krank") == [u"Ich", u"bin", u"krank"]
    def test_split_hypen(self):
        # NOTE(review): the expected list ends with u'treffen.' although the
        # input has no trailing period -- confirm this expectation is intended.
        assert tokenize(u"Wir können uns auf der U-Bahn treffen") == [u'Wir', u'können', u'uns', u'auf', u'der', u'U-Bahn', u'treffen.']
    def test_split_email(self):
        assert tokenize(u"Bitte schreiben Sie an duygu@iam.uni-bonn.de ") == [u"Bitte", u"schreiben", u"Sie", u"an", u"duygu@iam.uni-bonn.de"]
    def test_split_url(self):
        assert tokenize(u"www.akbank.com.tr ich du Sie bahn.de") == [u'www.akbank.com.tr', u'ich', u'du', u'Sie', u'bahn.de']
    def test_split_punct(self):
        # Punctuation (comma, semicolon) is dropped rather than kept as tokens.
        assert tokenize(u"Ich bin krank, sie auch; ich auch") == [u'Ich', u'bin', u'krank', u'sie', u'auch', u'ich', u'auch']
    def test_split_abbrev(self):
        # Abbreviations such as 'ggf.' keep their trailing period.
        assert tokenize(u"ggf. kommen wir auf Ihr Angebot zurück") == [u'ggf.', u'kommen', u'wir', u'auf', u'Ihr', u'Angebot', u'zurück']
| 43.88 | 142 | 0.64722 | 963 | 0.874659 | 0 | 0 | 0 | 0 | 0 | 0 | 521 | 0.473206 |
45639632859da23d700860306c51ed0f2069eeb3 | 7,995 | py | Python | pandalog/cmd.py | bitpanda-labs/pandalog | 3c2df6bcc8afe20de6a1e9ef8faa560cd6ee03d1 | [
"BSD-3-Clause"
] | null | null | null | pandalog/cmd.py | bitpanda-labs/pandalog | 3c2df6bcc8afe20de6a1e9ef8faa560cd6ee03d1 | [
"BSD-3-Clause"
] | null | null | null | pandalog/cmd.py | bitpanda-labs/pandalog | 3c2df6bcc8afe20de6a1e9ef8faa560cd6ee03d1 | [
"BSD-3-Clause"
] | null | null | null | """Pandalog collection of entrypoint scripts
Scripts:
- pandalog
- pandalog-auth
To install the collection, use either one of the following options:
1) Build the Dockerfile and run the application
2) Run "python3 setup.py install"
The scripts are installed as executable binaries under /usr/local/bin.
It is always preferable to isolate the application execution in a
virtual environment or a container.
"""
import click
import logging
from pandalog.client import GraylogAPIClient
@click.group()
@click.version_option()
def auth_entrypoint():
    """Pandalog Auth - Retrieve STS tokens for Pandalog user
    \b
    Example usage:
    \b
    $ pandalog-auth get-sts-token -h $HOST -u $USER
    $ pandalog-auth get-sts-token -h $HOST -u $USER -p ${PASS}
    """
    # Click group: subcommands attach via @auth_entrypoint.command();
    # the docstring above is the CLI help text shown to users.
    pass
@auth_entrypoint.command()
@click.option("-h", "--host",
              required=True,
              envvar="GRAYLOG_HOST",
              type=str,
              help="graylog host")
@click.option("-u", "--user",
              required=True,
              envvar="GRAYLOG_USER",
              type=str,
              help="graylog user")
@click.option("-p", "--password",
              prompt=True,
              hide_input=True,
              type=str,
              envvar="GRAYLOG_PASS",
              help="graylog password")
def get_sts_token(host: str,
                  user: str,
                  password: str):
    """get/issue a temporary session token"""
    # Ask the Graylog host for a short-lived session token and dump it to stdout.
    api_client = GraylogAPIClient(host)
    print(api_client.get_sts_token(user, password))
@click.group()
@click.version_option()
def entrypoint():
    """Pandalog - Bitpanda Graylog Python Wrapper
    \b
    Example Usage:
    \b
    $ GRAYLOG_HOST=logs.staging.bitpanda
    $ GRAYLOG_TOKEN=$(pandalog-auth get-sts-token -u ${USER} -p ${PASS})
    $ pandalog get-teams
    $ pandalog get-streams
    $ pandalog to-stream --all "All Pandas,developer"
    $ pandalog from-stream --streams "API,ledger" "staging-developer"
    """
    # Click group for the main pandalog CLI; commands attach below.
    # NOTE(review): the example shows "--streams" but the actual option is
    # "--stream-names" -- confirm and align the help text.
    pass
@entrypoint.command()
@click.option("-h", "--host",
              required=True,
              envvar="GRAYLOG_HOST",
              type=str,
              help="graylog host")
@click.option("-t", "--token",
              required=True,
              envvar="GRAYLOG_TOKEN",
              type=str,
              help="graylog API token")
def get_teams(host: str,
              token: str):
    """list teams"""
    # Fetch every team from the Graylog API, then print them sorted by name.
    api = GraylogAPIClient(host)
    print("ID\t\t\t\t\tNAME")
    for team in sorted(api.get_teams(token), key=lambda t: t["name"]):
        print(f'{team.get("id")}\t\t{team.get("name")}')
@entrypoint.command()
@click.option("-h", "--host",
              required=True,
              envvar="GRAYLOG_HOST",
              type=str,
              help="graylog host")
@click.option("-t", "--token",
              required=True,
              envvar="GRAYLOG_TOKEN",
              type=str,
              help="graylog API token")
def get_streams(host: str,
                token: str):
    """list streams"""
    # Fetch every stream from the Graylog API, then print them sorted by title.
    api = GraylogAPIClient(host)
    print("ID\t\t\t\t\tTITLE")
    for stream in sorted(api.get_streams(token), key=lambda s: s["title"]):
        print(f'{stream.get("id")}\t\t{stream.get("title")}')
@entrypoint.command()
@click.option("-h", "--host",
              required=True,
              envvar="GRAYLOG_HOST",
              type=str,
              help="graylog host")
@click.option("-t", "--token",
              required=True,
              envvar="GRAYLOG_TOKEN",
              type=str,
              help="graylog API token")
@click.option("-a", "--all", "all_streams",
              is_flag=True,
              type=bool,
              help="all streams")
@click.option("-s", "--stream-names",
              required=False,
              type=str,
              help="comma-separated list of streams")
@click.argument("team-names", nargs=-1)
def to_stream(host: str,
              token: str,
              all_streams: bool,
              stream_names: str,
              team_names: list):
    """share stream(s) with team(s)

    Grants 'view' permission on the selected streams (either --all of them
    or the comma-separated --stream-names) to every named team.
    """
    # FIX: the flag's parameter used to be called `all`, shadowing the
    # builtin; the CLI flag (--all) is unchanged, only the Python name is.
    client = GraylogAPIClient(host)
    # Resolve team names to team objects via the API.
    teams = [client.get_team(team, token) for team in team_names]
    if all_streams:
        streams = client.get_streams(token)
    elif stream_names is not None:
        streams = [client.get_stream(name.strip(), token)
                   for name in stream_names.split(",")]
    else:
        # Neither --all nor --stream-names given: nothing to share.
        raise SystemExit("please provide streams or set the --all flag")
    # Add view permission for the teams to each selected stream.
    for stream in streams:
        client.to_stream(stream.get("id"), "view", teams, token)
@entrypoint.command()
@click.option("-h", "--host",
              required=True,
              envvar="GRAYLOG_HOST",
              type=str,
              help="graylog host")
@click.option("-t", "--token",
              required=True,
              envvar="GRAYLOG_TOKEN",
              type=str,
              help="graylog API token")
@click.option("-a", "--all", "all_streams",
              is_flag=True,
              type=bool,
              help="all streams")
@click.option("-s", "--stream-names",
              required=False,
              type=str,
              help="comma-separated list of streams")
@click.argument("team-names", nargs=-1)
def from_stream(host: str,
                token: str,
                all_streams: bool,
                stream_names: str,
                team_names: list):
    """unshare stream(s) with team(s)

    Revokes 'view' permission on the selected streams (either --all of them
    or the comma-separated --stream-names) from every named team.
    """
    # FIX: the flag's parameter used to be called `all`, shadowing the
    # builtin; the CLI flag (--all) is unchanged, only the Python name is.
    client = GraylogAPIClient(host)
    # Resolve team names to team objects via the API.
    teams = [client.get_team(team, token) for team in team_names]
    if all_streams:
        streams = client.get_streams(token)
    elif stream_names is not None:
        streams = [client.get_stream(name.strip(), token)
                   for name in stream_names.split(",")]
    else:
        # Neither --all nor --stream-names given: nothing to unshare.
        raise SystemExit("please provide streams or set the --all flag")
    # Remove view permission for the teams from each selected stream.
    for stream in streams:
        client.from_stream(stream.get("id"), "view", teams, token)
| 30.515267 | 76 | 0.572358 | 0 | 0 | 0 | 0 | 7,488 | 0.936585 | 0 | 0 | 3,401 | 0.425391 |
45644252af580ad3b26baf06271591b906d31ba9 | 2,803 | py | Python | disaster_recovery/actions/views.py | openstack/freezer-web-ui | ed5462edace65a69667e1f2f42cce343cc5bbeea | [
"Apache-2.0"
] | 32 | 2015-10-18T02:53:48.000Z | 2022-03-10T23:41:18.000Z | disaster_recovery/actions/views.py | stackforge/freezer-web-ui | 722ae8a051762bbd5e8bdede691d55b0c74bb1b4 | [
"Apache-2.0"
] | 1 | 2018-01-10T03:59:48.000Z | 2018-01-10T03:59:48.000Z | disaster_recovery/actions/views.py | stackforge/freezer-web-ui | 722ae8a051762bbd5e8bdede691d55b0c74bb1b4 | [
"Apache-2.0"
] | 12 | 2015-12-11T10:09:07.000Z | 2021-01-05T13:35:12.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pprint
from django.urls import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from django.views import generic
from horizon import tables
from horizon import workflows
import disaster_recovery.api.api as freezer_api
from disaster_recovery.actions import tables as freezer_tables
from disaster_recovery.actions.workflows import action as action_workflow
from disaster_recovery.utils import shield
class IndexView(tables.DataTableView):
    """Horizon table view listing all freezer actions."""
    name = _("Actions")
    slug = "actions"
    table_class = freezer_tables.ActionsTable
    template_name = "disaster_recovery/actions/index.html"
    @shield("Unable to get actions", redirect="actions:index")
    def get_data(self):
        # Pass the table's filter string through to the API (None when unset).
        filters = self.table.get_filter_string() or None
        return freezer_api.Action(self.request).list(search=filters)
class ActionView(generic.TemplateView):
    """Detail view rendering a single action as pretty-printed JSON."""
    template_name = 'disaster_recovery/actions/detail.html'
    @shield('Unable to get action', redirect='actions:index')
    def get_context_data(self, **kwargs):
        """Fetch the action identified by ``action_id`` and format it for display."""
        # json=True returns the raw API payload rather than a wrapped object.
        action = freezer_api.Action(self.request).get(kwargs['action_id'],
                                                      json=True)
        return {'data': pprint.pformat(action)}
class ActionWorkflowView(workflows.WorkflowView):
    """Workflow view used both to create a new action and to edit an existing one."""
    workflow_class = action_workflow.ActionWorkflow
    success_url = reverse_lazy("horizon:disaster_recovery:actions:index")
    def is_update(self):
        """Return True when the view was invoked with a non-empty ``action_id``."""
        return 'action_id' in self.kwargs and bool(self.kwargs['action_id'])
    @shield("Unable to get job", redirect="jobs:index")
    def get_initial(self):
        """Seed the workflow's initial data from the existing action when editing.

        For a create (no action_id) the parent's initial data is returned
        unchanged.
        """
        initial = super(ActionWorkflowView, self).get_initial()
        if self.is_update():
            # Placeholder first; the real id is written after the fetch below.
            initial.update({'action_id': None})
            action = freezer_api.Action(self.request).get(
                self.kwargs['action_id'], json=True)
            # Flatten the nested freezer_action dict into the form's fields.
            initial.update(**action['freezer_action'])
            initial.update({
                "mandatory": action.get('mandatory', None),
                "max_retries": action.get('max_retries', None),
                "max_retries_interval":
                    action.get('max_retries_interval', None)
            })
            initial.update({'action_id': action['action_id']})
        return initial
| 37.878378 | 76 | 0.696397 | 1,819 | 0.648948 | 0 | 0 | 1,236 | 0.440956 | 0 | 0 | 967 | 0.344988 |
45650bfff480eaae33caf9f4aae1e7c37cec59e0 | 507 | py | Python | osf_oauth2_adapter/apps.py | enrobyn/lookit-api | 621fbb8b25100a21fd94721d39003b5d4f651dc5 | [
"MIT"
] | null | null | null | osf_oauth2_adapter/apps.py | enrobyn/lookit-api | 621fbb8b25100a21fd94721d39003b5d4f651dc5 | [
"MIT"
] | null | null | null | osf_oauth2_adapter/apps.py | enrobyn/lookit-api | 621fbb8b25100a21fd94721d39003b5d4f651dc5 | [
"MIT"
] | null | null | null | import os
from django.apps import AppConfig
class OsfOauth2AdapterConfig(AppConfig):
    """Django app config for the OSF OAuth2 adapter.

    URLs are read from the environment at import time; trailing slashes are
    normalized so later concatenation is safe.
    """
    name = 'osf_oauth2_adapter'
    # staging by default so people don't have to run OSF to use this.
    osf_api_url = os.environ.get('OSF_API_URL', 'https://staging-api.osf.io').rstrip('/') + '/'
    osf_accounts_url = os.environ.get('OSF_ACCOUNTS_URL', 'https://staging-accounts.osf.io').rstrip('/') + '/'
    # OAuth scopes requested from OSF accounts.
    default_scopes = ['osf.users.email_read', 'osf.users.profile_read', ]
    # NOTE(review): presumably the Django group OSF users are assigned to
    # elsewhere in the app — confirm against the adapter code.
    humans_group_name = 'OSF_USERS'
| 39 | 110 | 0.704142 | 459 | 0.905325 | 0 | 0 | 0 | 0 | 0 | 0 | 246 | 0.485207 |
4565a962599f606102d9b2cb87ed6c04a795800f | 3,040 | py | Python | wiktionarifier/scrape/core.py | lgessler/wiktionarifier | 7359a946f2dcef08455f9e552d6a1e3875beed63 | [
"MIT"
] | 1 | 2021-02-25T16:22:47.000Z | 2021-02-25T16:22:47.000Z | wiktionarifier/scrape/core.py | lgessler/wiktionarifier | 7359a946f2dcef08455f9e552d6a1e3875beed63 | [
"MIT"
] | null | null | null | wiktionarifier/scrape/core.py | lgessler/wiktionarifier | 7359a946f2dcef08455f9e552d6a1e3875beed63 | [
"MIT"
] | null | null | null | import os
import requests as R
import click
import wiktionarifier.scrape.db as db
def process_page(page):
    """Scrape a single wiktionary page into the local database.

    Returns a ``(url, skipped)`` tuple: ``skipped`` is True when the page was
    filtered out (non-lemma, already stored, namespaced URL, or HTTP error)
    and False when it was fetched and stored.
    """
    # Keep only lemma pages: at least one " lemma" category and no
    # "non-lemma" category.
    if not any(" lemma" in cat.title().lower() for cat in page.categories()) or any(
        "non-lemma" in cat.title().lower() for cat in page.categories()
    ):
        click.secho("\tPage doesn't appear to be a lemma, skipping", fg="yellow")
        return page.full_url(), True
    mediawiki_link = str(page)
    # Skip pages already present in the database.
    if db.mwtext_exists(mediawiki_link):
        return page.full_url(), True
    title = page.title()
    url = page.full_url()
    # "%3A" is a URL-encoded ':' — presumably a namespaced (non-entry) page;
    # TODO confirm this heuristic.
    if "%3A" in url:
        click.secho("\tPage doesn't look like a page with dictionary entries, skipping", fg="yellow")
        return page.full_url(), True
    file_safe_url = page.title(as_filename=True)
    latest_revision = page.latest_revision
    rev_id = str(latest_revision["revid"])
    text = latest_revision["text"]
    oldest_revision_time = page.oldest_revision.timestamp.isoformat()
    latest_revision_time = latest_revision.timestamp.isoformat()
    # Fetch the rendered HTML in addition to the wikitext revision above.
    response = R.get(page.full_url())
    if response.status_code != 200:
        click.secho(f"\tNon-200 response from wiktionary: {response.status_code}", fg="red")
        return page.full_url(), True
    html = response.content
    db.add_text(
        mediawiki_link,
        url,
        rev_id,
        text,
        html,
        title,
        file_safe_url,
        oldest_revision_time,
        latest_revision_time,
    )
    return url, False
def scrape(output_dir, wiktionary_language, strategy, max_pages, overwrite):
    """Scrape wiktionary pages into a local database until ``max_pages`` is stored.

    Args:
        output_dir: directory holding the scrape database (created if missing).
        wiktionary_language: language code of the wiktionary site to scrape.
        strategy: "inorder" (alphabetical, resumable) or "random".
        max_pages: stop once the database holds this many records.
        overwrite: when True, delete any existing database first.
    """
    # Imported lazily so the module can be loaded without pywikibot configured.
    import pywikibot
    site = pywikibot.Site(code=wiktionary_language, fam="wiktionary")
    site.login()
    if not os.path.exists(output_dir):
        click.echo(f"Output dir {output_dir} does not exist. Creating...")
        os.makedirs(output_dir, exist_ok=True)
    if overwrite:
        click.echo(f"Removing existing database at {db.db_path(output_dir)}...")
        db.remove_db(output_dir)
    click.echo(f"Initializing connection to database at {db.db_path(output_dir)}")
    db.initialize(output_dir)
    count = db.mwtext_count()
    click.echo(f"Initialized connection with {count} existing records.")
    if strategy == "inorder":
        # Resume from the most recently modified record; "!" is the first
        # page title in MediaWiki's sort order.
        last_visited = db.get_last_modified()
        if last_visited is not None:
            click.echo(f"Resuming scraping session beginning from {last_visited.url}...")
        pages = site.allpages(start=last_visited.title if last_visited is not None else "!")
    elif strategy == "random":
        pages = site.randompages()
    else:
        raise Exception(f"Unknown scraping strategy: `{strategy}`")
    for page in pages:
        # Re-read the count each iteration; process_page may have added a row.
        count = db.mwtext_count()
        if count >= max_pages:
            click.secho(f"Maximum page count {max_pages} reached, quitting", fg="green")
            break
        url, skipped = process_page(page)
        if skipped:
            click.secho(f"[{count}/{max_pages}] Skipping {url}", fg="yellow")
        else:
            click.secho(f"[{count}/{max_pages}] Processed {url}", dim=True)
| 35.348837 | 101 | 0.653289 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 753 | 0.247697 |
4566f1df5335aa685d6a7563d2d07326d10dd71f | 911 | py | Python | melange/event_serializer.py | Rydra/melange | 5de67dd4eab506353ea05f30df6d250997e3c97f | [
"MIT"
] | 7 | 2017-11-22T15:52:46.000Z | 2022-01-17T23:01:24.000Z | melange/event_serializer.py | Rydra/melange | 5de67dd4eab506353ea05f30df6d250997e3c97f | [
"MIT"
] | 1 | 2017-11-30T16:13:52.000Z | 2019-02-12T14:51:54.000Z | melange/event_serializer.py | Rydra/melange | 5de67dd4eab506353ea05f30df6d250997e3c97f | [
"MIT"
] | 3 | 2017-11-15T16:34:20.000Z | 2022-01-24T11:05:54.000Z | import json
from typing import Generic, Dict, Optional
from melange.helpers.typing import T
class MessageSerializer(Generic[T]):
    """Base class converting queue messages to/from domain objects.

    You need to provide a way to convert a message from your sqs
    into something meaningful for your domain (e.g. into a Domain Event).
    Subclasses override all three methods; the bodies here are no-op stubs.
    """
    def manifest(self, data: T) -> str:
        """Return a short tag identifying the serialization format."""
        return ""
    def deserialize(self, data: str, manifest: Optional[str] = None) -> T:
        """Turn the serialized payload back into a domain object (stub returns None)."""
        pass
    def serialize(self, data: T) -> str:
        """Serialize an object to a string representation (stub returns None)."""
        pass
class JsonSQSSerializer(MessageSerializer[Dict]):
    """Serializer that round-trips plain dictionaries through JSON strings."""
    def manifest(self, data: Dict):
        return "json"
    def deserialize(self, serialized_data: str, manifest: Optional[str] = None) -> str:
        # json.loads already yields the dict; no intermediate binding needed.
        return json.loads(serialized_data)
    def serialize(self, data: Dict) -> str:
        return json.dumps(data)
| 25.305556 | 87 | 0.641054 | 812 | 0.891328 | 0 | 0 | 0 | 0 | 0 | 0 | 229 | 0.251372 |
45712dc508f41385cf0cbd9c2e96f1445b5789c9 | 163 | py | Python | utils/decorators/__init__.py | jemand2001/python-utils | 652d3998cb272530e42100ae844178ad7a092c8c | [
"MIT"
] | null | null | null | utils/decorators/__init__.py | jemand2001/python-utils | 652d3998cb272530e42100ae844178ad7a092c8c | [
"MIT"
] | null | null | null | utils/decorators/__init__.py | jemand2001/python-utils | 652d3998cb272530e42100ae844178ad7a092c8c | [
"MIT"
] | null | null | null | from .strict import (strict)
from .overload import (overload)
from .template import (template)
from .convert import (convert)
from .auto_slots import (auto_slots)
| 27.166667 | 36 | 0.785276 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
4571f5ea9930c1e8a75a9d3d4c06b5bd761edd2d | 4,058 | py | Python | src/ebonite/ext/numpy/dataset.py | geffy/ebonite | 2d85eeca44ac1799e743bafe333887712e325060 | [
"Apache-2.0"
] | 1 | 2019-11-27T14:33:45.000Z | 2019-11-27T14:33:45.000Z | src/ebonite/ext/numpy/dataset.py | geffy/ebonite | 2d85eeca44ac1799e743bafe333887712e325060 | [
"Apache-2.0"
] | null | null | null | src/ebonite/ext/numpy/dataset.py | geffy/ebonite | 2d85eeca44ac1799e743bafe333887712e325060 | [
"Apache-2.0"
] | null | null | null | from typing import Tuple, Type
import numpy as np
from pyjackson.core import ArgList, Field
from pyjackson.generics import Serializer
from ebonite.core.analyzer.base import CanIsAMustHookMixin, TypeHookMixin
from ebonite.core.analyzer.dataset import DatasetHook
from ebonite.core.objects.dataset_type import DatasetType
from ebonite.runtime.interface.typing import ListTypeWithSpec, SizedTypedListType
def _python_type_from_np_string_repr(string_repr: str) -> type:
    """Map a numpy dtype name (e.g. ``'float64'``) to the matching built-in Python type."""
    # Compose the two lookups directly instead of using a temporary.
    return _python_type_from_np_type(_np_type_from_string(string_repr))
def _python_type_from_np_type(np_type: Type):
value = np_type()
if np_type.__module__ == 'numpy':
value = value.item()
return type(value)
def _np_type_from_string(string_repr):
try:
return getattr(np, string_repr)
except AttributeError:
raise ValueError('Unknown numpy type {}'.format(string_repr))
class NumpyNumberDatasetType(DatasetType):
    """
    :class:`.DatasetType` implementation for `numpy.number` objects which
    converts them to built-in Python numbers and vice versa.
    :param dtype: `numpy.number` data type as string
    """
    type = 'numpy_number'
    def __init__(self, dtype: str):
        self.dtype = dtype
    def get_spec(self) -> ArgList:
        """Describe the payload as a single unnamed field of the Python scalar type."""
        return [Field(None, self.actual_type, False)]
    def deserialize(self, obj: dict) -> object:
        """Construct the numpy scalar from its serialized (plain Python) value."""
        return self.actual_type(obj)
    def serialize(self, instance: np.number) -> object:
        """Unbox the numpy scalar into the equivalent built-in Python number."""
        return instance.item()
    @property
    def actual_type(self):
        # Resolved lazily from the stored dtype name on each access.
        return _np_type_from_string(self.dtype)
class NumpyNumberHook(CanIsAMustHookMixin, DatasetHook):
    """
    :class:`.DatasetHook` implementation for `numpy.number` objects which uses :class:`NumpyNumberDatasetType`.
    """
    def must_process(self, obj) -> bool:
        """Claim any numpy scalar (instance of ``np.number``)."""
        return isinstance(obj, np.number)
    def process(self, obj: np.number) -> DatasetType:
        """Build the dataset type from the scalar's dtype name (e.g. 'float64')."""
        return NumpyNumberDatasetType(obj.dtype.name)
class NumpyNdarrayHook(TypeHookMixin, DatasetHook):
    """
    :class:`.DatasetHook` implementation for `np.ndarray` objects which uses :class:`NumpyNdarrayDatasetType`
    """
    # TypeHookMixin dispatches on these exact types.
    valid_types = [np.ndarray]
    def process(self, obj) -> DatasetType:
        """Capture the sample array's shape and dtype name into the dataset type."""
        return NumpyNdarrayDatasetType(obj.shape, obj.dtype.name)
class NumpyDTypeSerializer(Serializer):
    """
    PyJackson :class:`.Serializer` for `numpy` data types: stores types in JSON as their names.
    """
    def deserialize(self, obj: str):
        """Resolve the stored name back to the numpy attribute (e.g. 'float64' -> np.float64)."""
        return getattr(np, obj)
    def serialize(self, instance) -> str:
        """Store the type by its string representation."""
        return str(instance)
class NumpyNdarrayDatasetType(DatasetType, ListTypeWithSpec):
    """
    :class:`.DatasetType` implementation for `np.ndarray` objects
    which converts them to built-in Python lists and vice versa.
    :param shape: shape of `numpy.ndarray` objects in dataset
    :param dtype: data type of `numpy.ndarray` objects in dataset
    """
    real_type = np.ndarray
    type = 'numpy_ndarray'
    def __init__(self, shape: Tuple[int, ...], dtype: str):
        # TODO assert shape and dtypes len
        self.shape = shape
        self.dtype = dtype
    @property
    def size(self):
        # For 1-D data each item is a scalar row; otherwise the leading
        # dimension is reported.
        if len(self.shape) == 1:
            return 1
        else:
            return self.shape[0]  # TODO more dimensions
    def list_size(self):
        """Length of the outermost list representation."""
        return self.shape[0]
    def _get_subtype(self, shape):
        """Recursively build the nested sized-list type for the given sub-shape.

        An empty shape yields the bare Python scalar type; otherwise each
        level wraps the inner type in a SizedTypedListType of that dimension.
        """
        if len(shape) == 0:
            return _python_type_from_np_string_repr(self.dtype)
        elif len(shape) == 1:
            subtype = _python_type_from_np_string_repr(self.dtype)
        else:
            subtype = self._get_subtype(shape[1:])
        return SizedTypedListType(shape[0], subtype)
    def get_spec(self) -> ArgList:
        # The leading (batch) dimension is dropped; the spec describes one row.
        return [Field(None, self._get_subtype(self.shape[1:]), False)]
    def deserialize(self, obj):
        """Rebuild the ndarray from nested Python lists."""
        return np.array(obj)
    def serialize(self, instance: np.ndarray):
        """Convert the ndarray to nested Python lists."""
        # if self.shape == 1:
        #     return [instance.tolist()]  # TODO better shapes
        return instance.tolist()
| 28.985714 | 111 | 0.679645 | 3,132 | 0.771809 | 0 | 0 | 238 | 0.05865 | 0 | 0 | 1,012 | 0.249384 |
457331800134bf6e126c42a54f07847df0aef032 | 179 | py | Python | where my anagrams at.py | faaiqbilal/codewars-code | decc7001134234f7ec9ffa74653e2dca5fa40ef6 | [
"MIT"
] | null | null | null | where my anagrams at.py | faaiqbilal/codewars-code | decc7001134234f7ec9ffa74653e2dca5fa40ef6 | [
"MIT"
] | null | null | null | where my anagrams at.py | faaiqbilal/codewars-code | decc7001134234f7ec9ffa74653e2dca5fa40ef6 | [
"MIT"
def anagrams(word, words):
    """Return the members of *words* that are anagrams of *word*, in order."""
    # Two strings are anagrams exactly when their sorted characters match.
    signature = sorted(word)
    return [candidate for candidate in words if sorted(candidate) == signature]
45734ef76afd9e1a67dca51aafc6b99a76fd2b37 | 3,676 | py | Python | src/scenic/simulators/gta/img_modf.py | cahartsell/Scenic | 2e7979011aef426108687947668d9ba6f5439136 | [
"BSD-3-Clause"
] | 141 | 2019-03-07T07:17:19.000Z | 2022-03-19T16:15:48.000Z | src/scenic/simulators/gta/img_modf.py | cahartsell/Scenic | 2e7979011aef426108687947668d9ba6f5439136 | [
"BSD-3-Clause"
] | 27 | 2019-06-18T23:04:29.000Z | 2022-03-31T13:42:05.000Z | src/scenic/simulators/gta/img_modf.py | cahartsell/Scenic | 2e7979011aef426108687947668d9ba6f5439136 | [
"BSD-3-Clause"
] | 59 | 2019-04-08T15:20:15.000Z | 2022-03-29T07:23:26.000Z | '''
This file has basic image modification functions
'''
from PIL import Image
import cv2
from scipy.spatial import Voronoi
from itertools import product
import numpy as np
def convert_black_white(img_data=None, img_file=None, threshold=100):
    """Return a copy of the image thresholded to pure black and white.

    At least one of *img_data* (a PIL image) or *img_file* (a path) must be
    given; *img_data* wins when both are supplied.  A pixel becomes white
    when any of its first three channels exceeds *threshold*, black
    otherwise; alpha is forced to 255.
    """
    assert img_data is not None or img_file is not None
    source = img_data if img_data is not None else Image.open(img_file)
    result = source.copy()
    pixels = result.load()
    width, height = result.size
    for col in range(width):
        for row in range(height):
            channels = np.array(pixels[col, row][0:3])
            if (channels > threshold).any():
                pixels[col, row] = (255, 255, 255, 255)
            else:
                pixels[col, row] = (0, 0, 0, 255)
    return result
def get_edges(img_data=None, img_file=None, threshold=100, kernelsize=1):
    """Return a binary (mode '1') PIL image of Laplacian edges of the thresholded input.

    At least one of *img_data* or *img_file* must be given; *threshold* is
    the black/white cutoff and *kernelsize* the Laplacian aperture.
    """
    assert img_data is not None or img_file is not None
    if img_data is None:
        img_data = Image.open(img_file)
    img_copy = img_data.copy()
    # Get the black and white image
    img_bw = convert_black_white(img_data=img_copy, img_file=img_file,
                                 threshold=threshold)
    cv_bw = cv2.cvtColor(np.array(img_bw), cv2.COLOR_RGB2BGR)
    # Detect edges using Laplacian
    laplacian = cv2.Laplacian(cv_bw, cv2.CV_8U, ksize=kernelsize)
    # Convert back to Pillow image
    pil_lap = Image.fromarray(laplacian)
    # For computing Voronoi images, we need to squeeze the RGB data to 0s and 1s
    pil_squeezed = pil_lap.convert('L')
    pil_squeezed_01 = pil_squeezed.point(lambda x: 0 if x < 128 else 255, '1')
    return pil_squeezed_01
def voronoi_edge(img_data=None, img_file=None, threshold=100, kernelsize=1):
    """Compute Voronoi vertices of the image's edge points that fall on black regions.

    Returns a dict with the binary edge image ('edge_image') and the x/y
    coordinates of kept Voronoi vertices ('vor_center_x'/'vor_center_y'),
    with y measured upward from the bottom of the image.
    """
    assert img_data is not None or img_file is not None
    if img_data is None:
        img_data = Image.open(img_file)
    img_copy = img_data.copy()
    # Get 0s and 1s of the edges
    pil_squeezed_01 = get_edges(img_data=img_copy, img_file=img_file,
                                threshold=threshold, kernelsize=kernelsize)
    # Collecting point for Voronoi edge computation
    # nonzero() yields (row, col); fliplr converts to (x, y) point order.
    nz_elements = np.nonzero(np.asarray(pil_squeezed_01))
    points = np.fliplr(np.array(nz_elements).T)
    vor = Voronoi(points)
    vor_x = vor.vertices.T[0]
    # Flip y so the origin is the bottom-left corner instead of top-left.
    vor_y = -vor.vertices.T[1] + img_data.size[1]
    # Convert the black and white image to 0s and 1s
    img_bw = convert_black_white(img_data=img_copy,
                                 img_file=img_file, threshold=threshold)
    img_bw_squeezed = img_bw.convert('L')
    img_bw_01 = img_bw_squeezed.point(lambda x:0 if x< 128 else 255, '1')
    pixels = img_bw_01.load()
    center_x = []
    center_y = []
    # Keep vertices inside the image whose underlying pixel is black (0).
    for x, y in zip(vor_x, vor_y):
        if 0 < x and x < img_data.size[0] and 0 < y and y < img_data.size[1] \
                and pixels[int(x), img_data.size[1]-1 -int(y)] == 0:
            center_x.append(int(x))
            center_y.append(int(y))
    return {'edge_image':pil_squeezed_01, 'vor_center_x': center_x,
            'vor_center_y': center_y}
def plot_voronoi_plot(img_data=None, img_file=None, threshold=100, kernelsize=3,
                      plot_name=None):
    """Scatter-plot the Voronoi centers of an image and save the figure as PNG.

    The file is named ``<plot_name>.png`` when *plot_name* is given,
    ``voronoi_fig.png`` otherwise.  Note the default *kernelsize* is 3 here,
    unlike voronoi_edge's default of 1.
    """
    # Imported lazily so the module does not require matplotlib unless plotting.
    import matplotlib.pyplot as plt
    assert img_data is not None or img_file is not None
    vor_results = voronoi_edge(img_data=img_data, img_file=img_file,
                               threshold=threshold, kernelsize=kernelsize)
    xlim = vor_results['edge_image'].size[0]
    ylim = vor_results['edge_image'].size[1]
    x_data = vor_results['vor_center_x']
    y_data = vor_results['vor_center_y']
    plt.figure()
    plt.scatter(x_data, y_data, s=0.5)
    plt.xlim(0, xlim)
    plt.ylim(0, ylim)
    if plot_name is None:
        plt.savefig('voronoi_fig.png')
    else:
        plt.savefig(plot_name+'.png')
| 33.117117 | 80 | 0.658596 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 473 | 0.128672 |
4573d78a926be44e3e975ec76045bd812f719ca9 | 1,396 | py | Python | scripts/uri_checks.py | geological-survey-of-queensland/vocabularies | b9a47ad495bb56e09ac7057e0019ce5b014272f1 | [
"CC-BY-4.0"
] | 15 | 2019-03-12T04:20:36.000Z | 2021-11-03T20:30:05.000Z | scripts/uri_checks.py | geological-survey-of-queensland/vocabularies | b9a47ad495bb56e09ac7057e0019ce5b014272f1 | [
"CC-BY-4.0"
] | 207 | 2019-02-11T13:05:56.000Z | 2022-03-29T00:57:20.000Z | scripts/uri_checks.py | geological-survey-of-queensland/vocabularies | b9a47ad495bb56e09ac7057e0019ce5b014272f1 | [
"CC-BY-4.0"
] | 9 | 2019-02-11T21:42:15.000Z | 2021-05-29T05:29:04.000Z | # This script checks the redirect status of vocabs' URIs
#
# as of 2020-06-03, all 64 vocab' URIs are resulting in successful redirects to GSQ VocPrez
import requests
import os
from os.path import join, dirname
import glob
from rdflib import Graph
from rdflib.namespace import RDF, SKOS
def get_vocab_uri_statuses():
    """HEAD-check every ConceptScheme URI found in ../vocabularies/*.ttl.

    Returns a list of ``(uri, http_status_code)`` tuples, one per vocabulary.
    """
    # The vocabularies directory sits next to the current working directory's
    # parent — assumes the script is run from inside the repo; confirm.
    vocabs_dir = join(dirname(os.getcwd()), "vocabularies")
    vocab_uris = []
    for v in sorted(glob.glob(vocabs_dir + "/*.ttl")):
        g = Graph().parse(v, format="turtle")
        # Each file's skos:ConceptScheme subjects are the vocab URIs to check.
        for s, p, o in g.triples((None, RDF.type, SKOS.ConceptScheme)):
            vocab_uris.append(str(s))
    vocab_uri_statues = []
    for vocab_uri in vocab_uris:
        r = requests.head(vocab_uri)
        # 401s are printed for manual inspection but still recorded below.
        if r.status_code == 401:
            print(vocab_uri)
        status = r.status_code
        vocab_uri_statues.append((vocab_uri, status))
    return vocab_uri_statues
if __name__ == "__main__":
vocab_uri_statuses = get_vocab_uri_statuses()
resolving = 0
unresolving = 0
total = len(vocab_uri_statuses)
for r in vocab_uri_statuses:
if r[1] == 404:
unresolving += 1
print(r[0])
elif r[1] in [302, 200]: # the only 200 is the W3C's Gregorian (months) vocab
resolving += 1
print("resolving: {}".format(resolving))
print("unresolving: {}".format(unresolving))
print("total: {}".format(total))
| 29.083333 | 91 | 0.644699 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 283 | 0.202722 |
4575d22b2678c4979e2f0e7aaa2fadd3460d6e76 | 5,164 | py | Python | src/analysis/measurement.py | rabenstefan/epp1819 | 67a46b9967cd6499a8724778246b5f289abef020 | [
"MIT"
] | null | null | null | src/analysis/measurement.py | rabenstefan/epp1819 | 67a46b9967cd6499a8724778246b5f289abef020 | [
"MIT"
] | null | null | null | src/analysis/measurement.py | rabenstefan/epp1819 | 67a46b9967cd6499a8724778246b5f289abef020 | [
"MIT"
] | null | null | null | """Measurement class: organizes measurements and parameters per factor.
MeasurementDimensionError class: exception if dimensions do not fit.
"""
import numpy as np
import pandas as pd
from scipy.stats import norm
class Measurement:
    """Organize measurement-data and parameters pertaining to the measurement
    equations of one factor. Provide probabilities of factors, given that.
    Instance variables:
        + *parameters* (list of dictionaries):
            Each dictionary contains names 'beta1', 'beta2' (coefficients
            of controls), 'z' (coefficient of factor) and 'var' (variance
            of error). Each dictionary describes one measurement equation.
        + *data* (pd.DataFrame):
            Has measurements for all measurement equations and additional
            controls, over all observations. Has MultiIndex (caseid, period)
            and columns 'control', 'control_2', 'meas1', 'meas2', 'meas3'.
    Public methods:
        + marginal_probability
    """
    def __init__(self, parameters, data):
        """Store parameters and measurement data for use in linear measurement
        equation.
        Args:
            + *parameters* (list of dictionaries):
                Each dictionary contains names 'beta1', 'beta2' (coefficients
                of controls), 'z' (coefficient of factor) and 'var' (variance
                of error). Each dictionary describes one measurement equation.
            + *data* (pd.DataFrame):
                Has MultiIndex (caseid, period) and columns 'control',
                'control_2', 'meas1', 'meas2', 'meas3'.
        Created class attributes:
            + *meas_res* (list of pd.DataFrame):
                DataFrame with MultiIndex (caseid, period) and column that
                stores the residuals of measurements given controls (and
                coefficients), for each measurement equation.
            + *fac_coeff* (list of scalars):
                Stores coefficient of factor for each measurement equation.
            + *variances* (list of scalars):
                Stores error variances for each measurement equation.
        """
        self.meas_res = []
        self.fac_coeff = []
        self.variances = []
        # Controls matrix (n_obs x 2), shared by all measurement equations.
        controls = np.array(data.loc[:,['control', 'control_2']])
        for i, param_dic in enumerate(parameters):
            # Measurement columns are named meas1, meas2, ... (1-based).
            eq_nr = str(i+1)
            betas = np.array([param_dic['beta1'], param_dic['beta2']])
            meas = np.array(data['meas'+eq_nr])
            #Generate residuals of measurements given controls.
            resid = meas - np.matmul(controls, betas)
            self.meas_res.append(
                pd.DataFrame(
                    data = resid,
                    index = data.index,
                    columns = ['res'+eq_nr]
                )
            )
            #Store coefficients and variances in lists.
            self.fac_coeff.append(param_dic['z'])
            self.variances.append(param_dic['var'])
    def _density(self, x, var):
        """Return value of density evaluated at x.
        Args:
            + *x* (np.ndarray): matrix of values
            + *var* (scalar): variance of normal density
        Returns:
            + np.ndarray of normal densities at x
        """
        # Zero-mean normal density; norm.pdf takes the standard deviation.
        return norm.pdf(x, scale = np.sqrt(var))
    def marginal_probability(self, factors, period):
        """Returns marginal (since density-values are returned) probability of
        factors, given measurements, for one period.
        Args:
            + *factors* (np.ndarray):
                Array with shape NxM, where N is number of observations and M
                is number of factors per period and observation.
            + *period* (integer):
                number of period, starting at 1
        Returns:
            + marginal probabilities (np.ndarray):
                Array with shape NxM, filled with density-values of the factors
                at the according indices.
        """
        nr_obs, nr_facs = factors.shape
        marginals = np.ones((nr_obs, nr_facs))
        # Calculate densities for each measurement equation.
        for i, var in enumerate(self.variances):
            # Residuals of this equation for the requested period
            # (level 1 of the (caseid, period) MultiIndex).
            meas = self.meas_res[i].xs(period, level = 1)
            if (meas.empty) or (nr_obs != meas.shape[0]):
                raise MeasurementDimensionError
            # Broadcast each observation's residual across all M factor
            # candidates, subtract the factor contribution z*f.
            x = (
                np.repeat(meas.values, nr_facs, axis = 1)
                - self.fac_coeff[i]*factors
            )
            # Likelihoods multiply across (conditionally independent) equations.
            marginals *= self._density(x, var)
        return marginals
class MeasurementDimensionError(Exception):
    """Raised when stored measurements do not match the requested observations/periods."""
    def __str__(self):
        message = ("Measurements are not available for this number of "
                   "observations and / or periods.")
        return message
457699cdcae4bafe41114c01a0f125a7668be5fb | 5,823 | py | Python | tests/processors/test_processors.py | foxis/EasyVision | ffb2ce1a93fdace39c6bcc13c8ae518cec76919e | [
"MIT",
"BSD-3-Clause"
] | 7 | 2018-12-27T07:45:31.000Z | 2021-06-17T03:49:15.000Z | tests/processors/test_processors.py | itohio/EasyVision | de6a4bb9160cd08278ae9c5738497132a4cd3202 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | tests/processors/test_processors.py | itohio/EasyVision | de6a4bb9160cd08278ae9c5738497132a4cd3202 | [
"MIT",
"BSD-3-Clause"
] | 3 | 2019-08-21T03:36:56.000Z | 2021-10-08T16:12:53.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
from pytest import raises, approx
from EasyVision.vision.base import *
from EasyVision.processors.base import *
from tests.common import VisionSubclass, ProcessorA, ProcessorB
@pytest.mark.main
def test_abstract():
    """ProcessorBase is abstract: direct instantiation must raise TypeError."""
    with raises(TypeError):
        ProcessorBase()
@pytest.mark.main
def test_implementation():
    """A concrete processor exposes the vision object it wraps via ``source``."""
    source = VisionSubclass()
    pr = ProcessorA(source)
    assert(pr.source is source)
@pytest.mark.main
def test_capture():
    """An enabled processor replaces the captured image with its own output."""
    vision = VisionSubclass(0)
    with ProcessorA(vision) as processor:
        img = processor.capture()
        assert(isinstance(img, Frame))
        assert(img.images[0].source is processor)
        assert(img.images[0].image == "AN IMAGE")
@pytest.mark.main
def test_capture_disabled():
    """A disabled processor passes the source image through untouched."""
    vision = VisionSubclass(0)
    with ProcessorA(vision, enabled=False) as processor:
        img = processor.capture()
        assert(isinstance(img, Frame))
        assert(img.images[0].source is vision)
        assert(img.images[0].image == "an image")
@pytest.mark.main
def test_capture_append():
    """With append=True the processed image is added after the original."""
    vision = VisionSubclass(0)
    with ProcessorA(vision, append=True) as processor:
        img = processor.capture()
        assert(isinstance(img, Frame))
        assert(img.images[0].source is vision)
        assert(img.images[0].image == "an image")
        assert(img.images[1].source is processor)
        assert(img.images[1].image == "AN IMAGE")
@pytest.mark.main
def test_capture_mask_images():
    """A source-side processor mask ("10") processes only the first image."""
    vision = VisionSubclass(0, num_images=2, processor_mask="10")
    with ProcessorA(vision) as processor:
        img = processor.capture()
        assert(isinstance(img, Frame))
        assert(img.images[0].source is processor)
        assert(img.images[0].image == "AN IMAGE")
        assert(img.images[1].source is vision)
        assert(img.images[1].image == "an image1")
@pytest.mark.main
def test_capture_mask_processor():
    """A processor-side mask ("01") processes only the second image."""
    vision = VisionSubclass(0, num_images=2)
    with ProcessorA(vision, processor_mask="01") as processor:
        img = processor.capture()
        assert(isinstance(img, Frame))
        assert(img.images[0].source is vision)
        assert(img.images[0].image == "an image")
        assert(img.images[1].source is processor)
        assert(img.images[1].image == "AN IMAGE1")
@pytest.mark.main
def test_capture_mask_processor_override():
    """The processor's own mask overrides the mask set on the source."""
    vision = VisionSubclass(0, num_images=2, processor_mask="10")
    with ProcessorA(vision, processor_mask="01") as processor:
        img = processor.capture()
        assert(isinstance(img, Frame))
        assert(img.images[0].source is vision)
        assert(img.images[0].image == "an image")
        assert(img.images[1].source is processor)
        assert(img.images[1].image == "AN IMAGE1")
@pytest.mark.main
def test_capture_mask_processor_override_append():
    """With append=True and an overriding mask, only the masked image is appended."""
    vision = VisionSubclass(0, num_images=2, processor_mask="10")
    with ProcessorA(vision, append=True, processor_mask="01") as processor:
        img = processor.capture()
        assert(isinstance(img, Frame))
        assert(img.images[0].source is vision)
        assert(img.images[0].image == "an image")
        assert(img.images[1].source is vision)
        assert(img.images[1].image == "an image1")
        assert(img.images[2].source is processor)
        assert(img.images[2].image == "AN IMAGE1")
@pytest.mark.main
def test_capture_incorrect():
    """Capturing without entering the context manager must fail."""
    vision = VisionSubclass(0)
    processor = ProcessorA(vision)
    with raises(AssertionError):
        processor.capture()
@pytest.mark.main
def test_capture_stacked_incorrect():
    """A stacked processor also requires the context manager before capture."""
    vision = VisionSubclass("Test")
    processorA = ProcessorA(vision)
    processorB = ProcessorB(processorA)
    assert(processorB.name == "ProcessorB <- ProcessorA <- Test")
    with raises(AssertionError):
        processorB.capture()
@pytest.mark.main
def test_capture_stacked():
    """Stacked processors chain transformations and resolve sources by class name."""
    vision = VisionSubclass("Test")
    processorA = ProcessorA(vision)
    processorB = ProcessorB(processorA)
    assert(processorB.name == "ProcessorB <- ProcessorA <- Test")
    with processorB as processor:
        img = processor.capture()
        assert(isinstance(img, Frame))
        assert(img.images[0].source is processorB)
        assert(img.images[0].image == "An Image")
        # get_source walks the stack and matches on class name.
        assert(processorB.get_source('VisionSubclass') is vision)
        assert(processorB.get_source('ProcessorA') is processorA)
        assert(processorB.get_source('ProcessorB') is processorB)
        assert(processorB.get_source('Test no') is None)
@pytest.mark.main
def test_method_resolution():
    """Method calls on the top processor propagate down to the base vision object."""
    vision = VisionSubclass("Test")
    processorA = ProcessorA(vision)
    processorB = ProcessorB(processorA)
    assert(processorB.name == "ProcessorB <- ProcessorA <- Test")
    assert(not vision.camera_called)
    assert(processorB.camera_())
    assert(processorB._camera_called)
    assert(vision._camera_called)
@pytest.mark.main
def test_processor_properties():
    """Camera properties set on the top processor are forwarded to the base vision."""
    vision = VisionSubclass("Test")
    processorA = ProcessorA(vision)
    processorB = ProcessorB(processorA)
    with processorB as s:
        # All camera properties default to None before being set.
        assert(s.autoexposure is None)
        assert(s.autofocus is None)
        assert(s.autowhitebalance is None)
        assert(s.autogain is None)
        assert(s.exposure is None)
        assert(s.focus is None)
        assert(s.whitebalance is None)
        s.autoexposure = 1
        s.autofocus = 2
        s.autowhitebalance = 3
        s.autogain = 4
        s.exposure = 5
        s.focus = 6
        s.whitebalance = 7
        # Writes through the processor stack land on the base vision object.
        assert(vision.autoexposure == 1)
        assert(vision.autofocus == 2)
        assert(vision.autowhitebalance == 3)
        assert(vision.autogain == 4)
        assert(vision.exposure == 5)
        assert(vision.focus == 6)
        assert(vision.whitebalance == 7)
| 29.558376 | 75 | 0.666667 | 0 | 0 | 0 | 0 | 5,544 | 0.952087 | 0 | 0 | 388 | 0.066632 |
45770058508f21a3a4be648d911aa80a191fe520 | 5,858 | py | Python | utils/ErrorMetrics.py | caozidong/Depth-Completion | a4d95cd33f29c5c8610fc8f40dd3b1fc81186143 | [
"Apache-2.0"
] | 5 | 2021-01-19T13:59:14.000Z | 2021-12-01T12:09:01.000Z | utils/ErrorMetrics.py | caozidong/Depth-Completion | a4d95cd33f29c5c8610fc8f40dd3b1fc81186143 | [
"Apache-2.0"
] | null | null | null | utils/ErrorMetrics.py | caozidong/Depth-Completion | a4d95cd33f29c5c8610fc8f40dd3b1fc81186143 | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class MAE(nn.Module):
    """Mean absolute error over valid (target > 0) pixels, averaged per sample.

    Pixels where the target is zero are treated as missing and excluded from
    both the error sum and the pixel count.
    """
    def __init__(self):
        super(MAE, self).__init__()
    def forward(self, outputs, target, *args):
        # Keep the validity mask on the inputs' device instead of forcing
        # .cuda(), so the metric also works on CPU tensors.
        val_pixels = (target > 0).float()
        err = torch.abs(target * val_pixels - outputs * val_pixels)
        # Per-sample sum of absolute errors over valid pixels.
        loss = torch.sum(err.view(err.size(0), 1, -1), -1, keepdim=True)
        # Per-sample count of valid pixels.
        cnt = torch.sum(val_pixels.view(val_pixels.size(0), 1, -1), -1, keepdim=True)
        return torch.mean(loss / cnt)
class RMSE(nn.Module):
    """Root-mean-square error over valid (target > 0) pixels, averaged per sample."""
    def __init__(self):
        super(RMSE, self).__init__()
    def forward(self, outputs, target, *args):
        # Device-agnostic mask (was hard-coded .cuda(), which broke CPU runs).
        val_pixels = (target > 0).float()
        err = (target * val_pixels - outputs * val_pixels) ** 2
        # Per-sample sum of squared errors over valid pixels.
        loss = torch.sum(err.view(err.size(0), 1, -1), -1, keepdim=True)
        # Per-sample count of valid pixels.
        cnt = torch.sum(val_pixels.view(val_pixels.size(0), 1, -1), -1, keepdim=True)
        return torch.mean(torch.sqrt(loss / cnt))
class Deltas(nn.Module):
    """Threshold accuracies delta_i = fraction of valid pixels with rel. error < 1.01**i."""
    def __init__(self):
        super(Deltas, self).__init__()
    def forward(self, outputs, target, *args):
        # Device-agnostic mask (was hard-coded .cuda(), which broke CPU runs).
        val_pixels = (target > 0).float()
        # Symmetric relative error; for invalid pixels the ratios become
        # NaN/inf, so the `<` comparisons below are False and they never count.
        rel = torch.max((target * val_pixels) / (outputs * val_pixels + 1e-3),
                        (outputs * val_pixels) / (target * val_pixels))
        cnt = torch.sum(val_pixels.view(val_pixels.size(0), 1, -1), -1, keepdim=True)
        def del_i(i):
            # Fraction of valid pixels under the i-th threshold, per sample,
            # then averaged over the batch.
            r = (rel < 1.01 ** i).float()
            delta = torch.sum(r.view(r.size(0), 1, -1), -1, keepdim=True) / cnt
            return torch.mean(delta)
        return del_i(1), del_i(2), del_i(3)
class Huber(nn.Module):
    """Huber loss: quadratic for |error| < delta, linear beyond.

    Args (forward):
        outputs, target: tensors of equal shape.
        delta: transition point between the quadratic and linear regimes.
    """
    def __init__(self):
        super(Huber, self).__init__()
    def forward(self, outputs, target, delta=5):
        # reduction='none' replaces the long-deprecated reduce=False flag.
        l1_loss = F.l1_loss(outputs, target, reduction='none')
        mse_loss = F.mse_loss(outputs, target, reduction='none')
        # mask selects the quadratic region (|error| < delta).
        mask = (l1_loss < delta).float()
        loss = (0.5 * mse_loss) * mask + delta * (l1_loss - 0.5 * delta) * (1 - mask)
        return torch.mean(loss)
class EPE_metric(nn.Module):
    """End-point error: mean absolute difference over pixels with target > 0."""
    def __init__(self):
        super(EPE_metric, self).__init__()
    def forward(self, outputs, target):
        valid = target > 0
        # Restrict both tensors to valid pixels, then average the absolute gap.
        return torch.mean(torch.abs(target[valid] - outputs[valid]))
class D1_metric(nn.Module):
    """Fraction of valid (target > 0) pixels whose absolute error exceeds 3."""
    def __init__(self):
        super(D1_metric, self).__init__()
    def forward(self, outputs, target):
        valid = target > 0
        abs_err = torch.abs(outputs[valid] - target[valid])
        # A relative-error condition (err/|target| > 0.05) was present but
        # disabled in the original code; only the absolute threshold applies.
        outliers = (abs_err > 3).float()
        return torch.mean(outliers)
class Thres_metric(nn.Module):
    """Fraction of pixels valid in BOTH maps with absolute error above 3."""
    def __init__(self):
        super(Thres_metric, self).__init__()
    def forward(self, outputs, target):
        thres = 3
        assert isinstance(thres, (int, float))
        # Unlike D1_metric, a pixel counts only when prediction AND ground
        # truth are both positive.
        valid = (target > 0) * (outputs > 0)
        abs_err = torch.abs(target[valid] - outputs[valid])
        return torch.mean((abs_err > thres).float())
class Deltas_Paint(nn.Module):
    """Threshold accuracies at 1.05, 1.10 and 1.25**{1,2,3}, plus the valid-pixel count.

    Returns a 6-tuple: (delta<1.05, delta<1.10, delta<1.25, delta<1.25^2,
    delta<1.25^3, per-sample valid-pixel counts).
    """
    def __init__(self):
        super(Deltas_Paint, self).__init__()
    def forward(self, outputs, target, *args):
        # Device-agnostic mask (was hard-coded .cuda(), which broke CPU runs).
        val_pixels = (target > 0).float()
        # Symmetric relative error; invalid pixels yield NaN/inf ratios, so
        # the `<` comparisons below are False and they never count.
        rel = torch.max((target * val_pixels) / (outputs * val_pixels + 1e-3),
                        (outputs * val_pixels) / (target * val_pixels))
        cnt = torch.sum(val_pixels.view(val_pixels.size(0), 1, -1), -1, keepdim=True)
        def del_i(i):
            # Fraction below the 1.25**i threshold, batch-averaged.
            r = (rel < 1.25 ** i).float()
            delta = torch.sum(r.view(r.size(0), 1, -1), -1, keepdim=True) / cnt
            return torch.mean(delta)
        def del_j(i):
            # Fraction below an explicit threshold value i, batch-averaged.
            r = (rel < i).float()
            delta = torch.sum(r.view(r.size(0), 1, -1), -1, keepdim=True) / cnt
            return torch.mean(delta)
        return del_j(1.05), del_j(1.10), del_i(1), del_i(2), del_i(3), cnt
class SSIM_Metric(nn.Module):
    """Mean structural similarity (SSIM) between two images using a 3x3 mean filter.

    Returns a tuple of the scalar mean SSIM and the element count of *x*.
    """
    def __init__(self):
        super(SSIM_Metric, self).__init__()
    def forward(self, x, y):
        # Standard SSIM stabilization constants for unit dynamic range.
        c1 = 0.01 ** 2
        c2 = 0.03 ** 2
        # Local statistics from a 3x3 box filter (stride 1, padding 1).
        mean_x = F.avg_pool2d(x, 3, 1, 1)
        mean_y = F.avg_pool2d(y, 3, 1, 1)
        var_x = F.avg_pool2d(x ** 2, 3, 1, 1) - mean_x ** 2
        var_y = F.avg_pool2d(y ** 2, 3, 1, 1) - mean_y ** 2
        cov_xy = F.avg_pool2d(x * y, 3, 1, 1) - mean_x * mean_y
        numerator = (2 * mean_x * mean_y + c1) * (2 * cov_xy + c2)
        denominator = (mean_x ** 2 + mean_y ** 2 + c1) * (var_x + var_y + c2)
        ssim_map = numerator / denominator
        return ssim_map.mean(), torch.tensor([torch.numel(x)])
class MAE_Paint(nn.Module):
    """Masked mean-absolute-error metric over valid (target > 0) pixels.

    Returns ``(loss, tensor([numel of outputs]))``.  NOTE(review): the sum
    of masked errors is divided by the *total* element count, not by the
    valid-pixel count ``cnt`` (which is computed but unused) -- kept as-is
    to preserve behaviour.
    """

    def __init__(self):
        super(MAE_Paint, self).__init__()

    def forward(self, outputs, target, *args):
        # The mask stays on target's device (the original hard-coded
        # `.cuda()` required a GPU; GPU results are unchanged).
        val_pixels = (target > 0).float()
        err = torch.abs(target * val_pixels - outputs * val_pixels)
        # Per-sample sum of masked absolute errors, shape (batch, 1, 1).
        loss = torch.sum(err.view(err.size(0), 1, -1), -1, keepdim=True)
        cnt = torch.sum(val_pixels.view(val_pixels.size(0), 1, -1), -1, keepdim=True)
        return torch.mean(loss / torch.numel(outputs)), torch.tensor([torch.numel(outputs)])
class RMSE_Paint(nn.Module):
    """Masked squared-error metric over valid (target > 0) pixels.

    Returns ``(per-sample loss tensor, tensor([numel of outputs]))``.
    NOTE(review): despite the name no square root is taken, and the sum is
    divided by the total element count rather than the valid-pixel count
    ``cnt`` (computed but unused) -- both quirks kept to preserve behaviour.
    """

    def __init__(self):
        super(RMSE_Paint, self).__init__()

    def forward(self, outputs, target, *args):
        # The mask stays on target's device (the original hard-coded
        # `.cuda()` required a GPU; GPU results are unchanged).
        val_pixels = (target > 0).float()
        err = (target * val_pixels - outputs * val_pixels) ** 2
        # Per-sample sum of masked squared errors, shape (batch, 1, 1).
        loss = torch.sum(err.view(err.size(0), 1, -1), -1, keepdim=True)
        cnt = torch.sum(val_pixels.view(val_pixels.size(0), 1, -1), -1, keepdim=True)
        return (loss / torch.numel(outputs)), torch.tensor([torch.numel(outputs)])
45788c6b217430689dd700b6c88fe7ce35978163 | 1,445 | py | Python | main.py | jagadeesh-vinnakota/iiot_health | 315d8e572d93cea76e235d440f6299b585a0e98c | [
"MIT"
] | null | null | null | main.py | jagadeesh-vinnakota/iiot_health | 315d8e572d93cea76e235d440f6299b585a0e98c | [
"MIT"
] | null | null | null | main.py | jagadeesh-vinnakota/iiot_health | 315d8e572d93cea76e235d440f6299b585a0e98c | [
"MIT"
] | null | null | null | import dash
import dash_core_components as dcc
import dash_html_components as html
from data_gather import plot_line_graph
from run_save_model import predict_line_graph, train_save_load
from generating_data import generate_sensors_data
# generating sensors data
generate_sensors_data()
train_save_load()
# Stylesheet commonly used by Dash example dashboards.
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
# 2x2 dashboard: present vs. predicted health graphs for two warehouse zones.
# NOTE(review): g1/g2 titles spell "Gerogia" -- likely a typo for "Georgia"
# (compare g3/g4); the strings are left untouched here.
app.layout = html.Div([
    html.Div([
        html.Div([
            dcc.Graph(id='g1', figure=plot_line_graph(warehouse_name='wh1', warehouse_zone='zone1', title="Gerogia warehouse zone 1 present health"))
        ], className="six columns"),
        html.Div([
            dcc.Graph(id='g2', figure=predict_line_graph(warehouse_name='wh1', warehouse_zone='zone1', title="Gerogia warehouse zone 1 future health"))
        ], className="six columns"),
    ], className="row"),
    html.Div([
        html.Div([
            dcc.Graph(id='g3', figure=plot_line_graph(warehouse_name='wh1', warehouse_zone='zone2', title="Georgia warehouse zone 2 present health"))
        ], className="six columns"),
        html.Div([
            dcc.Graph(id='g4', figure=predict_line_graph(warehouse_name='wh1', warehouse_zone='zone2', title="Georgia warehouse zone 2 future health"))
        ], className="six columns"),
    ], className="row")
])
if __name__ == '__main__':
app.run_server(debug=False) | 35.243902 | 151 | 0.703806 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 367 | 0.253979 |
4578fe4c6cbb19782f80ec6f027b88bca5feadb5 | 5,131 | py | Python | what_is_code.py | ccostino/what-is-code | 0147113e0193d6ade91e6ae054cec9ff6939b0ab | [
"Apache-2.0"
] | 1 | 2015-06-19T16:37:27.000Z | 2015-06-19T16:37:27.000Z | what_is_code.py | ccostino/what-is-code | 0147113e0193d6ade91e6ae054cec9ff6939b0ab | [
"Apache-2.0"
] | null | null | null | what_is_code.py | ccostino/what-is-code | 0147113e0193d6ade91e6ae054cec9ff6939b0ab | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
WhatIsCode
This extremely simple script was an inspiration driven by a combination of
Paul Ford's article on Bloomberg Business Week, "What is Code?"[1] and
Haddaway's song, "What is Love"[2]. It is probably best enjoyed while
watching an 8-bit demake of the song[3], or 16-bit if you prefer[4]. :-)
[1] http://www.bloomberg.com/graphics/2015-paul-ford-what-is-code/
[2] https://en.wikipedia.org/wiki/What_Is_Love_%28Haddaway_song%29
[3] https://www.youtube.com/watch?v=CT8t_1JXWn8
[4] https://www.youtube.com/watch?v=I2ufcU7I9-I
Usage Instructions/Examples:
In a REPL or other program:
# Import the module.
from what_is_code import WhatIsCode
# Instantiate the object.
song = WhatIsCode()
# Output the song.
song.sing()
As a standalone script on the shell:
Make sure the script is executable:
chmod +x what_is_code.py
Run the script:
./what_is_code.py
Enjoy! :-)
Copyright 2015 Carlo Costino
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class WhatIsCode(object):
    """Holds the lyrics of the 'What is Code?' parody song and can sing
    (print) the whole thing in order."""

    def __init__(self):
        """Set up the refrains and verses used throughout the song."""
        self.main_refrain = [
            'What is code?',
            'Program don\'t crash now',
            'Don\'t crash now, again',
        ]
        self.first_verse = [
            'I don\'t know why you don\'t work',
            'I type things in right, but you just fail',
            'So what is True?',
            'And what is False?',
            'Gimme a break',
        ]
        self.second_verse = [
            'Oh, I give up, nothing I do',
            'Will work in this app, that I called foo',
            'I wrote some tests',
            'And they all pass',
            'But my app fails',
        ]
        self.third_verse = [
            'Can you please just work, just work as I want',
            'This is insane, wasted time',
            'I thought this was easy, I though this was simple',
            'Is it code?',
        ]

    @property
    def secondary_refrain(self):
        """The main refrain minus its opening line."""
        _first, *rest = self.main_refrain
        return rest

    @property
    def tertiary_refrain(self):
        """Two repetitions of the final refrain line with ', again' cut off."""
        shortened = self.main_refrain[2][:-7]
        return [shortened, shortened]

    @property
    def what_is_code(self):
        """The title line, repeated many times across the song."""
        return self.main_refrain[0]

    def sing_line(self, line=''):
        """Return a single line terminated with a newline character."""
        return '{}\n'.format(line)

    def sing_lines(self, lines=[]):
        """Return the given lines joined by newlines, newline-terminated.

        The shared default list is never mutated, so it is safe here.
        """
        return '{}\n'.format('\n'.join(lines))

    def sing(self):
        """Print the entire song, part by part, in order."""
        parts = (
            self.sing_lines(self.main_refrain),
            self.sing_lines(self.secondary_refrain),
            self.sing_line(self.what_is_code),
            'Damn it\n',
            self.sing_lines(self.first_verse),
            self.sing_lines(self.main_refrain),
            self.sing_lines(self.main_refrain),
            self.sing_lines(self.second_verse),
            self.sing_lines(self.main_refrain),
            self.sing_lines(self.main_refrain),
            self.sing_line(self.what_is_code),
            self.sing_line(self.what_is_code),
            self.sing_lines(self.main_refrain),
            self.sing_lines(self.tertiary_refrain),
            self.sing_lines(self.third_verse),
            self.sing_lines(self.main_refrain),
            self.sing_lines(self.main_refrain),
            'Damn\n',
            self.sing_lines(self.main_refrain),
            self.sing_lines(self.main_refrain),
            self.sing_lines(self.secondary_refrain),
            self.sing_lines(self.secondary_refrain),
            self.sing_line(self.what_is_code),
        )
        for part in parts:
            print(part)
# Running this module directly as a script sings the whole song at once.
if __name__ == '__main__':
    WhatIsCode().sing()
| 28.988701 | 78 | 0.62327 | 3,397 | 0.662054 | 0 | 0 | 649 | 0.126486 | 0 | 0 | 3,043 | 0.593062 |
45796c368191af808f0981f9aa041b0bcb8462f3 | 10,651 | py | Python | frcnn/lib/datasets/cocoatts.py | visinf/style-seqcvae | c51dfeaf0e3c1e25dee19cbd20df2004d027fdee | [
"Apache-2.0"
] | null | null | null | frcnn/lib/datasets/cocoatts.py | visinf/style-seqcvae | c51dfeaf0e3c1e25dee19cbd20df2004d027fdee | [
"Apache-2.0"
] | null | null | null | frcnn/lib/datasets/cocoatts.py | visinf/style-seqcvae | c51dfeaf0e3c1e25dee19cbd20df2004d027fdee | [
"Apache-2.0"
] | null | null | null | import os
import pickle
import numpy as np
from datasets.config_attrib_selection import attrib_selection
def save_obj(obj, path):
    """Pickle *obj* to the file at *path* using the highest protocol."""
    with open(path, 'wb') as handle:
        pickle.dump(obj, handle, pickle.HIGHEST_PROTOCOL)
def load_obj(path):
    """Load and return the pickled object stored at *path*."""
    with open(path, 'rb') as handle:
        return pickle.load(handle)
class CocoAttributesReader(object):
    """Reads pickled COCO-Attributes annotation dicts from a directory and
    exposes, per image id, the attribute indices of each object instance.

    Attributes with weight below ``attrib_weight_threshold`` are zeroed,
    attributes disabled in ``config_attrib_selection`` are dropped, and at
    most ``attribs_n_max_per_image`` (rarest-first) attributes are kept per
    image.
    """
    def __init__(self, attribs_dir_path: str):
        """Load and filter all annotation pickles under *attribs_dir_path*."""
        # Minimum per-attribute weight for an attribute to count at all.
        self.attrib_weight_threshold = 0.1
        # Minimum number of images an attribute must appear in (0 = keep all).
        self.attrib_min_appearance = 0
        # Cap on attributes retained per image (rarest kept first).
        self.attribs_n_max_per_image = 200
        # ATTENTION: image2obj_insts, obj_inst2attrib_inst, attrib_inst2attrib_vector still contain elements not appearing in image list
        result_read_attributes = self.read_attributes(attribs_dir_path)
        self.image_ids = set(result_read_attributes[0])
        self.image2obj_insts = result_read_attributes[1]
        self.obj_inst2attrib_inst = result_read_attributes[2]
        self.attrib_inst2attrib_vector = result_read_attributes[3]
        self.ignore_attrib_indices = result_read_attributes[4]
        self.attrib_names = result_read_attributes[5]
        self.attrib_image_count = result_read_attributes[6]
        self.attrib2attrib_inst_count = result_read_attributes[7]
        self.n_attribs = len(self.attrib_names)
        # Dense per-attribute instance counts (index = attribute index).
        self.att_counts = np.zeros(self.n_attribs)
        for k,v in self.attrib2attrib_inst_count.items():
            self.att_counts[k] = v
        self.obj_inst2obj_id = load_obj(os.path.join(attribs_dir_path, "obj_inst2obj_id.pkl"))
        self.obj_id2obj_name = load_obj(os.path.join(attribs_dir_path, "obj_id2obj_name.pkl"))
    def __len__(self) -> int:
        """Number of images that still have at least one kept attribute."""
        return len(self.image_ids)
    def __getitem__(self, image_id: int):
        """Return ``[[obj_inst, [attribute indices]], ...]`` for *image_id*.

        Object instances without an attribute annotation are skipped
        silently (the broad ``except`` swallows lookup failures).
        """
        obj_insts = self.image2obj_insts[image_id]
        #print(obj_insts)
        result = []
        for obj_inst in obj_insts:
            if(obj_inst in self.obj_inst2attrib_inst):
                attrib_inst = self.obj_inst2attrib_inst[obj_inst]
                try:
                    attrib_vec = self.attrib_inst2attrib_vector[attrib_inst]
                    #result.append([obj_inst, attrib_vec]) # attribs as sparse arrays
                    result.append([obj_inst, list(np.nonzero(attrib_vec)[0])]) # attribs as indizes
                    #if(attrib_vec.sum() > 0):
                    #result.append([obj_inst, [self.attrib_names[x] for x in np.nonzero(attrib_vec)[0]]]) # attribs as strings
                    #result.append([obj_inst, [[self.attrib_names[x], attrib_vec[x]] for x in np.nonzero(attrib_vec)[0]]])
                except:
                    pass
        return result
    def filter_duplicates(self, result):
        """Merge duplicate object-instance entries in a ``__getitem__``-style
        list, keeping the maximum weight per attribute.

        Expects entries shaped ``[obj_inst, [[attrib, weight], ...]]``
        (i.e. the weighted variant, not the plain index lists).
        """
        result_filtered = {}
        for obj in result:
            if(obj[0] not in result_filtered):
                result_filtered[obj[0]] = obj[1]
            else:
                result_filtered_atts = [a[0] for a in result_filtered[obj[0]]]
                for attrib in obj[1]:
                    try:
                        idx = result_filtered_atts.index(attrib[0])
                        result_filtered[obj[0]][idx][1] = max(result_filtered[obj[0]][idx][1], attrib[1])
                    except ValueError:
                        result_filtered[obj[0]].append(attrib)
        return [[key, value] for key, value in result_filtered.items()]
    def read_attributes(self, attribs_dir_path, ignore_attrib_indices=None):
        """Load the annotation pickles, apply all filtering steps and return
        the tuple unpacked by ``__init__``.

        Steps: drop attributes disabled in ``attrib_selection``; zero
        weights below ``attrib_weight_threshold``; drop attributes seen in
        fewer than ``attrib_min_appearance`` images (unless an explicit
        ``ignore_attrib_indices`` list is supplied); keep at most
        ``attribs_n_max_per_image`` rarest attributes per image; finally
        rebuild the count dictionaries and the surviving image-id list.
        """
        attrib_inst2attrib_vector = load_obj(os.path.join(attribs_dir_path, "attrib_inst2attrib_vector.pkl"))
        attrib_inst2obj_inst = load_obj(os.path.join(attribs_dir_path, "attrib_inst2obj_inst.pkl"))
        obj_inst2attrib_inst = load_obj(os.path.join(attribs_dir_path, "obj_inst2attrib_inst.pkl"))
        obj_inst2image = load_obj(os.path.join(attribs_dir_path, "obj_inst2image.pkl"))
        image2obj_insts = load_obj(os.path.join(attribs_dir_path, "image2obj_insts.pkl"))
        attrib2string = load_obj(os.path.join(attribs_dir_path, "attrib2string.pkl"))
        attrib_names = []
        for key in sorted(attrib2string.keys()):
            attrib_names.append(attrib2string[key])
# =============================================================================
#         for a in attrib_names:
#             b = a.split(" ")[-1]
#             if not b:
#                 b = a.split(" ")[-2]
#             print(b + " " + b + " ")#, a.split(" "))
# =============================================================================
        # remove ignored attributes from attribute name list
        attrib_selection_list = np.array(list(attrib_selection.values()), dtype=int)
        attrib_ignore_selection_idxs = np.argwhere(attrib_selection_list == 0)
        attrib_names = np.delete(attrib_names, attrib_ignore_selection_idxs).tolist()
        attrib2attrib_inst_count = {}
        attrib_image_count = {}
        attrib2images = {}
        # list() is required: entries may be deleted while iterating.
        for att_id, atts in list(attrib_inst2attrib_vector.items()):
            instance_id = attrib_inst2obj_inst[att_id]
            try:
                coco_id = obj_inst2image[instance_id]
            except:
                del attrib_inst2attrib_vector[att_id]
                continue
            # remove ignored attributes from attribute arrays
            atts = np.delete(atts, attrib_ignore_selection_idxs)
            #atts = (atts * attrib_selection_list)
            idxs_larger = np.argwhere(atts >= self.attrib_weight_threshold)
            idxs_larger = [idx[0] for idx in idxs_larger]
            idxs_too_small = atts < self.attrib_weight_threshold
            # set attribute values in attribute array to zero if smaller than threshold
            atts[idxs_too_small] = 0.0
            attrib_inst2attrib_vector[att_id] = atts
            # add larger attributes to count dict and attrib2images dict
            for idx in idxs_larger:
                if(idx not in attrib2attrib_inst_count):
                    attrib2attrib_inst_count[idx] = 1
                else:
                    attrib2attrib_inst_count[idx] += 1
                if(idx not in attrib2images):
                    attrib2images[idx] = {coco_id}
                else:
                    attrib2images[idx].add(coco_id)
        # generate image count dict for attribute appearance
        for att_id, image_ids in attrib2images.items():
            attrib_image_count[att_id] = len(image_ids)
        # detect attributes with count lower than threshold
        if(ignore_attrib_indices is None):
            ignore_attrib_indices = []
            for att_id, count in attrib_image_count.items():
                if(count < self.attrib_min_appearance):
                    ignore_attrib_indices.append([att_id])
        elif(not ignore_attrib_indices):
            raise ValueError("no ignore_attrib_indices is given.")
        attrib_names = np.delete(attrib_names, ignore_attrib_indices).tolist()
        # Per image: keep only the attribs_n_max_per_image rarest attributes
        # (rarity by image count), zeroing the rest in every instance vector.
        for image_id, obj_insts in image2obj_insts.items():
            attrib_insts = []
            for obj_inst in obj_insts:
                if(obj_inst in obj_inst2attrib_inst):
                    attrib_insts.append(obj_inst2attrib_inst[obj_inst])
            attrib_vectors = []
            rem_list = []
            for attrib_inst in attrib_insts:
                if(attrib_inst in attrib_inst2attrib_vector):
                    attrib_vectors.append(attrib_inst2attrib_vector[attrib_inst])
                else:
                    rem_list.append(attrib_inst)
            for attrib_inst in rem_list:
                attrib_insts.remove(attrib_inst)
            atts = np.sum(attrib_vectors, axis=0)
            idxs_larger = np.argwhere(atts > 0)
            idxs_larger = [idx[0] for idx in idxs_larger]
            n_attribs = min(len(idxs_larger), self.attribs_n_max_per_image)
            # 99999 pushes absent attributes to the end of the rarity sort.
            atts_count = np.ones(atts.shape) * 99999
            for idx in idxs_larger:
                atts_count[idx] = attrib_image_count[idx]
            final_attribs_idxs = np.argsort(atts_count)[:n_attribs]
            for attrib_inst in attrib_insts:
                atts_new = np.zeros(atts.shape)
                for idx in final_attribs_idxs:
                    atts_new[idx] = attrib_inst2attrib_vector[attrib_inst][idx]
                attrib_inst2attrib_vector[attrib_inst] = atts_new
        # remove attributes from dicts which appear in less than config.attrib_min_appearance
        attrib2attrib_inst_count = {}
        attrib2images = {}
        for att_id, atts in attrib_inst2attrib_vector.items():
            instance_id = attrib_inst2obj_inst[att_id]
            coco_id = obj_inst2image[instance_id]
            atts = np.delete(atts, ignore_attrib_indices)
            attrib_inst2attrib_vector[att_id] = atts
            idxs_larger = np.argwhere(atts > 0)
            idxs_larger = [idx[0] for idx in idxs_larger]
            for idx in idxs_larger:
                if(idx not in attrib2attrib_inst_count):
                    attrib2attrib_inst_count[idx] = 1
                else:
                    attrib2attrib_inst_count[idx] += 1
                if(idx not in attrib2images):
                    attrib2images[idx] = {coco_id}
                else:
                    attrib2images[idx].add(coco_id)
        attrib_image_count = {}
        for att_id, image_ids in attrib2images.items():
            attrib_image_count[att_id] = len(image_ids)
        # extract image id list only containing images with not ignored attributes and containing at leat one attribute
        image_ids = set(image_id for set_ in attrib2images.values() for image_id in set_)
        # ATTENTION: image2obj_insts, obj_inst2attrib_inst, attrib_inst2attrib_vector still contain elements not appearing in image list
        return list(image_ids), image2obj_insts, obj_inst2attrib_inst, attrib_inst2attrib_vector, ignore_attrib_indices, attrib_names, attrib_image_count, attrib2attrib_inst_count
| 37.903915 | 179 | 0.5745 | 10,153 | 0.953244 | 0 | 0 | 0 | 0 | 0 | 0 | 1,751 | 0.164398 |
457a13fcc5619e763cff2978c285e3ac7191dd4c | 349 | py | Python | easy/count-as-i-count/main.py | khanh-alice/codingame-python | 3d84361db2e3371104db8d9befcf4dbb47f6ac6e | [
"Apache-2.0"
] | null | null | null | easy/count-as-i-count/main.py | khanh-alice/codingame-python | 3d84361db2e3371104db8d9befcf4dbb47f6ac6e | [
"Apache-2.0"
] | null | null | null | easy/count-as-i-count/main.py | khanh-alice/codingame-python | 3d84361db2e3371104db8d9befcf4dbb47f6ac6e | [
"Apache-2.0"
] | null | null | null | initialScore = int(input())
def count_solutions(score, turn):
    """Weighted count of ways to reach a score of exactly 50 within the
    4-turn limit.

    Each turn raises the score by 1..12; an increment of 1 contributes
    weight 1, increments 2..12 each contribute weight 2.
    """
    if score > 50 or turn > 4:
        return 0
    if score == 50:
        return 1
    total = count_solutions(score + 1, turn + 1)
    total += sum(2 * count_solutions(score + step, turn + 1)
                 for step in range(2, 13))
    return total
print(count_solutions(initialScore, 0))
| 17.45 | 58 | 0.601719 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
457a7a556af7a4b160da5ec709717168732be108 | 559 | py | Python | python/udp_socket_emit.py | draconicfae/godot_daydream_controller | 81b6b78583264c9e5d383fe7691a6836deaea7a7 | [
"MIT"
] | null | null | null | python/udp_socket_emit.py | draconicfae/godot_daydream_controller | 81b6b78583264c9e5d383fe7691a6836deaea7a7 | [
"MIT"
] | null | null | null | python/udp_socket_emit.py | draconicfae/godot_daydream_controller | 81b6b78583264c9e5d383fe7691a6836deaea7a7 | [
"MIT"
] | null | null | null | import socket
import json
class udp_emit:
def __init__(self, host, port):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.connect((host, port))
def emit(self, datadict):
try:
self.sock.sendall(json.dumps(datadict).encode())
except:
pass
#uncomment if you want to be notified
#print("cannot send data over udp socket, the destination is either not listening yet or is refusing to connect. Check to see if it's running yet.")
| 34.9375 | 161 | 0.617174 | 531 | 0.949911 | 0 | 0 | 0 | 0 | 0 | 0 | 186 | 0.332737 |
457b9730f9e55cce329d265159d8b2efc07ce3bc | 133 | py | Python | recuEuclid.py | azyxb/info | d34555abe55895751272e0ad129c7fb79f9613b0 | [
"MIT"
] | 2 | 2019-12-14T10:54:38.000Z | 2020-03-30T22:57:11.000Z | recuEuclid.py | azyxb/info | d34555abe55895751272e0ad129c7fb79f9613b0 | [
"MIT"
] | null | null | null | recuEuclid.py | azyxb/info | d34555abe55895751272e0ad129c7fb79f9613b0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
def gcd(a, b):
if a == 0 :
return b
return gcd(b%a, a)
a = 12000
b = 8642
print(gcd(a, b))
| 12.090909 | 23 | 0.511278 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 0.165414 |
457bd8f1d4de17115e3f02e7cca8a1fd7c15124e | 4,065 | py | Python | distributed.py | SagaFav/etlpy | 5cc98ecc2b8e6590a7410195174dbe85beeb4927 | [
"Apache-2.0"
] | 448 | 2016-03-17T13:00:57.000Z | 2022-02-17T06:00:25.000Z | distributed.py | Hanfee/etlpy | 5cc98ecc2b8e6590a7410195174dbe85beeb4927 | [
"Apache-2.0"
] | 6 | 2016-03-17T12:06:25.000Z | 2019-08-31T01:12:18.000Z | distributed.py | Hanfee/etlpy | 5cc98ecc2b8e6590a7410195174dbe85beeb4927 | [
"Apache-2.0"
] | 204 | 2016-03-17T04:00:00.000Z | 2022-03-22T10:28:05.000Z | import sys;
from queue import Queue
from multiprocessing.managers import BaseManager
import etl;
import json
import extends;
import time;
authkey= "etlpy".encode('utf-8')
timeout=1;
rpc_port=8888
class ETLJob:
def __init__(self,project,jobname,config,id):
self.project= project;
self.jobname=jobname;
self.config=config;
self.id= id;
class JobResult:
def __init__(self,name,count,id):
self.name=name;
self.count=count;
self.id=id;
class Master:
def __init__(self,project,jobname):
# 派发出去的作业队列
self.dispatched_job_queue = Queue()
# 完成的作业队列
self.finished_job_queue = Queue()
self.project= project;
self.jobname=jobname;
self.maxprocess= 10;
def get_dispatched_job_queue(self):
return self.dispatched_job_queue
def get_finished_job_queue(self):
return self.finished_job_queue
def start(self,skip=0):
# 把派发作业队列和完成作业队列注册到网络上
BaseManager.register('get_dispatched_job_queue', callable=self.get_dispatched_job_queue)
BaseManager.register('get_finished_job_queue', callable=self.get_finished_job_queue)
# 监听端口和启动服务
manager = BaseManager(address=('0.0.0.0', rpc_port), authkey=authkey)
manager.start()
# 使用上面注册的方法获取队列
dispatched_jobs = manager.get_dispatched_job_queue()
finished_jobs = manager.get_finished_job_queue()
job_id = 0
module= self.project.modules[self.jobname];
proj=json.loads(json.dumps(etl.convert_dict(self.project,self.project.__defaultdict__), ensure_ascii=False))
while True:
for task in etl.parallel_map(module):
job_id = job_id + 1
if job_id<skip:
continue
job = ETLJob(proj, self.jobname, task, job_id);
print('Dispatch job: %s' % job.id)
dispatched_jobs.put(job)
while not dispatched_jobs.empty():
job = finished_jobs.get(60)
print('Finished Job: %s, Count: %s' % (job.id, job.count))
key=input('press any key to repeat,c to cancel')
if key=='c':
manager.shutdown()
break
#manager.shutdown()
class Slave:
def __init__(self):
# 派发出去的作业队列
self.dispatched_job_queue = Queue()
# 完成的作业队列
self.finished_job_queue = Queue()
def start(self,execute= True,serverip='127.0.0.1',port=8888):
# 把派发作业队列和完成作业队列注册到网络上
BaseManager.register('get_dispatched_job_queue')
BaseManager.register('get_finished_job_queue')
server = serverip;
print('Connect to server %s...' % server)
manager = BaseManager(address=(server, port), authkey=authkey)
manager.connect()
# 使用上面注册的方法获取队列
dispatched_jobs = manager.get_dispatched_job_queue()
finished_jobs = manager.get_finished_job_queue()
# 运行作业并返回结果,这里只是模拟作业运行,所以返回的是接收到的作业
while True:
if dispatched_jobs.empty():
time.sleep(1)
print('queue is empty,wait 1 sec...')
continue;
job = dispatched_jobs.get(timeout=timeout)
print('Run job: %s ' % job.id)
project=job.project;
project= etl.LoadProject_dict(project);
module= project.modules[job.jobname];
count=0
try:
generator= etl.parallel_reduce(module,[ job.config],execute)
for r in generator:
count+=1;
except Exception as e:
print(e)
print('finish job,id %s, count %s'%(job.id,count))
resultjob= JobResult(job.jobname,count,job.id)
finished_jobs.put(resultjob)
if __name__ == '__main__':
ip='127.0.0.1'
port=8888;
argv=sys.argv;
if len(argv)>1:
ip=argv[1];
if len(argv)>2:
port=int(argv[2]);
slave= Slave();
slave.start(True,ip,port);
| 29.035714 | 116 | 0.597786 | 3,914 | 0.900806 | 0 | 0 | 0 | 0 | 0 | 0 | 798 | 0.183659 |
457c9e761df0197d87df1e4c38c0d31c4acad3b3 | 46 | py | Python | je_auto_control/utils/exception/__init__.py | JE-Chen/Python_JEAutoControl | 477bf9612e28e9ab6d0a8e269db2f699e50a3744 | [
"MIT"
] | 9 | 2020-10-12T06:33:36.000Z | 2021-09-13T07:07:36.000Z | je_auto_control/utils/exception/__init__.py | JE-Chen/Python_JEAutoControl | 477bf9612e28e9ab6d0a8e269db2f699e50a3744 | [
"MIT"
] | null | null | null | je_auto_control/utils/exception/__init__.py | JE-Chen/Python_JEAutoControl | 477bf9612e28e9ab6d0a8e269db2f699e50a3744 | [
"MIT"
] | null | null | null | from je_auto_control.utils.exception import *
| 23 | 45 | 0.847826 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
457d8ed76184da607ae120220f6d3c830a2b03ff | 399 | py | Python | kafka_streamer/topic/datatype/base.py | sam-mosleh/kafka-streamer | be977af192b33a335b42af0a42ad05d9f3f9fef4 | [
"BSD-3-Clause"
] | 1 | 2020-06-26T00:10:05.000Z | 2020-06-26T00:10:05.000Z | kafka_streamer/topic/datatype/base.py | sam-mosleh/kafka-streamer | be977af192b33a335b42af0a42ad05d9f3f9fef4 | [
"BSD-3-Clause"
] | null | null | null | kafka_streamer/topic/datatype/base.py | sam-mosleh/kafka-streamer | be977af192b33a335b42af0a42ad05d9f3f9fef4 | [
"BSD-3-Clause"
] | null | null | null | from abc import ABC, abstractmethod
from typing import Type, Union
from kafka_streamer.models import SchematicRecord, Serializable
class KafkaDataType(ABC):
    """Abstract base pairing serialize/deserialize operations for one
    Kafka key/value data type."""
    # Leading byte of encoded payloads; presumably the schema-registry
    # wire-format magic byte -- confirm against the concrete codecs.
    _MAGIC_BYTE = 0
    @abstractmethod
    def deserialize(self, data: bytes):
        """Decode raw Kafka message bytes into a value."""
        pass
    @abstractmethod
    def serialize(
        self, data: Union[SchematicRecord, Type[Serializable], bytes, None]
    ) -> bytes:
        """Encode *data* into raw Kafka message bytes."""
        pass
| 21 | 75 | 0.696742 | 264 | 0.661654 | 0 | 0 | 207 | 0.518797 | 0 | 0 | 0 | 0 |
457fe543702bb54ebf9797e9d79e185259d5ef9b | 479 | py | Python | Searching and Sorting/playlist.py | mishrakeshav/CSES-Problem-Set | 7f7169b20af44430e9208ba22c122054cea23ca1 | [
"MIT"
] | null | null | null | Searching and Sorting/playlist.py | mishrakeshav/CSES-Problem-Set | 7f7169b20af44430e9208ba22c122054cea23ca1 | [
"MIT"
] | null | null | null | Searching and Sorting/playlist.py | mishrakeshav/CSES-Problem-Set | 7f7169b20af44430e9208ba22c122054cea23ca1 | [
"MIT"
] | null | null | null |
def solve():
n = int(input())
k = list(map(int,input().split()))
hashmap = dict()
j = 0
ans = 0
c = 0
for i in range(n):
if k[i] in hashmap and hashmap[k[i]] > 0:
while i > j and k[i] in hashmap and hashmap[k[i]] > 0:
hashmap[k[j]] -= 1
j += 1
c -= 1
hashmap[k[i]] = 1
c += 1
ans = max(ans,c)
print(ans)
if __name__ == '__main__':
solve() | 19.958333 | 66 | 0.409186 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.020877 |
458102bbeb0db4cf5f121ed094ae66fd065c288b | 998 | py | Python | tools/extract-qa-from-squad.py | xuqingyang/qa-robot | 19315ec4b603d45e9ea6d2edd57a3148168fb51a | [
"MIT"
] | null | null | null | tools/extract-qa-from-squad.py | xuqingyang/qa-robot | 19315ec4b603d45e9ea6d2edd57a3148168fb51a | [
"MIT"
] | null | null | null | tools/extract-qa-from-squad.py | xuqingyang/qa-robot | 19315ec4b603d45e9ea6d2edd57a3148168fb51a | [
"MIT"
] | null | null | null | import json
import argparse
import pprint
import csv
parser = argparse.ArgumentParser(description="parse squad qa into scv")
parser.add_argument("--input", type=str)
parser.add_argument("--output", type=str)
args = parser.parse_args()
input_file = args.input
output_file = args.output
with open(input_file, 'r') as f:
data = json.load(f)
pprint.pprint(data['data'][0]['paragraphs'][0]['qas'])
print(len(data['data']))
i = 0
output_data = []
for d in data['data']:
for paragraph in d['paragraphs']:
for qa in paragraph['qas']:
if qa['answers']:
print(qa['question'])
print(qa['answers'][0]['text'])
output_data.append([str(i), qa['question'], qa['answers'][0]['text']])
i += 1
with open(output_file, 'w', encoding='UTF8', newline='') as f:
writer = csv.writer(f)
# write the header
writer.writerow(['id', 'question', 'answer'])
# write multiple rows
writer.writerows(output_data)
| 26.263158 | 86 | 0.619238 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 230 | 0.230461 |
458163ee3d8d9bc98cadc044e2e7a1c59f34e1ba | 9,448 | py | Python | boardinghouse/middleware.py | luzfcb/django-boardinghouse | 4b9fc7b0e51b6f72abcd904152b92243cf6f0eb0 | [
"BSD-3-Clause"
] | null | null | null | boardinghouse/middleware.py | luzfcb/django-boardinghouse | 4b9fc7b0e51b6f72abcd904152b92243cf6f0eb0 | [
"BSD-3-Clause"
] | null | null | null | boardinghouse/middleware.py | luzfcb/django-boardinghouse | 4b9fc7b0e51b6f72abcd904152b92243cf6f0eb0 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import unicode_literals
import logging
import re
from django.contrib import messages
from django.db import ProgrammingError
from django.http import HttpResponse, HttpResponseForbidden, HttpResponseRedirect
from django.shortcuts import redirect
from django.utils.translation import ugettext_lazy as _
from django.utils import six
from .schema import (
TemplateSchemaActivation, Forbidden,
get_schema_model,
activate_schema, deactivate_schema,
)
from .signals import session_requesting_schema_change, session_schema_changed
logger = logging.getLogger('boardinghouse.middleware')
def change_schema(request, schema):
    """
    Change the schema for the current request's session.
    Note this does not actually _activate_ the schema, it only stores
    the schema name in the current request's session.

    ``schema`` may be a Schema instance, a schema name string, or a falsy
    value (which clears the stored schema).  Raises ``Forbidden`` when the
    user may not use the schema, and ``TemplateSchemaActivation`` for the
    reserved ``__template__`` schema.  A listener on
    ``session_requesting_schema_change`` may also veto the change by
    raising; that exception propagates to the caller.
    """
    session = request.session
    user = request.user
    # Allow clearing out the current schema.
    if not schema:
        session.pop('schema', None)
        return
    # Anonymous users may not select a schema.
    # Should this be selectable?
    if user.is_anonymous():
        session.pop('schema', None)
        raise Forbidden()
    # We actually want the schema name, so we can see if we
    # don't actually need to change the schema at all (if the
    # session is already set, then we assume that it's all good)
    if isinstance(schema, six.string_types):
        schema_name = schema
    else:
        schema_name = schema.schema
    # Don't allow anyone, even superusers, to select the template schema.
    if schema_name == '__template__':
        raise TemplateSchemaActivation()
    # If the schema is already set to this name for this session, then
    # we can just exit early, saving some db access.
    if schema_name == session.get('schema', None):
        return
    Schema = get_schema_model()
    if user.is_superuser or user.is_staff:
        # Just a sanity check: that the schema actually
        # exists at all, when the superuser attempts to set
        # the schema.
        if schema_name == schema:
            try:
                schema = Schema.objects.get(schema=schema_name)
            except Schema.DoesNotExist:
                raise Forbidden()
    else:
        # If we were passed in a schema object, rather than a string,
        # then we can check to see if that schema is active before
        # having to hit the database.
        if isinstance(schema, Schema):
            # I'm not sure that it's logically possible to get this
            # line to return True - we only pass in data from user.visible_schemata,
            # which excludes inactives.
            if not schema.is_active:
                raise Forbidden()
        # Ensure that this user has access to this schema,
        # and that this schema is active. We can do this using the
        # cache, which prevents hitting the database.
        visible_schemata = [schema.schema for schema in user.visible_schemata]
        if schema_name not in visible_schemata:
            raise Forbidden()
    # Allow 3rd-party applications to listen for an attempt to change
    # the schema for a user/session, and prevent it from occurring by
    # raising an exception. We will just pass that exception up the
    # call stack.
    session_requesting_schema_change.send(
        sender=request,
        schema=schema_name,
        user=request.user,
        session=request.session,
    )
    # Actually set the schema on the session.
    session['schema'] = schema_name
    # Allow 3rd-party applications to listen for a change, and act upon
    # it accordingly.
    session_schema_changed.send(
        sender=request,
        schema=schema_name,
        user=request.user,
        session=request.session,
    )
class SchemaMiddleware:
    """
    Middleware to set the postgres schema for the current request's session.

    The schema that will be used is stored in the session. A lookup will
    occur (but this could easily be cached) on each request.

    There are three ways to change the schema as part of a request.

    1. Request a page with a querystring containing a ``__schema`` value::

        https://example.com/page/?__schema=<schema-name>

       The schema will be changed (or cleared, if this user cannot view
       that schema), and the page will be re-loaded (if it was a GET). This
       method of changing schema allows you to have a link that changes the
       current schema and then loads the data with the new schema active.
       It is used within the admin for having a link to data from an
       arbitrary schema in the ``LogEntry`` history.
       This type of schema change request should not be done with a POST
       request.

    2. Add a request header::

        X-Change-Schema: <schema-name>

       This will not cause a redirect to the same page without query string. It
       is the only way to do a schema change within a POST request, but could
       be used for any request type.

    3. Use a specific request::

        https://example.com/__change_schema__/<schema-name>/

       This is designed to be used from AJAX requests, or as part of
       an API call, as it returns a status code (and a short message)
       about the schema change request. If you were storing local data,
       and did one of these, you are probably going to have to invalidate
       much of that.

    You could also come up with other methods.
    """
    def process_request(self, request):
        """Handle any schema-change request, then activate (or deactivate)
        the schema stored on the session for the remainder of the request."""
        # Pre-built response returned whenever change_schema raises Forbidden.
        FORBIDDEN = HttpResponseForbidden(_('You may not select that schema'))
        # Ways of changing the schema.
        # 1. URL /__change_schema__/<name>/
        # This will return a whole page.
        # We don't need to activate, that happens on the next request.
        if request.path.startswith('/__change_schema__/'):
            # Path is /__change_schema__/<name>/ so index 2 is the name.
            schema = request.path.split('/')[2]
            try:
                change_schema(request, schema)
            except Forbidden:
                return FORBIDDEN
            if 'schema' in request.session:
                response = _('Schema changed to %s') % request.session['schema']
            else:
                response = _('Schema deselected')
            return HttpResponse(response)
        # 2. GET querystring ...?__schema=<name>
        # This will change the query, and then redirect to the page
        # without the schema name included.
        elif request.GET.get('__schema', None) is not None:
            schema = request.GET['__schema']
            try:
                change_schema(request, schema)
            except Forbidden:
                return FORBIDDEN
            data = request.GET.copy()
            data.pop('__schema')
            if request.method == "GET":
                # redirect so we strip the schema out of the querystring.
                if data:
                    return redirect(request.path + '?' + data.urlencode())
                return redirect(request.path)
            # method == 'POST' or other: keep processing, but hide the
            # __schema key from downstream consumers of request.GET.
            request.GET = data
        # 3. Header "X-Change-Schema: <name>"
        elif 'HTTP_X_CHANGE_SCHEMA' in request.META:
            schema = request.META['HTTP_X_CHANGE_SCHEMA']
            try:
                change_schema(request, schema)
            except Forbidden:
                return FORBIDDEN
        elif 'schema' not in request.session and len(request.user.visible_schemata) == 1:
            # Exactly one schema visible: select it automatically.
            # Can we not require a db hit each request here?
            change_schema(request, request.user.visible_schemata[0])
        # Finally, activate whatever schema (if any) the session now holds.
        if 'schema' in request.session:
            activate_schema(request.session['schema'])
        else:
            deactivate_schema()
    def process_exception(self, request, exception):
        """
        In the case a request returned a DatabaseError, and there was no
        schema set on ``request.session``, then look and see if the error
        that was provided by the database may indicate that we should have
        been looking inside a schema.

        In the case we had a :class:`TemplateSchemaActivation` exception,
        then we want to remove that key from the session.
        """
        if isinstance(exception, ProgrammingError) and not request.session.get('schema'):
            # "relation ... does not exist" is postgres's message when a table
            # lookup failed — here taken as a hint that a schema was needed.
            if re.search('relation ".*" does not exist', exception.args[0]):
                # I'm not sure if this should be done or not, but it does
                # fail without the if statement from django 1.8+
                # if not transaction.get_autocommit():
                #     transaction.rollback()
                # Should we return an error, or redirect? When should we
                # do one or the other? For an API, we would want an error
                # but for a regular user, a redirect may be better.
                # Can we see if there is already a pending message for this
                # request that has the same content as us?
                messages.error(request,
                    _("You must select a schema to access that resource"),
                    fail_silently=True
                )
                return HttpResponseRedirect('..')
        # I'm not sure we ever really hit this one, but it's worth keeping
        # here just in case we've missed something.
        if isinstance(exception, TemplateSchemaActivation):
            request.session.pop('schema', None)
            return HttpResponseForbidden(_('You may not select that schema'))
| 38.096774 | 89 | 0.63876 | 5,662 | 0.59928 | 0 | 0 | 0 | 0 | 0 | 0 | 5,031 | 0.532494 |
458184e9428ce6081bebdb6fd9e6c94b6f1c189a | 13,141 | py | Python | meta-nml/model.py | kevintli/mural | e18e7d1a72b561fab1b5da026806e3417a9c63db | [
"MIT"
] | 5 | 2021-09-23T07:35:58.000Z | 2022-01-07T21:23:06.000Z | meta-nml/model.py | kevintli/mural | e18e7d1a72b561fab1b5da026806e3417a9c63db | [
"MIT"
] | null | null | null | meta-nml/model.py | kevintli/mural | e18e7d1a72b561fab1b5da026806e3417a9c63db | [
"MIT"
] | 1 | 2021-12-10T20:01:16.000Z | 2021-12-10T20:01:16.000Z | import torch.nn as nn
from collections import OrderedDict

import numpy as np
import torch
import torch.nn.functional as F
from torchmeta.modules import (MetaModule, MetaConv2d, MetaBatchNorm2d,
                               MetaSequential, MetaLinear)
def conv_block(in_channels, out_channels, **kwargs):
    """Build a conv + ReLU block wrapped for torchmeta parameter routing.

    Extra keyword arguments are forwarded to ``MetaConv2d``. Batch norm and
    max pooling are intentionally omitted from the block.
    """
    layers = OrderedDict()
    layers['conv'] = MetaConv2d(in_channels, out_channels, **kwargs)
    layers['relu'] = nn.ReLU()
    return MetaSequential(layers)
class MetaConvModel(MetaModule):
    """4-layer Convolutional Neural Network architecture from [1].

    Parameters
    ----------
    in_channels : int
        Number of channels for the input images.
    out_features : int
        Number of classes (output of the model).
    hidden_size : int (default: 64)
        Number of channels in the intermediate representations.
    feature_size : int (default: 64)
        Number of features returned by the convolutional head.

    References
    ----------
    .. [1] Finn C., Abbeel P., and Levine, S. (2017). Model-Agnostic Meta-Learning
           for Fast Adaptation of Deep Networks. International Conference on
           Machine Learning (ICML) (https://arxiv.org/abs/1703.03400)
    """
    def __init__(self, in_channels, out_features, hidden_size=64, feature_size=64):
        super(MetaConvModel, self).__init__()
        self.in_channels = in_channels
        self.out_features = out_features
        self.hidden_size = hidden_size
        self.feature_size = feature_size
        # Four conv+ReLU blocks; conv_block has norm/pool commented out, so
        # spatial dimensions are preserved through the feature extractor.
        self.features = MetaSequential(OrderedDict([
            ('layer1', conv_block(in_channels, hidden_size, kernel_size=3,
                                  stride=1, padding=1, bias=True)),
            ('layer2', conv_block(hidden_size, hidden_size, kernel_size=3,
                                  stride=1, padding=1, bias=True)),
            ('layer3', conv_block(hidden_size, hidden_size, kernel_size=3,
                                  stride=1, padding=1, bias=True)),
            ('layer4', conv_block(hidden_size, hidden_size, kernel_size=3,
                                  stride=1, padding=1, bias=True))
        ]))
        # Linear head mapping the flattened feature map to class logits.
        self.classifier = MetaLinear(feature_size, out_features, bias=True)
    def forward(self, inputs, params=None):
        """Return class logits. ``params``, when given, supplies fast weights
        routed to sub-modules via torchmeta's ``get_subdict``."""
        features = self.features(inputs, params=self.get_subdict(params, 'features'))
        # Flatten all non-batch dimensions before the linear classifier.
        features = features.view((features.size(0), -1))
        logits = self.classifier(features, params=self.get_subdict(params, 'classifier'))
        return logits
class MetaToyConvModel(MetaModule):
    """Two-layer meta-convolutional classifier for single-channel toy images.

    NOTE(review): ``forward`` reshapes inputs to 84x84 while ``embedding``
    reshapes to 100x100 — these disagree; confirm which size is intended.
    Also, the classifier expects ``feature_size`` inputs (default 64), but
    the flattened conv output is hidden_size * H * W — verify callers pass a
    matching ``feature_size``.
    """
    def __init__(self, out_features, in_channels=1, hidden_size=64, feature_size=64):
        super(MetaToyConvModel, self).__init__()
        self.in_channels = in_channels
        self.out_features = out_features
        self.hidden_size = hidden_size
        self.feature_size = feature_size
        # Two conv+ReLU blocks (two more were intentionally disabled).
        self.features = MetaSequential(OrderedDict([
            ('layer1', conv_block(in_channels, hidden_size, kernel_size=3,
                                  stride=1, padding=1, bias=True)),
            ('layer2', conv_block(hidden_size, hidden_size, kernel_size=3,
                                  stride=1, padding=1, bias=True)),
            # ('layer3', conv_block(hidden_size, hidden_size, kernel_size=3,
            #                       stride=1, padding=1, bias=True)),
            # ('layer4', conv_block(hidden_size, hidden_size, kernel_size=3,
            #                       stride=1, padding=1, bias=True))
        ]))
        self.classifier = MetaLinear(feature_size, out_features, bias=True)
    def forward(self, inputs, params=None):
        """Return class logits for a flat batch, reshaped to (N, 1, 84, 84)."""
        inputs = torch.reshape(inputs, (-1, 1, 84, 84))
        features = self.features(inputs, params=self.get_subdict(params, 'features'))
        features = features.view((features.size(0), -1))
        logits = self.classifier(features, params=self.get_subdict(params, 'classifier'))
        return logits
    def embedding(self, inputs, params=None):
        """Return the flattened conv features (no classifier applied).

        Accepts numpy arrays as well as tensors. NOTE(review): reshapes to
        (N, 1, 100, 100), unlike forward's 84x84 — confirm.
        """
        if type(inputs) == np.ndarray:
            inputs = torch.from_numpy(inputs)
        inputs = torch.reshape(inputs, (-1, 1, 100, 100))
        features = self.features(inputs, params=self.get_subdict(params, 'features'))
        features = features.view((features.size(0), -1))
        return features
class MetaMNISTConvModel(MetaModule):
    """Two-conv-layer meta classifier for MNIST-sized images with an
    intermediate linear layer (``classifier_first``) before the logits head.

    NOTE(review): the ``feature_size`` argument (default 25088) is ignored —
    ``self.feature_size`` is recomputed as in_width * in_width * hidden_size.
    """
    def __init__(self, out_features, in_width=28, in_channels=1, hidden_size=32, mid_feats=512, feature_size=25088):
        super(MetaMNISTConvModel, self).__init__()
        self.in_width = in_width
        self.in_channels = in_channels
        self.out_features = out_features
        self.hidden_size = hidden_size
        # Convs preserve spatial size, so the flattened dim is W*W*channels.
        self.feature_size = in_width * in_width * hidden_size
        self.mid_feats = mid_feats
        self.features = MetaSequential(OrderedDict([
            ('layer1', conv_block(in_channels, hidden_size, kernel_size=3,
                                  stride=1, padding=1, bias=True)),
            ('layer2', conv_block(hidden_size, hidden_size, kernel_size=3,
                                  stride=1, padding=1, bias=True)),
        ]))
        # Two-stage head: features -> mid_feats -> class logits.
        self.classifier_first = MetaLinear(self.feature_size, mid_feats, bias=True)
        self.classifier = MetaLinear(mid_feats, out_features, bias=True)
    def forward(self, inputs, params=None):
        """Return class logits; inputs may be flat (reshaped to NCHW here)."""
        inputs = torch.reshape(inputs, (-1, self.in_channels, self.in_width, self.in_width) )
        features = self.features(inputs, params=self.get_subdict(params, 'features'))
        features = features.reshape((features.size(0), -1))
        mid_logits = self.classifier_first(features, params=self.get_subdict(params, 'classifier_first'))
        logits = self.classifier(mid_logits, params=self.get_subdict(params, 'classifier'))
        return logits
    def embedding(self, inputs, params=None):
        """Return the ``mid_feats``-dimensional embedding (output of
        ``classifier_first``), i.e. the representation before the logits head."""
        inputs = torch.reshape(inputs, (-1, self.in_channels, self.in_width, self.in_width) )
        features = self.features(inputs, params=self.get_subdict(params, 'features'))
        features = features.view((features.size(0), -1))
        mid_logits = self.classifier_first(features, params=self.get_subdict(params, 'classifier_first'))
        return mid_logits
class MNISTConvModel(nn.Module):
    """Plain (non-meta) LeNet-style CNN classifier for 1x28x28 MNIST images.

    Two conv+pool stages reduce the image to 20x4x4 = 320 features, followed
    by two linear layers. Returns log-probabilities over ``num_classes``.
    """
    def __init__(self, num_classes):
        # Bug fix: previously called super(Net, self).__init__() with an
        # undefined name ``Net``, raising NameError on instantiation.
        super(MNISTConvModel, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, num_classes)
    def forward(self, x):
        """Return per-class log-probabilities of shape (N, num_classes)."""
        x = F.relu(F.max_pool2d(self.conv1(x), 2))          # -> (N, 10, 12, 12)
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))  # -> (N, 20, 4, 4)
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        # dim=1 made explicit (implicit-dim log_softmax is deprecated).
        return F.log_softmax(x, dim=1)
class Flatten(nn.Module):
    """Collapse every dimension after the batch axis into a single one."""
    def forward(self, input):
        batch_size = input.size(0)
        return input.reshape(batch_size, -1)
class UnFlatten(nn.Module):
    """Reshape a flat (N, 980) latent code into (N, 20, 7, 7) feature maps."""
    def forward(self, input):
        batch_size = input.size(0)
        return input.view(batch_size, 20, 7, 7)
class VAE(nn.Module):
    """Convolutional variational autoencoder.

    Encoder: three conv+ReLU blocks -> flatten -> two linear heads producing
    (mu, logvar). Decoder: linear -> reshape to (20, 7, 7) -> three
    transposed-conv blocks -> sigmoid, reconstructing an
    ``img_channels x in_width x in_width`` image.

    NOTE(review): the decoder hard-codes the (20, 7, 7) bottleneck and its
    transposed-conv arithmetic (7 -> 13 -> 27 -> 28) only reproduces
    ``in_width`` for the default 28x28 input; other sizes need matching
    decoder changes. The ``h_dim`` parameter is ignored (recomputed from the
    encoder geometry) and kept only for signature compatibility.
    """
    def __init__(self, in_width=28, z_dim=20, img_channels=1, h_dim=980):
        super(VAE, self).__init__()
        self.in_width = in_width
        self.img_channels = img_channels
        def conv_output_dim(input_size, kernel_size, stride=1, padding=0, **kwargs):
            """Spatial size of a Conv2d output for a square input."""
            from math import floor
            return floor((input_size + 2 * padding - (kernel_size - 1) - 1) / stride + 1)
        # Encoder geometry: track the spatial size through each conv layer.
        conv1_filters = 10
        conv1_kwargs = dict(out_channels=conv1_filters, kernel_size=3, stride=1, padding=1)
        a1_size = conv_output_dim(in_width, **conv1_kwargs)
        conv2_filters = 10
        conv2_kwargs = dict(out_channels=conv2_filters, kernel_size=4, stride=2, padding=1)
        a2_size = conv_output_dim(a1_size, **conv2_kwargs)
        conv3_filters = 20
        conv3_kwargs = dict(out_channels=conv3_filters, kernel_size=5, stride=2, padding=2)
        # Bug fix: this size was previously computed with conv2_kwargs, which
        # only happened to give the same number (7) for the default 28x28 input.
        a3_size = conv_output_dim(a2_size, **conv3_kwargs)
        h_dim = a3_size ** 2 * conv3_filters  # flattened encoder output size
        self.conv1 = nn.Sequential(
            nn.Conv2d(img_channels, **conv1_kwargs),
            nn.ReLU()
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(conv1_filters, **conv2_kwargs),
            nn.ReLU()
        )
        self.conv3 = nn.Sequential(
            nn.Conv2d(conv2_filters, **conv3_kwargs),
            nn.ReLU()
        )
        # Latent heads: mean and log-variance of q(z|x).
        self.fc11 = nn.Linear(h_dim, z_dim)
        self.fc12 = nn.Linear(h_dim, z_dim)
        # Decoder. (Leftover debug prints of the layer sizes were removed.)
        self.fc2 = nn.Linear(z_dim, h_dim)
        self.deconv1 = nn.Sequential(
            nn.ConvTranspose2d(20, 10, kernel_size=5, stride=2, padding=2),
            nn.ReLU(),
        )
        self.deconv2 = nn.Sequential(
            nn.ConvTranspose2d(10, 10, kernel_size=5, stride=2, padding=1),
            nn.ReLU(),
        )
        self.deconv3 = nn.Sequential(
            nn.ConvTranspose2d(10, img_channels, kernel_size=6, stride=1, padding=2),
            nn.Sigmoid(),
        )
    def encode(self, x):
        """Map an image batch to (mu, logvar) of the latent posterior."""
        a1 = self.conv1(x)
        a2 = self.conv2(a1)
        a3 = self.conv3(a2)
        # Flatten all non-batch dimensions (previously a Flatten() module).
        h = a3.reshape(a3.size(0), -1)
        return self.fc11(h), self.fc12(h)
    def reparameterize(self, mu, logvar):
        """Sample z = mu + sigma * eps with eps ~ N(0, I)."""
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        return mu + eps * std
    def decode(self, z):
        """Map a latent batch back to image space."""
        # Reshape the linear output to (N, 20, 7, 7) feature maps
        # (previously an UnFlatten() module).
        h = self.fc2(z).view(z.size(0), 20, 7, 7)
        a1 = self.deconv1(h)
        a2 = self.deconv2(a1)
        a3 = self.deconv3(a2)
        return a3
    def forward(self, x):
        """Return (reconstruction, mu, logvar); accepts flat or NCHW input."""
        x = torch.reshape(x, (-1, self.img_channels, self.in_width, self.in_width))
        mu, logvar = self.encode(x)
        z = self.reparameterize(mu, logvar)
        return self.decode(z), mu, logvar
class MetaMLPModel(MetaModule):
    """Multi-layer Perceptron architecture from [1].

    Parameters
    ----------
    in_features : int
        Number of input features.
    out_features : int
        Number of classes (output of the model).
    hidden_sizes : list of int
        Size of the intermediate representations. The length of this list
        corresponds to the number of hidden layers.

    References
    ----------
    .. [1] Finn C., Abbeel P., and Levine, S. (2017). Model-Agnostic Meta-Learning
           for Fast Adaptation of Deep Networks. International Conference on
           Machine Learning (ICML) (https://arxiv.org/abs/1703.03400)
    """
    def __init__(self, in_features, out_features, hidden_sizes):
        super(MetaMLPModel, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.hidden_sizes = hidden_sizes
        layer_sizes = [in_features] + hidden_sizes
        # One (linear -> ReLU) block per hidden layer. Note: the loop variable
        # named ``hidden_size`` is actually the *input* width of layer i
        # (layer_sizes[i]); its output width is layer_sizes[i + 1].
        self.features = MetaSequential(OrderedDict([('layer{0}'.format(i + 1),
            MetaSequential(OrderedDict([
                ('linear', MetaLinear(hidden_size, layer_sizes[i + 1], bias=True)),
                ('relu', nn.ReLU())
            ]))) for (i, hidden_size) in enumerate(layer_sizes[:-1])]))
        # Final linear head from the last hidden width to the output size.
        self.classifier = MetaLinear(hidden_sizes[-1], out_features, bias=True)
    def forward(self, inputs, params=None):
        """Return logits; ``params`` optionally supplies fast weights routed
        through torchmeta's ``get_subdict``."""
        features = self.features(inputs, params=self.get_subdict(params, 'features'))
        logits = self.classifier(features, params=self.get_subdict(params, 'classifier'))
        return logits
def ModelConvOmniglot(out_features, hidden_size=64):
    """Factory: MetaConvModel configured for Omniglot (1 input channel)."""
    return MetaConvModel(
        in_channels=1,
        out_features=out_features,
        hidden_size=hidden_size,
        feature_size=hidden_size,
    )
def ModelConvMiniImagenet(out_features, hidden_size=64):
    """Factory: MetaConvModel configured for MiniImagenet (3 input channels,
    5x5 spatial feature map)."""
    return MetaConvModel(
        in_channels=3,
        out_features=out_features,
        hidden_size=hidden_size,
        feature_size=5 * 5 * hidden_size,
    )
def ModelMLPSinusoid(hidden_sizes=None):
    """MLP regressor (1 -> 1) for the sinusoid regression benchmark.

    ``hidden_sizes`` defaults to [40, 40]; a None sentinel replaces the
    previous mutable-list default argument.
    """
    if hidden_sizes is None:
        hidden_sizes = [40, 40]
    return MetaMLPModel(1, 1, hidden_sizes)
def ModelMLPToy2D(hidden_sizes=None):
    """MLP classifier (2 -> 2) for the 2-D toy task.

    ``hidden_sizes`` defaults to [1024, 1024]; a None sentinel replaces the
    previous mutable-list default argument.
    """
    if hidden_sizes is None:
        hidden_sizes = [1024, 1024]
    return MetaMLPModel(2, 2, hidden_sizes)
if __name__ == '__main__':
    # Smoke test: construct the 2-D toy MLP when run directly.
    model = ModelMLPToy2D()
| 39.226866 | 116 | 0.623392 | 11,975 | 0.910646 | 0 | 0 | 0 | 0 | 0 | 0 | 2,161 | 0.164335 |
4581f801ff1eb9705ad068ec4480ee6ead8ec4c9 | 3,417 | py | Python | debug.py | davenewham/BlackBoard-Course-Downloader | 1f0a2fe11067488d8e2a46818857623c4c6849bd | [
"MIT"
] | 57 | 2019-06-11T16:19:39.000Z | 2022-03-29T01:38:47.000Z | debug.py | davenewham/BlackBoard-Course-Downloader | 1f0a2fe11067488d8e2a46818857623c4c6849bd | [
"MIT"
] | 14 | 2019-06-13T17:30:44.000Z | 2022-01-16T14:21:15.000Z | debug.py | davenewham/BlackBoard-Course-Downloader | 1f0a2fe11067488d8e2a46818857623c4c6849bd | [
"MIT"
] | 20 | 2019-11-24T11:28:12.000Z | 2022-02-16T07:29:56.000Z | from blackboard import BlackBoardContent, BlackBoardClient, BlackBoardAttachment, BlackBoardEndPoints, \
BlackBoardCourse, BlackBoardInstitute
import os
import re
import requests
import datetime
import xmltodict
import argparse
import sys
import json
import getpass
import main
def test():
    """Collect diagnostic data (institute, client login, courses) and write
    it to ``dump.json`` under the client's base path for bug reports."""
    args = main.handle_arguments(True)
    # Institute Data
    print("Dumping Institute Properties...")
    institute_data = dict()
    institute_vars = vars(args.institute)
    for item in institute_vars:
        institute_data[item] = institute_vars[item]
    print("Dumped Institute Properties...")
    # Client Data: log in and record the raw login response for debugging.
    client_data = dict()
    client = BlackBoardClient(username=args.username, password=args.password, site=args.site, save_location=args.location, institute=args.institute)
    attempt = client.login()
    print(f"Client Login {'Successful' if attempt[0] else 'Failure'}...\nDumping Client Properties...")
    client_data["public_api_available"] = client.public_endpoint_available()
    client_data["login_endpoint"] = attempt[1].url
    client_data["login_status_code"] = attempt[1].status_code
    client_data["login_response"] = attempt[1].text
    client_data["successful_login"] = attempt[0]
    client_vars = vars(client)
    for item in client_vars:
        # Skip the password and non-serializable/irrelevant attributes.
        if item not in ('_BlackBoardClient__password', 'session', 'institute', 'api_version', 'thread_pool'):
            client_data[item] = client_vars[item]
    print("Dumped Client Properties...")
    # Get Parent Course Data
    course_data = {
        'endpoint': '',
        'status_code': '',
        'response': '',
        'courses': []
    }
    def get_courses():
        """
        Get all Available Course Information for the Client and Record Details
        """
        courses_request = client.send_get_request(BlackBoardEndPoints.get_user_courses(client.user_id))
        courses = courses_request.json()
        course_data['endpoint'] = courses_request.url
        course_data['status_code'] = courses_request.status_code
        course_data['response'] = courses
        if "results" in courses:
            for course in courses["results"]:
                # One request per course; failures are recorded, not fatal.
                try:
                    course_request = client.send_get_request(BlackBoardEndPoints.get_course(course["courseId"]))
                    course = course_request.json()
                    bbcourse = BlackBoardCourse(client, course)
                    course_vars = vars(bbcourse)
                    course_sub_data = dict()
                    course_sub_data["course_endpoint"] = course_request.url
                    course_sub_data['status_code'] = course_request.status_code
                    for item in course_vars:
                        course_sub_data[item] = str(course_vars[item])
                    course_data['courses'].append(course_sub_data)
                except Exception as e:
                    course_data['courses'].append({'error': str(e)})
    print("Getting Course Data...")
    get_courses()
    print("Completed Course Data...")
    dumps = {
        'institute': institute_data,
        'client': client_data,
        'courses': course_data,
    }
    print("Preparing to Dump Debug...")
    # Note: ``file`` shadows the builtin here; scope is limited to this block.
    with open(os.path.abspath(os.path.join(client.base_path, "dump.json")), 'w+') as file:
        print(f"Writing File: \"{file.name}\"...")
        json.dump(dumps, file)
    print("Done...")
if __name__ == "__main__":
    # Run the diagnostic dump when executed directly.
    test()
| 38.829545 | 148 | 0.642962 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 838 | 0.245244 |
458360f058f7cad1d028a83c610e0b3f7525ff76 | 1,246 | py | Python | easycron/easycron/plist.py | skeptycal/.dotfiles | ef6d4e47f77a12024587ed24a0c3048fe5b60ed1 | [
"MIT"
] | 5 | 2019-10-03T21:25:42.000Z | 2022-03-30T16:14:20.000Z | easycron/easycron/plist.py | skeptycal/.dotfiles | ef6d4e47f77a12024587ed24a0c3048fe5b60ed1 | [
"MIT"
] | 6 | 2019-07-11T00:23:08.000Z | 2020-12-15T06:21:19.000Z | easycron/easycron/plist.py | skeptycal/.dotfiles | ef6d4e47f77a12024587ed24a0c3048fe5b60ed1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import datetime
import plistlib
import tempfile
import time
from os import PathLike
from typing import Dict, Union
# Sample payload exercising every plist-serializable type: strings, lists,
# numbers, nested dicts, booleans, bytes, and datetimes.
pl = dict(
    aString="Doodah",
    aList=["A", "B", 12, 32.1, [1, 2, 3]],
    aFloat=0.1,
    anInt=728,
    aDict=dict(
        anotherString="<hello & hi there!>",
        aThirdString="M\xe4ssig, Ma\xdf",
        aTrueValue=True,
        aFalseValue=False,
    ),
    someData=b"<binary gunk>",
    someMoreData=b"<lots of binary gunk>" * 10,
    # NOTE(review): mktime() interprets the UTC struct from gmtime() as
    # *local* time, so this datetime is offset by the local UTC offset —
    # confirm whether that is intended.
    aDate=datetime.datetime.fromtimestamp(time.mktime(time.gmtime())),
)
# Output path used when this module is run directly.
test_file_name: PathLike = 'some.random.plist'
def write_plist(fileName: PathLike) -> bool:
    """Serialize the module-level ``pl`` payload to *fileName* as a plist.

    Returns False on success and True on failure (inverted convention kept
    for backward compatibility with existing callers).
    """
    try:
        with open(fileName, 'wb') as fp:
            plistlib.dump(pl, fp)
        return False
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        return True
def read_plist(fileName: PathLike) -> Union[Dict, None]:
    """Load and return the plist stored at *fileName*, or None on any error
    (missing file, unreadable file, malformed plist)."""
    try:
        with open(fileName, 'rb') as fp:
            return plistlib.load(fp)
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        return None
if __name__ == '__main__':
    # Round-trip the sample payload through a plist file on disk.
    write_plist(fileName=test_file_name)
    data = read_plist(fileName=test_file_name)
    # Bug fix: a stray ``plistlib.`` expression sat here and made the whole
    # module a SyntaxError; it (and dead commented-out tempfile code) was
    # removed.
| 23.074074 | 70 | 0.635634 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 293 | 0.235152 |
458445a4da8ce1297713dbab63280e457ebba263 | 414 | py | Python | symposion/schedule/migrations/0003_remove_presentation_additional_speakers.py | pyohio/symposion | f8ec9c7e7daab4658061867d1294c1c126dd2919 | [
"BSD-3-Clause"
] | null | null | null | symposion/schedule/migrations/0003_remove_presentation_additional_speakers.py | pyohio/symposion | f8ec9c7e7daab4658061867d1294c1c126dd2919 | [
"BSD-3-Clause"
] | 5 | 2015-07-16T19:46:00.000Z | 2018-03-11T05:58:48.000Z | symposion/schedule/migrations/0003_remove_presentation_additional_speakers.py | pyohio/symposion | f8ec9c7e7daab4658061867d1294c1c126dd2919 | [
"BSD-3-Clause"
] | 1 | 2017-01-27T21:18:26.000Z | 2017-01-27T21:18:26.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-06-23 06:06
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Schema migration: drop the ``additional_speakers`` field from the
    ``presentation`` model."""
    # Must run after the slot-name migration in the same app.
    dependencies = [
        ('symposion_schedule', '0002_slot_name'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='presentation',
            name='additional_speakers',
        ),
    ]
| 20.7 | 49 | 0.63285 | 264 | 0.637681 | 0 | 0 | 0 | 0 | 0 | 0 | 142 | 0.342995 |
458576ab2c7f8650e836abcb0b08f9c1ccc55b11 | 425 | py | Python | setup.py | 3kwa/datoms | 108733769a940b799482270198aa500873aee01c | [
"Unlicense"
] | 4 | 2017-10-27T19:11:23.000Z | 2021-02-27T08:18:39.000Z | setup.py | 3kwa/datoms | 108733769a940b799482270198aa500873aee01c | [
"Unlicense"
] | null | null | null | setup.py | 3kwa/datoms | 108733769a940b799482270198aa500873aee01c | [
"Unlicense"
] | 1 | 2016-09-02T12:29:45.000Z | 2016-09-02T12:29:45.000Z | from setuptools import setup
# Packaging metadata for the single-module `datoms` distribution.
setup(
    name = 'datoms',
    version = '0.1.0',
    description = 'A simplistic, Datomic inspired, SQLite backed, REST influenced, schemaless auditable facts storage.',
    py_modules = ['datoms'],
    license = 'unlicense',
    author = 'Eugene Van den Bulke',
    author_email = 'eugene.vandenbulke@gmail.com',
    url = 'https://github.com/3kwa/datoms',
    # Runtime dependency: the third-party `sql` package.
    install_requires = ['sql'],
)
| 28.333333 | 120 | 0.663529 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 224 | 0.527059 |
45857d1509e13e72b89d409aa8c1b7c1595621d3 | 2,728 | py | Python | cocoa_folder/scripts/bot_bot_chat.py | s-akanksha/DialoGraph_ICLR21 | d5bbc10b2623c9f84d21a99a5e54e7dcfdfb1bcc | [
"Apache-2.0"
] | 12 | 2021-03-17T05:15:33.000Z | 2022-01-19T06:09:21.000Z | cocoa_folder/scripts/bot_bot_chat.py | s-akanksha/DialoGraph_ICLR21 | d5bbc10b2623c9f84d21a99a5e54e7dcfdfb1bcc | [
"Apache-2.0"
] | 2 | 2021-05-25T07:28:46.000Z | 2022-02-11T01:54:43.000Z | cocoa_folder/scripts/bot_bot_chat.py | s-akanksha/DialoGraph_ICLR21 | d5bbc10b2623c9f84d21a99a5e54e7dcfdfb1bcc | [
"Apache-2.0"
] | 4 | 2021-10-11T03:39:38.000Z | 2022-02-01T23:58:50.000Z | '''
Takes two agent implementations and generates the dialogues.
'''
import argparse
import random
import json
import numpy as np
from cocoa.core.util import read_json
from cocoa.core.schema import Schema
from cocoa.core.scenario_db import ScenarioDB, add_scenario_arguments
from cocoa.core.dataset import add_dataset_arguments
from core.scenario import Scenario
from core.controller import Controller
from systems import add_system_arguments, get_system
def generate_examples(agents, agent_names, scenarios, num_examples, max_turns):
    """Simulate dialogues between two agents, cycling through scenarios.

    For each example, both orderings of the agent pair are played so that
    each agent takes each role once. Uses the module-level ``args.verbose``.
    """
    examples = []
    for idx in range(num_examples):
        scenario = scenarios[idx % len(scenarios)]
        # Each agent needs to play both buyer and seller.
        for first in (0, 1):
            ordering = [agents[first], agents[1 - first]]
            ordering_names = [agent_names[first], agent_names[1 - first]]
            sessions = [ordering[0].new_session(0, scenario.kbs[0]),
                        ordering[1].new_session(1, scenario.kbs[1])]
            controller = Controller(scenario, sessions, session_names=ordering_names)
            examples.append(controller.simulate(max_turns, verbose=args.verbose))
    return examples
if __name__ == '__main__':
    parser = argparse.ArgumentParser(conflict_handler='resolve')
    parser.add_argument('--agent', nargs=3, metavar=('type', 'checkpoint', 'name'), action='append', help='Agent parameters')
    parser.add_argument('--max-turns', default=20, type=int, help='Maximum number of turns')
    parser.add_argument('--num-examples', type=int)
    parser.add_argument('--examples-path')
    parser.add_argument('-v', '--verbose', default=False, action='store_true', help='whether or not to have verbose prints')
    add_scenario_arguments(parser)
    add_system_arguments(parser)
    args = parser.parse_args()
    schema = Schema(args.schema_path)
    scenario_db = ScenarioDB.from_dict(schema, read_json(args.scenarios_path), Scenario)
    # Build one system per --agent argument, keyed by its name.
    agents = {}
    for agent_params in args.agent:
        agent_type, model_path, agent_name = agent_params
        agents[agent_name] = get_system(agent_type, args, schema, model_path=model_path)
    scenarios = scenario_db.scenarios_list
    examples = []
    # Pair the baseline agent against every other agent.
    for base_agent_name in ('sl-words',):
        base_agent = agents[base_agent_name]
        for agent_name, agent in agents.iteritems():
            if agent_name != base_agent_name:
                # Bug fix: this used to rebind ``agents`` (the dict being
                # iterated) to a two-element list, shadowing it for the rest
                # of the script; distinct names are used for the pair now.
                pair_agents = [base_agent, agent]
                pair_names = [base_agent_name, agent_name]
                examples.extend(generate_examples(pair_agents, pair_names, scenarios, args.num_examples, args.max_turns))
    with open(args.examples_path, 'w') as out:
        print >>out, json.dumps([e.to_dict() for e in examples])
| 41.333333 | 125 | 0.69868 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 344 | 0.1261 |
4586103dff65390c36a39a858fcae5341e48ccd7 | 2,777 | py | Python | app/handlers/admins/moderation.py | vitaliy-ukiru/math-bot | 72c116b4f5a4aa6a5f8eaae67ecbbf3df821f9e9 | [
"MIT"
] | 1 | 2021-12-11T07:41:38.000Z | 2021-12-11T07:41:38.000Z | app/handlers/admins/moderation.py | vitaliy-ukiru/math-bot | 72c116b4f5a4aa6a5f8eaae67ecbbf3df821f9e9 | [
"MIT"
] | 8 | 2021-05-08T21:48:34.000Z | 2022-01-20T15:42:00.000Z | app/handlers/admins/moderation.py | vitaliy-ukiru/math-bot | 72c116b4f5a4aa6a5f8eaae67ecbbf3df821f9e9 | [
"MIT"
] | null | null | null | # Source: https://github.com/aiogram/bot/blob/master/aiogram_bot/handlers/simple_admin.py
import logging
from aiogram import types
from aiogram.utils import exceptions
from babel.dates import format_timedelta
from app.loader import dp
from app.utils.timedelta import parse_timedelta_from_message
logger = logging.getLogger()
@dp.message_handler(
    commands=("ro", "mute"),
    commands_prefix="/!",
    is_reply=True,
    user_can_restrict_members=True,
    bot_can_restrict_members=True,
)
async def cmd_ro(message: types.Message, reply: types.Message):
    """Put the replied-to user in read-only mode for the duration parsed
    from the command; reports the outcome back to the chat."""
    duration = await parse_timedelta_from_message(message)
    if not duration:
        return
    try: # Apply restriction
        await message.chat.restrict(
            reply.from_user.id, can_send_messages=False, until_date=duration
        )
        logger.info(
            "User {user} restricted by {admin} for {duration}".format(
                user=message.reply_to_message.from_user.id,
                admin=message.from_user.id,
                duration=duration,
            )
        )
    except exceptions.BadRequest as e:
        # Bug fix: the stdlib logging API rejects arbitrary keyword arguments
        # ("error=e" raised TypeError); use lazy %-style arguments instead.
        logger.error("Failed to restrict chat member: %r", e)
        return False
    await message.reply_to_message.answer(
        "<b>Read-only</b> активирован для пользователя {user}. Длительность: {duration}".format(
            user=message.reply_to_message.from_user.get_mention(),
            duration=format_timedelta(
                duration, locale=message.from_user.locale, granularity="seconds", format="short"
            ),
        )
    )
    return True
@dp.message_handler(
    commands="ban",
    commands_prefix="/!",
    is_reply=True,
    user_can_restrict_members=True,
    bot_can_restrict_members=True,
)
async def cmd_ban(message: types.Message, reply: types.Message):
    """Ban the replied-to user for the duration parsed from the command;
    reports the outcome back to the chat."""
    duration = await parse_timedelta_from_message(message)
    if not duration:
        return
    try: # Apply restriction
        await message.bot.ban_chat_member(message.chat.id, reply.from_user.id, until_date=duration)
        logger.info(
            "User {user} baned by {admin} for {duration}".format(
                user=message.reply_to_message.from_user.id,
                admin=message.from_user.id,
                duration=duration,
            )
        )
    except exceptions.BadRequest as e:
        # Bug fix: the stdlib logging API rejects arbitrary keyword arguments
        # ("error=e" raised TypeError); use lazy %-style arguments instead.
        logger.error("Failed to kick chat member: %r", e)
        return False
    await message.reply_to_message.answer(
        "Пользователь {user} <b>забанен</b> на {duration}".format(
            user=message.reply_to_message.from_user.get_mention(),
            duration=format_timedelta(
                duration, locale=message.from_user.locale, granularity="seconds", format="short"
            ),
        )
    )
    return True
| 31.556818 | 99 | 0.657184 | 0 | 0 | 0 | 0 | 2,500 | 0.881523 | 2,173 | 0.76622 | 548 | 0.19323 |
458860860a8c1e1c28fb5b0848e12367ffab7ad3 | 370 | py | Python | serial_scripts/system_test/flow_tests/ReleaseToFlowSetupRateMapping.py | atsgen/tf-test | 2748fcd81491450c75dadc71849d2a1c11061029 | [
"Apache-2.0"
] | 5 | 2020-09-29T00:36:57.000Z | 2022-02-16T06:51:32.000Z | serial_scripts/system_test/flow_tests/ReleaseToFlowSetupRateMapping.py | vkolli/contrail-test-perf | db04b8924a2c330baabe3059788b149d957a7d67 | [
"Apache-2.0"
] | 27 | 2019-11-02T02:18:34.000Z | 2022-02-24T18:49:08.000Z | serial_scripts/system_test/flow_tests/ReleaseToFlowSetupRateMapping.py | vkolli/contrail-test-perf | db04b8924a2c330baabe3059788b149d957a7d67 | [
"Apache-2.0"
] | 20 | 2019-11-28T16:02:25.000Z | 2022-01-06T05:56:58.000Z | # Here the rate is set for Policy flows, local to a compute, which is
# lesser than policy flows across computes
expected_flow_setup_rate = {}
expected_flow_setup_rate['policy'] = {
'1.04': 6000, '1.05': 9000, '1.06': 10000, '1.10': 10000, '2.10': 13000}
expected_flow_setup_rate['nat'] = {
'1.04': 4200, '1.05': 6300, '1.06': 7500, '1.10': 7500, '2.10': 10000}
| 46.25 | 76 | 0.654054 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 184 | 0.497297 |
458960f108b95d2cbc041786562002a68b17daea | 1,016 | py | Python | DP_MCP.py | man-o-war/DS-Algorithms | 12237fb42fb48e8e7a2cbcb8ecb0d86b87bb3fd3 | [
"MIT"
] | null | null | null | DP_MCP.py | man-o-war/DS-Algorithms | 12237fb42fb48e8e7a2cbcb8ecb0d86b87bb3fd3 | [
"MIT"
] | null | null | null | DP_MCP.py | man-o-war/DS-Algorithms | 12237fb42fb48e8e7a2cbcb8ecb0d86b87bb3fd3 | [
"MIT"
] | null | null | null | # Dynamic Programming Python implementation of Min Cost Path
# problem
# Grid dimensions used to size the DP table inside minCost(); with these
# globals, minCost only supports grids up to R x C.
R = 3
C = 3
def minCost(cost, m, n):
    """Return the minimum cost of a path from cell (0, 0) to cell (m, n).

    Moves allowed from (i, j): right, down, and diagonally down-right.
    ``cost`` is a 2-D grid of non-negative step costs; ``m`` and ``n`` are
    the (inclusive) target row and column indices.

    Runs in O(m*n) time and space.
    """
    # Bug fix: the table was previously sized from the module-level R/C
    # constants (3x3), which raised IndexError for any larger grid; size it
    # from m and n so the function works for arbitrary grids.
    tc = [[0] * (n + 1) for _ in range(m + 1)]
    tc[0][0] = cost[0][0]
    # Initialize first column of total cost (tc) array.
    for i in range(1, m + 1):
        tc[i][0] = tc[i - 1][0] + cost[i][0]
    # Initialize first row of tc array.
    for j in range(1, n + 1):
        tc[0][j] = tc[0][j - 1] + cost[0][j]
    # Fill the rest: best of the three predecessors plus this cell's cost.
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            tc[i][j] = min(tc[i - 1][j - 1], tc[i - 1][j], tc[i][j - 1]) + cost[i][j]
    return tc[m][n]
# Driver program to test above functions.
cost = [[1, 2, 3],
        [4, 8, 2],
        [1, 5, 3]]
# Minimum-cost path to cell (2, 2); prints 8 (path 1 -> 2 -> 2 -> 3).
print(minCost(cost, 2, 2))
# O(mn) time and space.
458a562e22dcc2a1d067069337d7d494ddd4941f | 3,277 | py | Python | Views/Affichage/transitionView.py | yvesjordan06/automata-brains | 1c34dd9315fcee7ce1807a2b94a0ec48421d03b1 | [
"MIT"
] | 3 | 2020-01-31T15:54:48.000Z | 2020-02-01T10:01:35.000Z | Views/Affichage/transitionView.py | Tcomputer5/automata-brains | 6c2a7714d1fcb16763084a33a2f0f1364d4f8eb8 | [
"MIT"
] | null | null | null | Views/Affichage/transitionView.py | Tcomputer5/automata-brains | 6c2a7714d1fcb16763084a33a2f0f1364d4f8eb8 | [
"MIT"
] | 2 | 2020-02-01T09:59:51.000Z | 2020-02-01T10:02:12.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'UI/transitionView.ui'
#
# Created by: PyQt5 UI code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtGui import QIcon
from Models.Automate import Automate
class Ui_Form(object):
    """Table widget showing an automaton's transition table.

    Rows are states (icons mark initial/final states), columns are alphabet
    symbols, and each cell lists the destination state(s) for that
    (state, symbol) pair. The table refreshes whenever the automaton emits
    its ``automate_modifier`` signal.
    """
    def __init__(self, automate:Automate):
        self.automate = automate
    def setupUi(self, Form):
        """Build the widget tree and populate the table a first time."""
        Form.setObjectName("Form")
        Form.resize(369, 279)
        self.horizontalLayout = QtWidgets.QHBoxLayout(Form)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.tableWidget = QtWidgets.QTableWidget()
        self.tableWidget.setObjectName("tableWidget")
        self.horizontalLayout.addWidget(self.tableWidget)
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
        # Initial population, then refresh on every automaton change.
        self.action_set_state()
        self.automate.automate_modifier.connect(self.action_set_state)
    def retranslateUi(self, Form):
        """Apply translated window texts (generated-UI convention)."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Form"))
        #self.groupBox.setTitle(_translate("Form", "Transition"))
        __sortingEnabled = self.tableWidget.isSortingEnabled()
        self.tableWidget.setSortingEnabled(False)
        self.tableWidget.setSortingEnabled(__sortingEnabled)
    def action_set_state(self):
        """Rebuild the transition table from the current automaton state."""
        self.tableWidget.clear()
        etat_list = list(self.automate.etats)
        alphabet = [symbole for symbole in self.automate.alphabet.list]
        self.tableWidget.setColumnCount(len(self.automate.alphabet.list))
        self.tableWidget.setRowCount(len(self.automate.etats))
        # Row headers: one per state, with an icon for initial/final/both.
        row = 0
        for etat in etat_list :
            label = str(etat)
            item = QtWidgets.QTableWidgetItem(label)
            initial = False
            final = False
            if etat == self.automate.etat_initial:
                initial = True
            if etat in self.automate.etats_finaux:
                final = True
            if initial:
                item = QtWidgets.QTableWidgetItem(QIcon("icons/initial.png"), label)
            if final:
                item = QtWidgets.QTableWidgetItem(QIcon("icons/final.png"), label)
            if initial and final:
                item = QtWidgets.QTableWidgetItem(QIcon("icons/icon.png"), label)
            self.tableWidget.setVerticalHeaderItem(row, item)
            row += 1
        self.tableWidget.setHorizontalHeaderLabels(alphabet)
        # Cells: destination states, comma-joined when a cell already has a
        # value. Epsilon transitions have no column, so they are skipped.
        # (A leftover debug print of est_epsilon() was removed here.)
        for t in self.automate.transitions:
            if (t.est_epsilon()):
                continue
            alphabet_index = alphabet.index(t.symbole)
            etat_index = etat_list.index(t.depart)
            valeur_actu = self.tableWidget.item(etat_index,alphabet_index)
            valeur_text = ' , ' + valeur_actu.text() if valeur_actu else ''
            self.tableWidget.setItem(etat_index, alphabet_index, QtWidgets.QTableWidgetItem(f"{t.arrive}{valeur_text}"))
# Standalone preview entry point for this widget.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    Form = QtWidgets.QWidget()
    # NOTE(review): Ui_Form.__init__ requires an Automate instance, so this
    # call raises TypeError as written — confirm how this preview is meant
    # to be launched.
    ui = Ui_Form()
    ui.setupUi(Form)
    Form.show()
    sys.exit(app.exec_())
| 32.77 | 120 | 0.64968 | 2,757 | 0.841318 | 0 | 0 | 0 | 0 | 0 | 0 | 418 | 0.127556 |
458b63ce9f8114bff2c526efb8cbbe3355958823 | 380 | py | Python | gellifinsta/migrations/0004_rename_local_fname_gellifinsta_file_path.py | vallka/djellifique | fb84fba6be413f9d38276d89ae84aeaff761218f | [
"MIT"
] | null | null | null | gellifinsta/migrations/0004_rename_local_fname_gellifinsta_file_path.py | vallka/djellifique | fb84fba6be413f9d38276d89ae84aeaff761218f | [
"MIT"
] | null | null | null | gellifinsta/migrations/0004_rename_local_fname_gellifinsta_file_path.py | vallka/djellifique | fb84fba6be413f9d38276d89ae84aeaff761218f | [
"MIT"
] | null | null | null | # Generated by Django 3.2.4 on 2021-06-23 14:52
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the ``gellifinsta.local_fname`` field to ``file_path``."""
    dependencies = [
        ('gellifinsta', '0003_auto_20210622_1928'),
    ]
    operations = [
        migrations.RenameField(
            model_name='gellifinsta',
            old_name='local_fname',
            new_name='file_path',
        ),
    ]
| 20 | 51 | 0.602632 | 295 | 0.776316 | 0 | 0 | 0 | 0 | 0 | 0 | 122 | 0.321053 |
458ea89efaf4582b937e60a207ad5ec47d2a4440 | 3,881 | py | Python | python/etc/extract_final_branch_weights.py | AdamByerly/MMLCNNwHFCs | 499c275522656e4537c58c330476683e0a0f4bef | [
"Apache-2.0"
] | 38 | 2020-02-04T20:07:49.000Z | 2022-01-23T22:57:54.000Z | python/etc/extract_final_branch_weights.py | AdamByerly/MMLCNNwHFCs | 499c275522656e4537c58c330476683e0a0f4bef | [
"Apache-2.0"
] | 3 | 2020-04-23T16:51:27.000Z | 2020-07-19T12:26:39.000Z | python/etc/extract_final_branch_weights.py | AdamByerly/MMLCNNwHFCs | 499c275522656e4537c58c330476683e0a0f4bef | [
"Apache-2.0"
] | 9 | 2020-04-22T02:36:59.000Z | 2021-09-16T05:42:54.000Z | # Copyright 2021 Adam Byerly. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "1"
import argparse
import numpy as np
from python.input.MNIST_input_pipeline import MNIST
from python.input.cifar10_input_pipeline import Cifar10
from python.input.cifar100_input_pipeline import Cifar100
from python.input.smallNORB_input_pipeline import smallNORB
from python.models.BranchingMerging import SmallImageBranchingMerging
import tensorflow as tf
def go(data_dir, log_dir, output_file, input_pipeline, merge_strategy,
        use_hvcs=True, hvc_type=1, hvc_dims=None, total_convolutions=None,
        branches_after=None):
    """Restore every checkpoint found under ``log_dir`` and dump each
    checkpoint's branch weights as one CSV row in ``output_file``.
    """
    # One checkpoint prefix (file path without extension) per directory.
    ckpt_prefixes = []
    for dirname, _, filenames in os.walk(log_dir):
        prefixes = {os.path.join(dirname, os.path.splitext(fn)[0])
                    for fn in filenames}
        if prefixes:
            ckpt_prefixes.append(next(iter(prefixes)))

    # Choose the input pipeline; any unknown id falls back to MNIST.
    pipeline_factories = {
        3: lambda: Cifar10(data_dir, False, 0),
        4: lambda: Cifar100(data_dir, False, 0),
        5: lambda: smallNORB(data_dir, False, 48, 32),
    }
    in_pipe = pipeline_factories.get(
        input_pipeline, lambda: MNIST(data_dir, False, 1))()

    collected_weights = []
    strategy = tf.distribute.MirroredStrategy()
    with strategy.scope():
        print("Building model...")
        model = SmallImageBranchingMerging(in_pipe.get_class_count(),
            in_pipe.get_image_size(), in_pipe.get_image_channels(),
            merge_strategy, use_hvcs, hvc_type, hvc_dims,
            total_convolutions, branches_after, False)
        for weights_file in ckpt_prefixes:
            print("Restoring weights file: {}".format(weights_file))
            ckpt = tf.train.Checkpoint(
                vars=model.get_all_savable_variables())
            ckpt.restore(weights_file).expect_partial()
            collected_weights.append(model.branch_weights.variable.numpy())

    print("Saving final branch weights...")
    # (False Positive)
    # noinspection PyTypeChecker
    np.savetxt(output_file, np.array(collected_weights), delimiter=',', fmt='%0f')
    print("Finished.")
################################################################################
if __name__ == "__main__":
p = argparse.ArgumentParser()
p.add_argument("--data_dir", default=r"../../../../Datasets/smallNORB_data")
p.add_argument("--log_dir", default=r"../../logs/20210609135430")
p.add_argument("--output_file",
default=r"../../logs/20210609135430/final_branch_weights.txt")
p.add_argument("--input_pipeline", default=5, type=int)
p.add_argument("--merge_strategy", default=2, type=float)
p.add_argument("--use_hvcs", default=True, type=bool)
p.add_argument("--hvc_type", default=2, type=int)
p.add_argument("--hvc_dims", default=[96, 144, 192], type=int)
p.add_argument("--total_convolutions", default=11, type=int)
p.add_argument("--branches_after", default=[4, 7, 10])
a = p.parse_args()
go(data_dir=a.data_dir, log_dir=a.log_dir, output_file=a.output_file,
input_pipeline=a.input_pipeline, merge_strategy=a.merge_strategy,
use_hvcs=a.use_hvcs, hvc_type=a.hvc_type, hvc_dims=a.hvc_dims,
total_convolutions=a.total_convolutions, branches_after=a.branches_after)
| 40.852632 | 80 | 0.667611 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,188 | 0.306107 |
458ee5a0204805bfb05bd187554d1c93282f72a6 | 7,625 | py | Python | pykSpider/kSpider2/ks_clustering.py | mr-eyes/kSpider2 | 6049c23a5e781534331ba1abcdbf067151e274a2 | [
"MIT"
] | null | null | null | pykSpider/kSpider2/ks_clustering.py | mr-eyes/kSpider2 | 6049c23a5e781534331ba1abcdbf067151e274a2 | [
"MIT"
] | null | null | null | pykSpider/kSpider2/ks_clustering.py | mr-eyes/kSpider2 | 6049c23a5e781534331ba1abcdbf067151e274a2 | [
"MIT"
] | 1 | 2021-04-09T23:59:21.000Z | 2021-04-09T23:59:21.000Z | from __future__ import division
from collections import defaultdict
import itertools
import sys
import os
import sqlite3
import click
from kSpider2.click_context import cli
import glob
class kClusters:
    """Cluster sequences from a kSpider pairwise similarity TSV.

    Pairs above ``cut_off_threshold`` containment are clustered with
    union-find.  Pairs with many shared k-mers (>= shared_kmers_threshold)
    drive the primary clustering; weakly-connected pairs are merged in a
    second pass (``post_clustering``).
    NOTE(review): source/target/... are class-level mutables shared across
    instances — confirm only one kClusters is created per process.
    """
    source = []
    target = []
    source2 = []
    target2 = []
    seq_to_kmers = dict()
    names_map = dict()
    components = defaultdict(set)
    def __init__(self, logger_obj, index_prefix, cut_off_threshold):
        self.Logger = logger_obj
        self.names_file = index_prefix + ".namesMap"
        self.cut_off_threshold = cut_off_threshold
        self.seqToKmers_file = index_prefix + "_kSpider_seqToKmersNo.tsv"
        self.pairwise_file = index_prefix + "_kSpider_pairwise.tsv"
        self.uncovered_seqs = set()
        # Minimum shared k-mers for a pair to join the primary graph.
        self.shared_kmers_threshold = 200
        self.seq_to_clusterid = dict()
        self.max_cluster_id = 0
        self.Logger.INFO("Loading TSV pairwise file")
        self.load_seq_to_kmers(self.seqToKmers_file)
        self.tsv_get_namesmap()
    def load_seq_to_kmers(self, tsv):
        """Populate seq_to_kmers {seq_id: total k-mer count} from the TSV."""
        with open(tsv) as KMER_COUNT:
            next(KMER_COUNT)
            for line in KMER_COUNT:
                seq_ID, no_of_kmers = tuple(line.strip().split('\t')[1:])
                self.seq_to_kmers[int(seq_ID)] = int(no_of_kmers)
    def ids_to_names(self, cluster):
        """Translate a collection of sequence ids into sequence names."""
        new_cluster = []
        for _id in cluster:
            new_cluster.append(self.names_map[int(_id)])
        return new_cluster
    def tsv_get_namesmap(self):
        """Populate names_map {seq_id: name} from the namesMap file."""
        with open(self.names_file, 'r') as namesMap:
            next(namesMap) # skip the header
            for row in namesMap:
                row = row.strip().split()
                self.names_map[int(row[0])] = row[1]
    def tsv_build_graph(self):
        """Read the pairwise file into two edge lists.

        Edges whose containment (shared k-mers / smaller sequence's k-mers)
        falls below the cutoff are dropped.  Strong edges go to
        source/target, weak ones to source2/target2.
        """
        with open(self.pairwise_file, 'r') as pairwise_tsv:
            next(pairwise_tsv) # skip header
            for row in pairwise_tsv:
                row = row.strip().split()
                seq1 = int(row[1])
                seq2 = int(row[2])
                shared_kmers = int(row[3])
                containment = 0.0
                min_seq = float(
                    min(self.seq_to_kmers[seq1], self.seq_to_kmers[seq2]))
                containment = shared_kmers / min_seq
                if containment < self.cut_off_threshold:
                    continue
                if shared_kmers < self.shared_kmers_threshold:
                    self.source2.append(seq1)
                    self.target2.append(seq2)
                elif shared_kmers >= self.shared_kmers_threshold:
                    self.source.append(seq1)
                    self.target.append(seq2)
        # # For covering clusters with single sequence
        uncovered_seqs_1 = set(self.names_map.keys()) - \
            set(self.source).union(set(self.target))
        for seq in uncovered_seqs_1:
            self.uncovered_seqs.add(seq)
        # OR:
        # for i in range(1, len(self.names_map) + 1, 1):
        #     self.source.append(i)
        #     self.target.append(i)
    def clustering(self):
        """Union-find over the strong edges, then run post_clustering."""
        # registers[x] is x's parent; None means x is a root.
        registers = defaultdict(lambda: None)
        def find(x):
            l = registers[x]
            if l is not None:
                # Path compression: point x directly at its root.
                l = find(l)
                registers[x] = l
                return l
            return x
        def union(x, y):
            lx, ly = find(x), find(y)
            if lx != ly:
                registers[lx] = ly
        for i in range(len(self.source)):
            union(self.source.pop(), self.target.pop())
        for x in registers:
            self.components[find(x)].add(x)
        # Renumber components 1..N and record each sequence's cluster id.
        temp_components = self.components.copy()
        self.components.clear()
        for cluster_id, (k, v) in enumerate(temp_components.items(), 1):
            self.components[cluster_id] = set(v)
            for seq in v:
                self.seq_to_clusterid[seq] = cluster_id
        temp_components.clear()
        self.post_clustering()
    def post_clustering(self):
        """Merge weakly-connected components into the primary clusters.

        Each weak component that touches an existing cluster donates its
        still-unassigned sequences to that cluster; untouched weak
        components and leftover singleton sequences become new clusters.
        """
        registers2 = defaultdict(lambda: None)
        local_components = defaultdict(set)
        covered_seqs = set()
        def find(x):
            l = registers2[x]
            if l is not None:
                l = find(l)
                registers2[x] = l
                return l
            return x
        def union(x, y):
            lx, ly = find(x), find(y)
            if lx != ly:
                registers2[lx] = ly
        for i in range(len(self.source2)):
            union(self.source2.pop(), self.target2.pop())
        for x in registers2:
            local_components[find(x)].add(x)
        self.components = dict(self.components)
        covered_clusters = set()
        for cluster2_id, (k, v) in enumerate(local_components.items(), 1):
            for seq in v:
                covered_seqs.add(seq)
            for seq in v:
                if seq in self.seq_to_clusterid:
                    # This weak component overlaps primary cluster_id:
                    # attach its unassigned members there.
                    cluster_id = self.seq_to_clusterid[seq]
                    to_be_added = set()
                    for i in v:
                        if i not in self.seq_to_clusterid:
                            to_be_added.add(i)
                    self.components[cluster_id] = self.components[cluster_id].union(
                        to_be_added)
                    covered_clusters.add(k)
                    continue
        self.uncovered_seqs = self.uncovered_seqs - covered_seqs
        uncovered_clusters = set(local_components.keys()) - covered_clusters
        max_id = len(self.components)
        for i, unc in enumerate(uncovered_clusters, 1):
            max_id += 1
            self.components[max_id] = local_components[unc]
        # Any sequence never seen in either edge list gets its own cluster.
        for seq in self.uncovered_seqs:
            max_id += 1
            self.components[max_id] = {seq}
    def export_kCluster(self):
        """Write clusters to kSpider_<cutoff>%_<prefix>.clusters.tsv."""
        kCluster_file_name = f"kSpider_{self.cut_off_threshold:.2f}%_"
        kCluster_file_name += os.path.basename(
            self.pairwise_file).split(".")[0]
        kCluster_file_name += ".clusters.tsv"
        with open(kCluster_file_name, 'w') as kClusters:
            kClusters.write("kClust_id\tseqs_ids\n")
            for cluster_id, (k, v) in enumerate(self.components.items(), 1):
                kClusters.write(
                    f"{cluster_id}\t{'|'.join(self.ids_to_names(v))}\n")
        self.Logger.INFO(f"Total Number Of Clusters: {cluster_id}")
"""
TODO:
New help messages
1. containment cutoff (sim_cutoff): cluster sequences with (containment > cutoff) where containment = shared kmers % to the total kmers in the smallest node.
2. connectivity cutoff (con_cutoff): cluster sequences with (connectivity > cutoff) where connectivity = shared kmers % to the total kmers in the largest node.
3. min count cutoff (min_count): the min kmers count of a node to connect two clusters, otherwise the node will be reported twice in both clusters.
"""
# CLI entry point: build the pairwise graph, cluster, and export results.
# NOTE: the docstring below doubles as the click help text; keep it stable.
@cli.command(name="cluster", help_priority=5)
@click.option('-c', '--cutoff', required=False, type=click.FloatRange(0, 1, clamp=False), default=0.0, show_default=True, help="cluster sequences with (containment > cutoff)")
@click.option('-i', '--index-prefix', "index_prefix", required=True, type=click.STRING, help="kProcessor index file prefix")
@click.pass_context
def main(ctx, index_prefix, cutoff):
    """Sequence clustering."""
    # ctx.obj is the shared logger object set up by the CLI group.
    kCl = kClusters(logger_obj=ctx.obj,
                    index_prefix=index_prefix, cut_off_threshold=cutoff)
    ctx.obj.INFO("Building the main graph...")
    kCl.tsv_build_graph()
    ctx.obj.INFO("Clustering...")
    kCl.clustering()
    ctx.obj.INFO("Exporting ...")
    kCl.export_kCluster()
| 33.738938 | 175 | 0.579803 | 6,197 | 0.812721 | 0 | 0 | 736 | 0.096525 | 0 | 0 | 1,174 | 0.153967 |
458f46b3f7bac00aa3202fa59f430204a9e83498 | 2,041 | py | Python | workflow/scripts/build_primer_regions.py | kokyriakidis/dna-seq-varlociraptor | e9331a1bd8bbdf6592383353a0301148eea65e0c | [
"MIT"
] | null | null | null | workflow/scripts/build_primer_regions.py | kokyriakidis/dna-seq-varlociraptor | e9331a1bd8bbdf6592383353a0301148eea65e0c | [
"MIT"
] | null | null | null | workflow/scripts/build_primer_regions.py | kokyriakidis/dna-seq-varlociraptor | e9331a1bd8bbdf6592383353a0301148eea65e0c | [
"MIT"
] | null | null | null | import pandas as pd
def parse_bed(log_file, out):
    """Convert a BED primer file to the five-column primer-region table.

    '+' strand rows fill the left start/end columns, '-' strand rows the
    right ones (the unused pair is -1/-1).  Rows with any other strand are
    reported to ``log_file``.  Coordinates are shifted to 1-based starts.
    """
    print("chrom\tleft_start\tleft_end\tright_start\tright_end", file=out)
    # Per-strand output templates; keys are the two valid strand symbols.
    templates = {
        "+": "{chrom}\t{start}\t{end}\t-1\t-1",
        "-": "{chrom}\t-1\t-1\t{start}\t{end}",
    }
    reader = pd.read_csv(
        snakemake.input[0],
        sep="\t",
        header=None,
        chunksize=chunksize,
        usecols=[0, 1, 2, 5],
    )
    for data_primers in reader:
        for row_id, record in data_primers.iterrows():
            template = templates.get(record[5])
            if template is None:
                print("Invalid strand in row {}".format(row_id), file=log_file)
                continue
            print(
                template.format(chrom=record[0], start=record[1]+1, end=record[2]),
                file=out,
            )
def parse_bedpe(log_file, out):
    """Convert a BEDPE primer file to the five-column primer-region table.

    Keeps only pairs whose two mates share a chromosome, shifts both start
    columns to 1-based coordinates, and writes the result to ``out``.
    Rejected (cross-chromosome) rows are reported to ``log_file``.
    """
    # Fix: previously the header row was re-emitted for every chunk of a
    # large input; emit it only once, with the first chunk.
    header_written = False
    for data_primers in pd.read_csv(
        snakemake.input[0],
        sep="\t",
        header=None,
        chunksize=chunksize,
        usecols=[0, 1, 2, 3, 4, 5],
    ):
        # Both mates must sit on the same chromosome (cols 0 and 3).
        valid_primers = data_primers[0] == data_primers[3]
        valid_data = data_primers[valid_primers].copy()
        # Shift the two start columns (positions 1 and 4) to 1-based.
        valid_data.iloc[:, [1, 4]] += 1
        valid_data.drop(columns=[3], inplace=True)
        valid_data.dropna(how="all", inplace=True)
        valid_data.to_csv(
            out,
            sep="\t",
            index=False,
            header=(["chrom", "left_start", "left_end", "right_start", "right_end"]
                    if not header_written else False),
        )
        header_written = True
        print(
            data_primers[~valid_primers].to_csv(sep="\t", index=False, header=False),
            file=log_file,
        )
# Rows read per pandas chunk; referenced by both parser functions.
chunksize = 10 ** 6

with open(snakemake.output[0], "w") as out, open(snakemake.log[0], "w") as log_file:
    # Dispatch on the input extension: BEDPE (paired) vs plain BED.
    parser = parse_bedpe if snakemake.input[0].endswith("bedpe") else parse_bed
    parser(log_file, out)
| 30.924242 | 85 | 0.488486 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 238 | 0.11661 |
458fd8bb6dadc2e31622c1207043cbd32ef38957 | 2,039 | py | Python | utils/visualMap.py | TwelveYC/network-vi | d7a36c21a09eb86276e316193405267e3a9cc78d | [
"MIT"
] | 14 | 2020-05-13T10:04:02.000Z | 2020-12-27T05:42:05.000Z | utils/visualMap.py | TwelveYC/network-vi | d7a36c21a09eb86276e316193405267e3a9cc78d | [
"MIT"
] | null | null | null | utils/visualMap.py | TwelveYC/network-vi | d7a36c21a09eb86276e316193405267e3a9cc78d | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
class MapColorControl():
    """Map numeric data onto hex colour strings via a matplotlib colormap.

    Data is shifted to be strictly positive (required by LogNorm), then
    normalized to [0, 1] and scaled into the 256-entry colour table.
    """

    def __init__(self, colour_scheme, map_normalization, data):
        # 256 RGB rows sampled from the requested colormap (alpha dropped).
        self.colors = plt.get_cmap(colour_scheme)(range(256))[:, :3]
        self.data = data
        if self.data.min() <= 0:
            # Shift so every value is >= 1 (LogNorm needs positives).
            self.data = self.data + abs(self.data.min()) + 1
        lo = self.data.min()
        hi = self.data.max()
        if map_normalization == "Power-law":
            self.normNorm = colors.PowerNorm(gamma=2, vmin=lo, vmax=hi)
        elif map_normalization == "Linear":
            self.normNorm = colors.Normalize(vmin=lo, vmax=hi)
        elif map_normalization == "Logarithmic":
            self.normNorm = colors.LogNorm(vmin=lo, vmax=hi)

    def get_map_data(self):
        """Return one hex colour string per data point."""
        indices = np.round(self.normNorm(self.data) * 255)
        return self.map(indices)

    def map(self, infos):
        """Translate colormap row indices into hex colour strings."""
        return [colors.rgb2hex(self.colors[int(idx)]) for idx in infos]
class MapControl():
    """Rescale numeric data into [minValue, maxValue] via a normalization.

    Data is shifted to be strictly positive (required by LogNorm), then
    normalized to [0, 1] and linearly rescaled to the requested range.
    """

    def __init__(self, max_value, min_value, map_normalization, data):
        self.data = data
        if self.data.min() <= 0:
            # Shift so every value is >= 1 (LogNorm needs positives).
            self.data = self.data + abs(self.data.min()) + 1
        lo = self.data.min()
        hi = self.data.max()
        if map_normalization == "Linear":
            self.normNorm = colors.Normalize(vmin=lo, vmax=hi)
        elif map_normalization == "Logarithmic":
            self.normNorm = colors.LogNorm(vmin=lo, vmax=hi)
        elif map_normalization == "Power-law":
            self.normNorm = colors.PowerNorm(gamma=2, vmin=lo, vmax=hi)
        self.maxValue = max_value
        self.minValue = min_value

    def get_map_data(self, is_round):
        """Return the rescaled data as a list.

        ``is_round`` True keeps 5 decimal places; False rounds to integers.
        """
        span = self.maxValue - self.minValue
        rescaled = self.normNorm(self.data) * span + self.minValue
        decimals = 5 if is_round else 0
        return list(np.round(rescaled, decimals))
459249d4734814fb5305d256f82cc0dc0641dd16 | 2,361 | py | Python | data/train/python/459249d4734814fb5305d256f82cc0dc0641dd16urls.py | harshp8l/deep-learning-lang-detection | 2a54293181c1c2b1a2b840ddee4d4d80177efb33 | [
"MIT"
] | 84 | 2017-10-25T15:49:21.000Z | 2021-11-28T21:25:54.000Z | data/train/python/459249d4734814fb5305d256f82cc0dc0641dd16urls.py | vassalos/deep-learning-lang-detection | cbb00b3e81bed3a64553f9c6aa6138b2511e544e | [
"MIT"
] | 5 | 2018-03-29T11:50:46.000Z | 2021-04-26T13:33:18.000Z | data/train/python/459249d4734814fb5305d256f82cc0dc0641dd16urls.py | vassalos/deep-learning-lang-detection | cbb00b3e81bed3a64553f9c6aa6138b2511e544e | [
"MIT"
] | 24 | 2017-11-22T08:31:00.000Z | 2022-03-27T01:22:31.000Z | from django.conf.urls import patterns, url
from lattice.views import (lattices)
from lattice.views import (saveLatticeInfo, saveLattice)
from lattice.views import (saveModel)
from lattice.views import (lattice_home, lattice_content_home, lattice_content_search, lattice_content_list, lattice_content_model_list, lattice_content_details, lattice_content_model_details)
from lattice.views import (lattice_modal, saveLatticeHelper, saveLatticeTypeHelper, saveLatticeStatusHelper, saveModelHelper, saveModelStatusHelper)
# URL routes for the lattice app: raw-data endpoints, the HTML UI pages,
# and the save/upload helper endpoints.
urlpatterns = patterns(
    '',
    # return raw data not thru html ui
    url(r'^lattice/$',
        lattices,
        name='lattices'),
    url(r'^lattice/savelatticeinfo/$',
        saveLatticeInfo,
        name='saveLatticeInfo'),
    url(r'^lattice/savelattice$',
        saveLattice,
        name='saveLattice'),
    url(r'^lattice/savemodel$',
        saveModel,
        name='saveModel'),
    # HTML UI pages.
    url(r'^lattice/web/$',
        lattice_home,
        name='lattice_home'),
    url(r'^lattice/web/index.html$',
        lattice_home,
        name='lattice_home'),
    url(r'^lattice/web/content.html$',
        lattice_content_home,
        name='lattice_content_home'),
    url(r'^lattice/web/search.html$',
        lattice_content_search,
        name='lattice_content_search'),
    url(r'^lattice/web/list.html$',
        lattice_content_list,
        name='lattice_content_list'),
    url(r'^lattice/web/model_list.html$',
        lattice_content_model_list,
        name='lattice_content_model_list'),
    url(r'^lattice/web/details.html$',
        lattice_content_details,
        name='lattice_content_details'),
    url(r'^lattice/web/model_details.html$',
        lattice_content_model_details,
        name='lattice_content_model_details'),
    url(r'^lattice/web/modal/',
        lattice_modal,
        name='lattice_modal'),
    # Save/upload helper endpoints.
    # NOTE(review): 'saveLatticTypeeHelper' is misspelled, but the name is
    # part of the reverse()-lookup interface — fix only with its callers.
    url(r'^lattice/savelatticetype$',
        saveLatticeTypeHelper,
        name='saveLatticTypeeHelper'),
    url(r'^lattice/upload$',
        saveLatticeHelper,
        name='saveLatticeHelper'),
    url(r'^lattice/savestatus$',
        saveLatticeStatusHelper,
        name='saveLatticeStatusHelper'),
    url(r'^model/upload$',
        saveModelHelper,
        name='saveModelHelper'),
    url(r'^model/savestatus$',
        saveModelStatusHelper,
        name='saveModelStatusHelper'),
)
| 30.662338 | 192 | 0.67302 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 830 | 0.351546 |
4592ee2a9205ee3dd55eed2050ff0223402372a9 | 52 | py | Python | src/core/sessions/buffers/gui/configuration/twitter/__init__.py | Oire/TheQube | fcfd8a68b15948e0740642d635db24adef8cc314 | [
"MIT"
] | 21 | 2015-08-02T21:26:14.000Z | 2019-12-27T09:57:44.000Z | src/core/sessions/buffers/gui/configuration/twitter/__init__.py | Oire/TheQube | fcfd8a68b15948e0740642d635db24adef8cc314 | [
"MIT"
] | 34 | 2015-01-12T00:38:14.000Z | 2020-08-31T11:19:37.000Z | src/core/sessions/buffers/gui/configuration/twitter/__init__.py | Oire/TheQube | fcfd8a68b15948e0740642d635db24adef8cc314 | [
"MIT"
] | 15 | 2015-03-24T15:42:30.000Z | 2020-09-24T20:26:42.000Z | from main import BufferConfigDialog
import panels
| 17.333333 | 36 | 0.846154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
4593c16eb2e732096aaf3aa076d3366347a35a16 | 2,155 | py | Python | fluid/PaddleRec/ctr/network_conf.py | KaiyuYue/models | d5804a6afab46a31f260dc0696e26883b33bbceb | [
"Apache-2.0"
] | 1 | 2018-11-23T10:29:49.000Z | 2018-11-23T10:29:49.000Z | fluid/PaddleRec/ctr/network_conf.py | GuangyanZhang/models | 937566323fa228d74a544f16f3e030f4c0bbb3b9 | [
"Apache-2.0"
] | null | null | null | fluid/PaddleRec/ctr/network_conf.py | GuangyanZhang/models | 937566323fa228d74a544f16f3e030f4c0bbb3b9 | [
"Apache-2.0"
] | null | null | null | import paddle.fluid as fluid
import math
# Width of the dense (continuous) feature vector fed to ctr_dnn_model.
dense_feature_dim = 13
def ctr_dnn_model(embedding_size, sparse_feature_dim):
    """Build the CTR prediction DNN (dense + 26 sparse embedded features).

    Args:
        embedding_size: embedding width for each sparse feature.
        sparse_feature_dim: vocabulary size of the sparse features.

    Returns:
        (avg_cost, data_list, auc_var, batch_auc_var) — the training loss,
        the ordered list of input layers, and the AUC metric variables.
    """
    dense_input = fluid.layers.data(
        name="dense_input", shape=[dense_feature_dim], dtype='float32')

    # One id input per sparse slot C1..C26 (Criteo-style naming).
    sparse_input_ids = [
        fluid.layers.data(
            name="C" + str(i), shape=[1], lod_level=1, dtype='int64')
        for i in range(1, 27)
    ]

    def embedding_layer(input):
        return fluid.layers.embedding(
            input=input,
            is_sparse=True,
            # you need to patch https://github.com/PaddlePaddle/Paddle/pull/14190
            # if you want to set is_distributed to True
            is_distributed=False,
            size=[sparse_feature_dim, embedding_size],
            param_attr=fluid.ParamAttr(name="SparseFeatFactors", initializer=fluid.initializer.Uniform()))

    # Fix: wrap map() in list() — on Python 3 map() returns an iterator,
    # and `iterator + [dense_input]` below would raise TypeError.
    sparse_embed_seq = list(map(embedding_layer, sparse_input_ids))
    concated = fluid.layers.concat(sparse_embed_seq + [dense_input], axis=1)

    # Three 400-unit ReLU layers, each scaled-normal initialized by fan-in.
    fc1 = fluid.layers.fc(input=concated, size=400, act='relu',
                          param_attr=fluid.ParamAttr(initializer=fluid.initializer.Normal(scale=1/math.sqrt(concated.shape[1]))))
    fc2 = fluid.layers.fc(input=fc1, size=400, act='relu',
                          param_attr=fluid.ParamAttr(initializer=fluid.initializer.Normal(scale=1/math.sqrt(fc1.shape[1]))))
    fc3 = fluid.layers.fc(input=fc2, size=400, act='relu',
                          param_attr=fluid.ParamAttr(initializer=fluid.initializer.Normal(scale=1/math.sqrt(fc2.shape[1]))))
    predict = fluid.layers.fc(input=fc3, size=2, act='softmax',
                              param_attr=fluid.ParamAttr(initializer=fluid.initializer.Normal(scale=1/math.sqrt(fc3.shape[1]))))

    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
    data_list = [dense_input] + sparse_input_ids + [label]

    cost = fluid.layers.cross_entropy(input=predict, label=label)
    avg_cost = fluid.layers.reduce_sum(cost)
    accuracy = fluid.layers.accuracy(input=predict, label=label)
    auc_var, batch_auc_var, auc_states = fluid.layers.auc(input=predict, label=label, num_thresholds=2**12, slide_steps=20)

    return avg_cost, data_list, auc_var, batch_auc_var
| 45.851064 | 123 | 0.696984 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 204 | 0.094664 |
4593e0391956e053099ff76cfe7c1d83b704418a | 638 | py | Python | hangman_game.py | praneethmolleti/utopia | 1855e4571307f8c52fbfc46a525aea0eb366bc79 | [
"MIT"
] | null | null | null | hangman_game.py | praneethmolleti/utopia | 1855e4571307f8c52fbfc46a525aea0eb366bc79 | [
"MIT"
] | null | null | null | hangman_game.py | praneethmolleti/utopia | 1855e4571307f8c52fbfc46a525aea0eb366bc79 | [
"MIT"
] | null | null | null | import time
# Simple console Hangman: the player has 10 turns to uncover the word.
name = input("Enter your name:")
print("hello", name, "time to play Hangman!")
time.sleep(1)
print("start guessing")
time.sleep(0.5)

word = "secret"      # the hidden word
guesses = ""         # every character the player has entered so far
turns = 10           # wrong guesses remaining

while turns > 0:
    # Show the current reveal state, one character per line.
    hidden = 0
    for letter in word:
        if letter in guesses:
            print(letter)
        else:
            print("_")
            hidden += 1
    if hidden == 0:
        print("You Won")
        break
    attempt = input("guess the character:")
    guesses += attempt
    if attempt not in word:
        turns -= 1
        print("Wrong")
        print("You have", turns, "more guesses")
else:
    # Loop exhausted without a win (turns reached 0).
    print("You loose")
| 21.266667 | 47 | 0.523511 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 150 | 0.23511 |
4593eef79d646d64bcab1d1f22414e703f225f47 | 3,637 | py | Python | datawinners/submission/request_processor.py | ICT4H/dcs-web | fb0f53fad4401cfac1c1789ff28b9d5bda40c975 | [
"Apache-2.0"
] | 1 | 2015-11-02T09:11:12.000Z | 2015-11-02T09:11:12.000Z | datawinners/submission/request_processor.py | ICT4H/dcs-web | fb0f53fad4401cfac1c1789ff28b9d5bda40c975 | [
"Apache-2.0"
] | null | null | null | datawinners/submission/request_processor.py | ICT4H/dcs-web | fb0f53fad4401cfac1c1789ff28b9d5bda40c975 | [
"Apache-2.0"
] | null | null | null | import json
import logging
from django.conf import settings
from datawinners.feeds.database import get_feeds_db_for_org
from mangrove.transport import TransportInfo
from datawinners.accountmanagement.models import TEST_REPORTER_MOBILE_NUMBER, OrganizationSetting
from datawinners.messageprovider.messages import SMS
from datawinners.utils import get_organization, get_database_manager_for_org
# Module-level logger; reuses Django's configured "django" logger.
logger = logging.getLogger("django")
class WebSMSDBMRequestProcessor(object):
    """Attach the organization's database managers to the mangrove request.

    Requires mangrove_request['organization'] to already be populated
    (see WebSMSOrganizationFinderRequestProcessor).
    """
    def process(self, http_request, mangrove_request):
        mangrove_request['dbm']=get_database_manager_for_org(mangrove_request['organization'])
        mangrove_request['feeds_dbm'] = get_feeds_db_for_org(mangrove_request['organization'])
class WebSMSTransportInfoRequestProcessor(object):
    """Build TransportInfo for a web-simulated SMS.

    The sender is the fixed test reporter number; the receiver is the
    organization's configured SMS number.
    """
    def process(self, http_request, mangrove_request):
        organization_settings = OrganizationSetting.objects.get(organization=mangrove_request['organization'])
        _to = get_organization_number(organization_settings.get_organisation_sms_number()[0])
        _from = TEST_REPORTER_MOBILE_NUMBER
        mangrove_request['transport_info']=TransportInfo(SMS, _from, _to)
class WebSMSOrganizationFinderRequestProcessor(object):
    """Resolve the organization from the HTTP request and store it."""
    def process(self, http_request, mangrove_request):
        mangrove_request['organization'] = get_organization(http_request)
class SMSMessageRequestProcessor(object):
    """Extract the inbound SMS text into mangrove_request['incoming_message'].

    New-style Vumi posts a JSON body with a 'content' field; the legacy
    gateway posts a form-encoded 'message' parameter.
    """

    def process(self, http_request, mangrove_request):
        if settings.USE_NEW_VUMI:
            payload = json.loads(http_request.raw_post_data)
            incoming = payload['content']
        else:
            incoming = http_request.POST['message']
        mangrove_request['incoming_message'] = incoming
class SMSTransportInfoRequestProcessor(object):
    """Build TransportInfo from the Vumi parameters of a real inbound SMS."""
    def process(self, http_request, mangrove_request):
        vumi_parameters = get_vumi_parameters(http_request)
        mangrove_request['transport_info']=TransportInfo(SMS, vumi_parameters.from_number,
                                                         vumi_parameters.to_number)
class MangroveWebSMSRequestProcessor(object):
    """Run the full web-SMS middleware pipeline, in order.

    NOTE: middlewares is a class-level list instantiated at import time and
    shared by all instances; the processors themselves are stateless.
    """
    middlewares=[SMSMessageRequestProcessor(),WebSMSOrganizationFinderRequestProcessor(),WebSMSTransportInfoRequestProcessor(),WebSMSDBMRequestProcessor()]
    def process(self, http_request, mangrove_request):
        for middleware in self.middlewares:
            middleware.process(http_request,mangrove_request)
def get_organization_number(organization_number):
    """Return a single organization number, unwrapping a list's first element.

    Non-list values (strings, tuples, ...) are returned unchanged.
    """
    if isinstance(organization_number, list):
        return organization_number[0]
    return organization_number
def try_get_value(request_params, key):
    """Return request_params[key], or None when the key is absent.

    Fix: the original used dict.has_key(), which was removed in Python 3;
    .get() is equivalent here and also works on Django's QueryDict.
    """
    return request_params.get(key)
def get_vumi_parameters(http_request):
    """Build VumiParameters from an inbound gateway request.

    New-style Vumi sends a JSON body (from_addr/to_addr/content); the
    legacy gateway sends form parameters (from_msisdn/to_msisdn/message).
    """
    post_params = http_request.POST
    if settings.USE_NEW_VUMI:
        raw_body = http_request.raw_post_data
        logger.info('http request raw post data: %s' % raw_body)
        payload = json.loads(raw_body)
        return VumiParameters(
            from_number=try_get_value(payload, "from_addr"),
            to_number=try_get_value(payload, "to_addr"),
            content=payload["content"],
            is_new_vumi=True,
        )
    return VumiParameters(
        from_number=try_get_value(post_params, "from_msisdn"),
        to_number=try_get_value(post_params, "to_msisdn"),
        content=post_params["message"],
        is_new_vumi=False,
    )
class VumiParameters(object):
    """Value object for an inbound SMS: addresses, text, and gateway flavor.

    Fix: the final assignment had unrelated residue appended to the line
    (which would fail at runtime); restored to a plain attribute assignment.
    """
    def __init__(self, from_number, to_number, content, is_new_vumi):
        self.from_number = from_number
        self.to_number = to_number
        self.content = content
        self.is_new_vumi = is_new_vumi
45940e2e0c52d01e63ad0d9f207ac4852d537161 | 4,664 | py | Python | tests/test_losses.py | p768lwy3/torecsys | 2251366268b4fbe6f8c3ab1628fa72a0db043dcd | [
"MIT"
] | 92 | 2019-08-15T11:03:50.000Z | 2022-03-12T01:21:05.000Z | tests/test_losses.py | p768lwy3/torecsys | 2251366268b4fbe6f8c3ab1628fa72a0db043dcd | [
"MIT"
] | 3 | 2020-03-11T08:57:50.000Z | 2021-01-06T01:39:47.000Z | tests/test_losses.py | p768lwy3/torecsys | 2251366268b4fbe6f8c3ab1628fa72a0db043dcd | [
"MIT"
] | 16 | 2019-10-12T11:28:53.000Z | 2022-03-28T14:04:12.000Z | import unittest
import torch
from parameterized import parameterized
from torecsys.losses import *
# Run on the first GPU when available, otherwise fall back to the CPU.
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
class AdaptiveHingeLossTestCase(unittest.TestCase):
    """AdaptiveHingeLoss should reduce to a scalar for varied shapes."""

    @parameterized.expand([
        (4, 32,),
        (16, 16,),
        (32, 4,),
    ])
    def test_forward(self, batch_size: int, num_neg: int):
        criterion = AdaptiveHingeLoss()
        criterion = criterion.to(device)
        # Fix: create inputs on the same device as the criterion; CPU
        # tensors fail against a CUDA-moved module.
        pos_out = torch.rand(batch_size, 1, device=device)
        neg_out = torch.rand(batch_size, num_neg, device=device)
        # Fix: randint's high bound is exclusive, so (0, 1) always produced
        # zeros and an all-False mask; (0, 2) gives a random boolean mask.
        mask = torch.randint(0, 2, (batch_size,), device=device)
        mask = mask == 1

        loss = criterion(pos_out, neg_out, mask)
        self.assertEqual(loss.size(), torch.Size([]))
        print(f'Loss Size: {loss.size()}; Loss: {loss.item()}')
class BayesianPersonalizedRankingLossTestCase(unittest.TestCase):
    """BPR loss (sum reduction) should reduce to a scalar."""

    @parameterized.expand([
        (4, 32,),
        (16, 16,),
        (32, 4,),
    ])
    def test_forward(self, batch_size: int, num_neg: int):
        criterion = BayesianPersonalizedRankingLoss(reduction='sum')
        criterion = criterion.to(device)
        # Fix: inputs on the criterion's device (CPU tensors fail on CUDA).
        pos_out = torch.rand(batch_size, 1, device=device)
        neg_out = torch.rand(batch_size, num_neg, device=device)
        # Fix: randint(0, 1) always yields 0 (high is exclusive) — use
        # (0, 2) for a genuine random boolean mask.
        mask = torch.randint(0, 2, (batch_size,), device=device)
        mask = mask == 1

        loss = criterion(pos_out, neg_out, mask)
        self.assertEqual(loss.size(), torch.Size([]))
        print(f'Loss Size: {loss.size()}; Loss: {loss.item()}')
class HingeLossTestCase(unittest.TestCase):
    """HingeLoss should reduce to a scalar for varied shapes."""

    @parameterized.expand([
        (4, 32,),
        (16, 16,),
        (32, 4,),
    ])
    def test_forward(self, batch_size: int, num_neg: int):
        criterion = HingeLoss()
        criterion = criterion.to(device)
        # Fix: inputs on the criterion's device (CPU tensors fail on CUDA).
        pos_out = torch.rand(batch_size, 1, device=device)
        neg_out = torch.rand(batch_size, num_neg, device=device)
        # Fix: randint(0, 1) always yields 0 (high is exclusive) — use
        # (0, 2) for a genuine random boolean mask.
        mask = torch.randint(0, 2, (batch_size,), device=device)
        mask = mask == 1

        loss = criterion(pos_out, neg_out, mask)
        self.assertEqual(loss.size(), torch.Size([]))
        print(f'Loss Size: {loss.size()}; Loss: {loss.item()}')
class ListnetLossTestCase(unittest.TestCase):
    """ListnetLoss should reduce to a scalar for varied list lengths."""

    @parameterized.expand([
        (4, 32,),
        (16, 16,),
        (32, 4,),
    ])
    def test_forward(self, batch_size: int, length: int):
        criterion = ListnetLoss()
        criterion = criterion.to(device)
        # Fix: inputs on the criterion's device (CPU tensors fail on CUDA).
        y_hat = torch.rand(batch_size, length, device=device)
        y_true = torch.rand(batch_size, length, device=device)
        # Fix: randint(0, 1) always yields 0 (high is exclusive) — use
        # (0, 2) for a genuine random boolean mask.
        mask = torch.randint(0, 2, (batch_size,), device=device)
        mask = mask == 1

        loss = criterion(y_hat, y_true, mask)
        self.assertEqual(loss.size(), torch.Size([]))
        print(f'Loss Size: {loss.size()}; Loss: {loss.item()}')
class PointwiseLogisticLossTestCase(unittest.TestCase):
    """PointwiseLogisticLoss should reduce to a scalar."""

    @parameterized.expand([
        (4, 32,),
        (16, 16,),
        (32, 4,),
    ])
    def test_forward(self, batch_size: int, num_neg: int):
        criterion = PointwiseLogisticLoss()
        criterion = criterion.to(device)
        # Fix: inputs on the criterion's device (CPU tensors fail on CUDA).
        pos_out = torch.rand(batch_size, 1, device=device)
        neg_out = torch.rand(batch_size, num_neg, device=device)
        # Fix: randint(0, 1) always yields 0 (high is exclusive) — use
        # (0, 2) for a genuine random boolean mask.
        mask = torch.randint(0, 2, (batch_size,), device=device)
        mask = mask == 1

        loss = criterion(pos_out, neg_out, mask)
        self.assertEqual(loss.size(), torch.Size([]))
        print(f'Loss Size: {loss.size()}; Loss: {loss.item()}')
class SkipGramLossTestCase(unittest.TestCase):
    """SkipGramLoss should reduce to a scalar for varied embedding sizes."""

    @parameterized.expand([
        (4, 32, 32,),
        (16, 64, 16,),
        (32, 128, 4,),
    ])
    def test_forward(self, batch_size: int, embed_size: int, num_neg: int):
        criterion = SkipGramLoss()
        criterion = criterion.to(device)
        # Fix: inputs on the criterion's device (CPU tensors fail on CUDA).
        content_inp = torch.rand(batch_size, 1, embed_size, device=device)
        pos_inp = torch.rand(batch_size, 1, embed_size, device=device)
        neg_inp = torch.rand(batch_size, num_neg, embed_size, device=device)

        loss = criterion(content_inp, pos_inp, neg_inp)
        self.assertEqual(loss.size(), torch.Size([]))
        print(f'Loss Size: {loss.size()}; Loss: {loss.item()}')
class TripletLossTestCase(unittest.TestCase):
    """Shape smoke-tests for TripletLoss (sum reduction, margin 1.0)."""

    @parameterized.expand([
        (4, 32, 32,),
        (16, 64, 16,),
        (32, 128, 4,),
    ])
    def test_forward(self, batch_size: int, embed_size: int, num_neg: int):
        """TripletLoss must reduce pos/neg scores to a scalar."""
        criterion = TripletLoss(margin=1.0, reduction='sum')
        criterion = criterion.to(device)
        # One positive score and num_neg negative scores per sample.
        # (embed_size is unused here; the loss consumes precomputed scores.)
        pos_out = torch.rand(batch_size, 1)
        neg_out = torch.rand(batch_size, num_neg)
        # NOTE(review): randint(0, 1, ...) only produces zeros, so the mask is
        # all False — randint(0, 2, ...) was probably intended; confirm.
        mask = torch.randint(0, 1, (batch_size,))
        mask = mask == 1
        loss = criterion(pos_out, neg_out, mask)
        self.assertEqual(loss.size(), torch.Size([]))
        print(f'Loss Size: {loss.size()}; Loss: {loss.item()}')
# Run all test cases when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| 30.684211 | 75 | 0.596698 | 4,434 | 0.950686 | 0 | 0 | 4,049 | 0.868139 | 0 | 0 | 369 | 0.079117 |
4595ee6a2050b46ad9613d645fcc6da9d6a3d73a | 653 | py | Python | src/jrtts/GlowTTS/Networks/build_model.py | tosaka-m/japanese_realtime_tts | f278005e6b79ecb975a6dc476aef3394a46214b3 | [
"MIT"
] | 5 | 2020-08-06T14:36:16.000Z | 2022-03-08T05:47:56.000Z | src/jrtts/GlowTTS/Networks/build_model.py | tosaka-m/japanese_realtime_tts | f278005e6b79ecb975a6dc476aef3394a46214b3 | [
"MIT"
] | null | null | null | src/jrtts/GlowTTS/Networks/build_model.py | tosaka-m/japanese_realtime_tts | f278005e6b79ecb975a6dc476aef3394a46214b3 | [
"MIT"
] | null | null | null | #coding:utf-8
import torch
from torch import nn
from .models import FlowGenerator
def build_model(model_params=None):
    """Construct and weight-initialize a FlowGenerator.

    :param model_params: optional dict of keyword arguments forwarded to
        FlowGenerator; ``None`` (the default) means no overrides.
    :return: the initialized FlowGenerator instance.
    """
    # Use a None sentinel instead of a mutable default argument ({}), which
    # would be shared across calls.
    model = FlowGenerator(**(model_params or {}))
    initialize(model)
    return model
def initialize(model):
    """Initialize a model's weights in place.

    Matrix parameters (ndim >= 2) get Xavier-normal init; vector parameters
    (biases) get a tiny uniform init. Embedding tables are then re-drawn
    uniformly in [-0.1, 0.1], overriding the Xavier pass.
    """
    weight_range = 0.1
    bias_range = 0.001
    for param in model.parameters():
        if len(param.shape) >= 2:
            torch.nn.init.xavier_normal_(param)
        else:
            torch.nn.init.uniform_(param, (-1) * bias_range, bias_range)
    for module in model.modules():
        if isinstance(module, nn.Embedding):
            module.weight.data.uniform_(-weight_range, weight_range)
| 27.208333 | 78 | 0.67075 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 13 | 0.019908 |
459604032bfdd5cf9d1499eab90812831118e9c0 | 5,387 | py | Python | layers.py | xiangsheng1325/fastgae_pytorch | 97592f2ac064aab99db9a6fec3f6757573a8db7d | [
"MIT"
] | 8 | 2021-01-18T03:14:35.000Z | 2022-03-28T08:35:15.000Z | layers.py | xiangsheng1325/fastgae_pytorch | 97592f2ac064aab99db9a6fec3f6757573a8db7d | [
"MIT"
] | null | null | null | layers.py | xiangsheng1325/fastgae_pytorch | 97592f2ac064aab99db9a6fec3f6757573a8db7d | [
"MIT"
] | 1 | 2022-03-20T09:48:46.000Z | 2022-03-20T09:48:46.000Z | import torch, math, copy
import scipy.sparse as sp
import numpy as np
from torch.nn.modules.module import Module
import torch.nn as nn
from torch.nn.parameter import Parameter
def normalize(adj, device='cpu'):
    """Symmetrically normalize an adjacency matrix (dense path).

    Computes D^{-1/2} (A + I) D^{-1/2}, where D is the degree matrix of the
    self-looped graph A + I.

    :param adj: adjacency matrix as torch.Tensor, scipy csr_matrix, or
        numpy ndarray (anything else must expose ``.to(device)``)
    :param device: torch device the computation runs on
    :return: dense normalized adjacency as a torch.Tensor on ``device``
    """
    # Coerce every supported input type to a float tensor on the target device.
    if isinstance(adj, sp.csr_matrix):
        dense = torch.from_numpy(adj.toarray()).float().to(device)
    elif isinstance(adj, np.ndarray):
        dense = torch.from_numpy(adj).float().to(device)
    else:
        # torch.Tensor (or any tensor-like object with a .to() method)
        dense = adj.to(device)
    # Add self-loops so every node has degree >= 1 (no division by zero below).
    with_loops = dense + torch.eye(dense.shape[0]).to(device)
    inv_sqrt_degree = torch.diag(torch.pow(with_loops.sum(1), -0.5).flatten())
    # D^{-1/2} (A + I) D^{-1/2}
    return torch.mm(torch.mm(inv_sqrt_degree, with_loops), inv_sqrt_degree)
def coo_to_csp(sp_coo):
    """Convert a scipy.sparse.coo_matrix into a torch sparse COO tensor.

    :param sp_coo: scipy COO matrix (any shape, not just square)
    :return: torch.sparse_coo_tensor with float32 values on CPU
    """
    indices = torch.LongTensor(np.stack([sp_coo.row, sp_coo.col]))
    # Cast values explicitly so float64 scipy data is accepted too, while
    # keeping the float32 output of the old torch.sparse.FloatTensor path.
    values = torch.tensor(sp_coo.data, dtype=torch.float32)
    # Use the matrix's actual shape instead of assuming a square matrix.
    return torch.sparse_coo_tensor(indices, values, torch.Size(sp_coo.shape))
#def sp_diag(sp_tensor):
# sp_tensor = sp_tensor.to_dense()
# sp_array = sp_tensor.to('cpu').numpy()
# sp_diags = sp.diags(sp_array).tocoo()
# return coo_to_csp(sp_diags)
def sp_normalize(adj_def, device='cpu'):
    """Symmetrically normalize an adjacency matrix in scipy-sparse space.

    Computes D^{-1/2} (A + I) D^{-1/2} without densifying the matrix, then
    converts the result to a torch sparse COO tensor via ``coo_to_csp``.

    :param adj_def: square adjacency matrix (any format accepted by
        ``sp.coo_matrix``)
    :param device: unused; kept for interface compatibility — the result is
        built on CPU and callers move it themselves
    :return: tuple of (normalized adjacency as a torch sparse tensor,
        per-row degree sums of A + I as a 1-D numpy array)
    """
    # Add self-loops: A + I, in float32 to match the torch side.
    adj_ = sp.coo_matrix(adj_def)
    adj_ = adj_ + sp.coo_matrix(sp.eye(adj_def.shape[0]), dtype=np.float32)
    # Degrees of the self-looped graph are strictly positive, so the
    # fractional power below cannot divide by zero.
    rowsum = np.array(adj_.sum(axis=1)).reshape(-1)
    inv_sqrt_degree = sp.diags(np.float_power(rowsum, -0.5).astype(np.float32))
    # D^{-1/2} (A + I) D^{-1/2}; a single diagonal factor is used on both
    # sides (the old code kept a redundant copy.copy of it).
    adj_normalized = inv_sqrt_degree @ (adj_ @ inv_sqrt_degree)
    return coo_to_csp(adj_normalized.tocoo()), rowsum
class PairNorm(nn.Module):
    """PairNorm graph normalization layer.

    Modes:
        'None'   : identity — no normalization.
        'PN'     : original PairNorm — center columns, then rescale by the
                   mean row norm.
        'PN-SI'  : Scale-Individually — center, then rescale each row to
                   (roughly) unit norm times ``scale``.
        'PN-SCS' : Scale-and-Center-Simultaneously — rescale rows first,
                   then subtract the (pre-scaling) column mean. Not in the
                   paper, but works well in practice for GCN/GAT.

    Typically applied after each graph convolution.
    """

    def __init__(self, mode='PN', scale=1):
        assert mode in ['None', 'PN', 'PN-SI', 'PN-SCS']
        super(PairNorm, self).__init__()
        self.mode = mode
        # `scale` can be tuned per dataset; [0.1, 1, 10, 50, 100] is a
        # reasonable search pool.
        self.scale = scale

    def forward(self, x):
        mode = self.mode
        if mode == 'None':
            return x
        col_mean = x.mean(dim=0)
        if mode == 'PN':
            centered = x - col_mean
            # Root-mean row norm; 1e-6 guards against an all-zero input.
            mean_norm = (1e-6 + centered.pow(2).sum(dim=1).mean()).sqrt()
            return self.scale * centered / mean_norm
        if mode == 'PN-SI':
            centered = x - col_mean
            row_norms = (1e-6 + centered.pow(2).sum(dim=1, keepdim=True)).sqrt()
            return self.scale * centered / row_norms
        # mode == 'PN-SCS': scale rows of the raw input, then center.
        row_norms = (1e-6 + x.pow(2).sum(dim=1, keepdim=True)).sqrt()
        return self.scale * x / row_norms - col_mean
class GraphConvolution(Module):
    """
    Simple GCN layer, similar to https://arxiv.org/abs/1609.02907,
    optionally followed by PairNorm and an element-wise activation.
    """

    def __init__(self, in_features, out_features, bias=True, mode='None', act=lambda x: x):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        # Dense weight of shape (in_features, out_features); values are set
        # by reset_parameters below.
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        # PairNorm mode 'None' makes the normalization a no-op.
        self.pn = PairNorm(mode=mode)
        self.act = act
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Uniform init in +-1/sqrt(out_features), as in the reference GCN code."""
        bound = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-bound, bound)
        if self.bias is not None:
            self.bias.data.uniform_(-bound, bound)

    def forward(self, input, adj):
        """Compute act(PairNorm(adj @ (input @ W) + b))."""
        hidden = torch.mm(adj, torch.mm(input, self.weight))
        if self.bias is not None:
            hidden = hidden + self.bias
        return self.act(self.pn(hidden))

    def __repr__(self):
        return '{} ({} -> {})'.format(
            self.__class__.__name__, self.in_features, self.out_features)
| 36.646259 | 92 | 0.599963 | 2,909 | 0.540004 | 0 | 0 | 0 | 0 | 0 | 0 | 1,419 | 0.263412 |
45975eff20d2603fa6353e7112b6096bd8544f29 | 1,532 | py | Python | common/db.py | levinster82/GaragePi | 873f8dbe6a6abdd64095426fa14392d0bb7e66df | [
"MIT"
] | 34 | 2016-01-07T19:38:45.000Z | 2022-03-29T07:32:14.000Z | common/db.py | levinster82/GaragePi | 873f8dbe6a6abdd64095426fa14392d0bb7e66df | [
"MIT"
] | 17 | 2016-06-09T22:54:09.000Z | 2021-05-14T07:14:34.000Z | common/db.py | levinster82/GaragePi | 873f8dbe6a6abdd64095426fa14392d0bb7e66df | [
"MIT"
] | 25 | 2016-07-06T15:55:43.000Z | 2022-03-31T06:20:04.000Z | import os
from sqlite3 import dbapi2 as sqlite3
class GarageDb:
    """SQLite-backed store of garage door events.

    The schema script at ``<resource_path>/schema.sql`` is (re)applied on
    construction, so the database file is created on first use.
    """

    # Single source of truth for the history SELECT (local-time timestamps,
    # newest first); shared by read_history and read_full_history.
    HISTORY_QUERY = ('select datetime(timestamp, \'localtime\') as timestamp, '
                     'event, description from entries order by timestamp desc')

    def __init__(self, instance_path, resource_path):
        self.db_file = os.path.join(instance_path, 'history.db')
        self.init_file = os.path.join(resource_path, 'schema.sql')

        # Run init script to ensure database structure
        conn = self.get_connection()
        try:
            with open(self.init_file, mode='r') as f:
                conn.cursor().executescript(f.read())
            conn.commit()
        finally:
            # Close even if the schema script fails, so the file isn't leaked.
            conn.close()

    def get_connection(self):
        """Open a new connection with name-based row access (sqlite3.Row)."""
        rv = sqlite3.connect(self.db_file)
        rv.row_factory = sqlite3.Row
        return rv

    def record_event(self, user_agent: str, login: str, event: str, description: str):
        """Append one event row; the timestamp comes from the schema default."""
        conn = self.get_connection()
        try:
            # Parameterized to avoid SQL injection from user-supplied values.
            conn.execute('insert into entries (UserAgent, Login, Event, Description) values (?, ?, ?, ?)',
                         [user_agent, login, event, description])
            conn.commit()
        finally:
            conn.close()

    def read_history(self):
        """Return up to the newest 500 events (timestamp, event, description)."""
        conn = self.get_connection()
        try:
            cur = conn.execute(self.HISTORY_QUERY)
            return cur.fetchmany(500)
        finally:
            conn.close()

    def read_full_history(self):
        """Return every recorded event, newest first."""
        conn = self.get_connection()
        try:
            cur = conn.execute(self.HISTORY_QUERY)
            return cur.fetchall()
        finally:
            conn.close()
| 37.365854 | 141 | 0.63577 | 1,482 | 0.967363 | 0 | 0 | 0 | 0 | 0 | 0 | 379 | 0.247389 |
459906e9c61347117a3af63208abe4d1417e1da0 | 541 | py | Python | environments/migrations/0003_auto_20201228_1616.py | Teosidonio/Data_Solution | 94a898b7939b3bdb0fe92df97aa833c1fc7394a3 | [
"MIT"
] | null | null | null | environments/migrations/0003_auto_20201228_1616.py | Teosidonio/Data_Solution | 94a898b7939b3bdb0fe92df97aa833c1fc7394a3 | [
"MIT"
] | null | null | null | environments/migrations/0003_auto_20201228_1616.py | Teosidonio/Data_Solution | 94a898b7939b3bdb0fe92df97aa833c1fc7394a3 | [
"MIT"
] | null | null | null | # Generated by Django 3.1 on 2020-12-28 14:16
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration: renames Environments.stage to
    # env_stage and updates the model's default ordering to match.

    dependencies = [
        ('environments', '0002_auto_20201228_1548'),
    ]

    operations = [
        # Meta-only change: default queryset ordering (no schema change).
        migrations.AlterModelOptions(
            name='environments',
            options={'ordering': ['date_created', 'name', 'env_stage', 'platform']},
        ),
        # Column rename only; existing row data is preserved.
        migrations.RenameField(
            model_name='environments',
            old_name='stage',
            new_name='env_stage',
        ),
    ]
| 23.521739 | 84 | 0.582255 | 458 | 0.84658 | 0 | 0 | 0 | 0 | 0 | 0 | 181 | 0.334566 |
459a1e6b7e0fdf087cd3f288de2eb4f069687538 | 722 | py | Python | code_all/day17/homework/exercise02.py | testcg/python | 4db4bd5d0e44af807d2df80cf8c8980b40cc03c4 | [
"MIT"
] | null | null | null | code_all/day17/homework/exercise02.py | testcg/python | 4db4bd5d0e44af807d2df80cf8c8980b40cc03c4 | [
"MIT"
] | null | null | null | code_all/day17/homework/exercise02.py | testcg/python | 4db4bd5d0e44af807d2df80cf8c8980b40cc03c4 | [
"MIT"
] | null | null | null | """
Iterator exercise: implementing __iter__ with yield.
"""
class CommodityController:
    """Container exercising the iterable protocol via a generator ``__iter__``."""

    def __init__(self):
        # Private backing list of commodity names, in insertion order.
        self.__commoditys = []

    def add_commodity(self, cmd):
        """Append one commodity."""
        self.__commoditys.append(cmd)

    def __iter__(self):
        """Yield every stored commodity in insertion order.

        Generalized from the original hard-coded three ``yield`` statements,
        which raised IndexError for fewer than three commodities and silently
        dropped any beyond the third.
        """
        yield from self.__commoditys
# Demo: populate the controller, then iterate it with a plain for loop.
controller = CommodityController()
for commodity in ("屠龙刀", "倚天剑", "芭比娃娃"):
    controller.add_commodity(commodity)
for commodity in controller:
    print(commodity)
# Equivalent manual iteration (what the for loop does under the hood):
# iterator = iter(controller)
# while True:
#     try:
#         print(next(iterator))
#     except StopIteration:
#         break
| 19 | 38 | 0.621884 | 355 | 0.474599 | 195 | 0.260695 | 0 | 0 | 0 | 0 | 223 | 0.298128 |
459aedab06142ccd615ef56d1e0a1e0074661927 | 712 | py | Python | src/reporter/reporter/reports/estimate.py | tomasfarias/pipeline | e32f1d34fef1cc9fb534e244e71f41276a979002 | [
"MIT"
] | 1 | 2019-12-24T05:50:38.000Z | 2019-12-24T05:50:38.000Z | src/reporter/reporter/reports/estimate.py | tomasfarias/pipeline | e32f1d34fef1cc9fb534e244e71f41276a979002 | [
"MIT"
] | 3 | 2021-08-08T19:52:05.000Z | 2021-08-08T19:52:06.000Z | src/reporter/reporter/reports/estimate.py | tomasfarias/pipeline | e32f1d34fef1cc9fb534e244e71f41276a979002 | [
"MIT"
] | null | null | null | import pandas as pd
from reporter.report import Report
class Estimate(Report):
    # Report over orders cancelled between self.start and self.end
    # (inclusive, per SQL BETWEEN); writes the annotated frame via save_csv.

    def run(self):
        """Fetch cancelled orders in the report window, add an 'age' column
        (minutes), and save the result as CSV."""
        # NOTE(review): the timestamps are interpolated straight into the SQL
        # text; safe only while start/end are datetimes set internally —
        # prefer bound parameters if they ever come from user input.
        query = (
            "select * from orders where status = 'CANCELLED' and "
            f"updated_ts between '{self.start: %Y-%m-%d %H:%M:%S}'::timestamp and "
            f"'{self.end: %Y-%m-%d %H:%M:%S}'::timestamp"
        )
        df = pd.read_sql_query(
            query,
            con=self.db_engine,
            parse_dates=['created_ts', 'updated_ts'],
        )
        df = df.assign(  # order age in minutes
            # NOTE(review): created_ts - updated_ts is non-positive whenever
            # updates follow creation, so 'age' comes out negative —
            # updated_ts - created_ts was probably intended; confirm.
            age=(df['created_ts'] - df['updated_ts']).dt.total_seconds() // 60
        )
        # NOTE(review): this groupby result is discarded (never assigned,
        # saved, or returned); either drop the line or capture the counts.
        df.groupby('age').size().reset_index(name='counts')

        self.save_csv(df)
| 26.37037 | 83 | 0.532303 | 653 | 0.917135 | 0 | 0 | 0 | 0 | 0 | 0 | 253 | 0.355337 |