Dataset schema:

| column | dtype | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k (nullable) |
| max_stars_repo_stars_event_min_datetime | string | length 24 (nullable) |
| max_stars_repo_stars_event_max_datetime | string | length 24 (nullable) |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k (nullable) |
| max_issues_repo_issues_event_min_datetime | string | length 24 (nullable) |
| max_issues_repo_issues_event_max_datetime | string | length 24 (nullable) |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k (nullable) |
| max_forks_repo_forks_event_min_datetime | string | length 24 (nullable) |
| max_forks_repo_forks_event_max_datetime | string | length 24 (nullable) |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |
221ee83a279f586d62cd3b5b659bf72ceddc7c10
| 8,125
|
py
|
Python
|
VQVAE/main.py
|
bipashasen/How2Sign-Blob
|
6e2af881d96d477fdb93104b8e53d943765c64ff
|
[
"MIT"
] | null | null | null |
VQVAE/main.py
|
bipashasen/How2Sign-Blob
|
6e2af881d96d477fdb93104b8e53d943765c64ff
|
[
"MIT"
] | null | null | null |
VQVAE/main.py
|
bipashasen/How2Sign-Blob
|
6e2af881d96d477fdb93104b8e53d943765c64ff
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import argparse
from tqdm import tqdm
import sys
import distributed as dist
import utils
from models.vqvae import VQVAE, VQVAE_Blob2Full
from models.discriminator import discriminator
visual_folder = '/home2/bipasha31/python_scripts/CurrentWork/samples/VQVAE'
os.makedirs(visual_folder, exist_ok=True)
verbose = False
save_idx_global = 0
save_at = 100
did = 0
models = {
'gan': 0,
'vae': 1
}
model_to_train = models['vae']
results = {
'n_updates': 0,
'recon_errors': [],
'loss_vals': [],
'perplexities': [],
'd_loss': []
}
device = 'cuda:0'
def main(args):
"""
Set up VQ-VAE model with components defined in ./models/ folder
"""
model = VQVAE(args.n_hiddens, args.n_residual_hiddens,
args.n_residual_layers, args.n_embeddings,
args.embedding_dim, args.beta, device)
if args.ckpt:
model.load_state_dict(torch.load(args.ckpt)['model'])
model = model.to(device)
if args.test:
loader = utils.load_data_and_data_loaders(args.dataset, args.batch_size, test=True)
test(loader, model)
return
"""
Load data and define batch data loaders
"""
items = utils.load_data_and_data_loaders(args.dataset, args.batch_size)
training_loader, validation_loader = items[2], items[3]
x_train_var = items[4]
"""
Set up optimizer and training loop
"""
optimizer = optim.Adam(model.parameters(), lr=args.learning_rate, amsgrad=True)
model.train()
if model_to_train == models['gan']:
train_vqgan(args, training_loader, validation_loader, x_train_var, model, optimizer)
else:
train(args, training_loader, validation_loader, x_train_var, model, optimizer)
def test(loader, model):
for i, data in enumerate(tqdm(loader)):
x, _ = data
x = x.to(device)
with torch.no_grad():
_ = model(x, save_idx=f'{i}', visual_folder=visual_folder)
def train(args, training_loader, validation_loader, x_train_var, model, optimizer):
global save_idx_global
for i in range(args.n_updates):
(x, _) = next(iter(training_loader))
x = x.to(device)
optimizer.zero_grad()
save_idx = None
embedding_loss, x_hat, perplexity = model(x)
recon_loss = torch.mean((x_hat - x)**2) / x_train_var
loss = recon_loss + embedding_loss
loss.backward()
optimizer.step()
results["recon_errors"].append(recon_loss.cpu().detach().numpy())
results["perplexities"].append(perplexity.cpu().detach().numpy())
results["loss_vals"].append(loss.cpu().detach().numpy())
results["n_updates"] = i
if i % save_at == 0:
save_idx = save_idx_global
save_idx_global += 1
model.eval()
with torch.no_grad():
for vi in tqdm(range(10)):
(x, _) = next(iter(validation_loader))
x = x.to(device)
_, _, _ = model(x, verbose=verbose, save_idx=f'{save_idx}_{vi}', visual_folder=visual_folder)
model.train()
if i % args.log_interval == 0 and dist.is_primary():
"""
save model and print values
"""
if args.save:
hyperparameters = args.__dict__
utils.save_model_and_results(
model, optimizer, results, hyperparameters, args.filename)
print('Update #', i, 'Recon Error:',
np.mean(results["recon_errors"][-args.log_interval:]),
'Loss', np.mean(results["loss_vals"][-args.log_interval:]),
'Perplexity:', np.mean(results["perplexities"][-args.log_interval:]))
def train_vqgan(args, training_loader, validation_loader, x_train_var, model, optimizer):
global save_idx_global
c_mse = nn.MSELoss()
disc = discriminator().to(device)
optim_D = optim.Adam(disc.parameters(), lr=args.learning_rate, amsgrad=True)
for i in range(args.n_updates):
(x, _) = next(iter(training_loader))
x = x.to(device)
optimizer.zero_grad()
optim_D.zero_grad()
save_idx = None
if i % save_at == 0 and i > 0:
save_idx = save_idx_global
save_idx_global += 1
embedding_loss, x_hat, perplexity = \
model(x, verbose=verbose, save_idx=save_idx, visual_folder=visual_folder)
recon_loss = torch.mean((x_hat - x)**2) / x_train_var
loss = recon_loss + embedding_loss
'''
adding the perceptual loss here - patch loss of real and fake
'''
B = args.batch_size
D = 16 * 16
ones = torch.ones((B, D), dtype=torch.float32, device=device)
zeros = torch.zeros((B, D), dtype=torch.float32, device=device)
if i % 2 == 0:
fake = disc(x_hat).view(B, D)
loss += c_mse(fake, ones)
else:
fake = disc(x_hat.clone().detach()).view(B, D)
real = disc(x).view(B, D)
d_loss = c_mse(real, ones) + c_mse(fake, zeros)
results["d_loss"].append(d_loss.cpu().detach().numpy())
d_loss.backward()
optim_D.step()
loss.backward()
optimizer.step()
results["recon_errors"].append(recon_loss.cpu().detach().numpy())
results["perplexities"].append(perplexity.cpu().detach().numpy())
results["loss_vals"].append(loss.cpu().detach().numpy())
results["n_updates"] = i
if i % args.log_interval == 0:
"""
save model and print values
"""
if args.save:
hyperparameters = args.__dict__
utils.save_model_and_results(
model, optimizer, results, hyperparameters, args.filename)
print('Update #', i, 'Recon Error:',
np.mean(results["recon_errors"][-args.log_interval:]),
'Loss', np.mean(results["loss_vals"][-args.log_interval:]),
'Discriminator Loss', np.mean(results['d_loss'][-args.log_interval:]),
'Perplexity:', np.mean(results["perplexities"][-args.log_interval:]), flush=True)
if __name__ == "__main__":
# train_vqgan()
# train_blob2full()
parser = argparse.ArgumentParser()
"""
Hyperparameters
"""
timestamp = utils.readable_timestamp()
parser.add_argument("--batch_size", type=int, default=64)
parser.add_argument("--n_updates", type=int, default=50000)
parser.add_argument("--n_hiddens", type=int, default=128)
parser.add_argument("--n_residual_hiddens", type=int, default=32)
parser.add_argument("--n_residual_layers", type=int, default=2)
parser.add_argument("--embedding_dim", type=int, default=64)
parser.add_argument("--n_embeddings", type=int, default=512)
parser.add_argument("--beta", type=float, default=.25)
parser.add_argument("--learning_rate", type=float, default=3e-4)
parser.add_argument("--ckpt", type=str)
parser.add_argument("--log_interval", type=int, default=3)
parser.add_argument("--save_at", type=int, default=100)
parser.add_argument("--device_id", type=int, default=0)
parser.add_argument("--dataset", type=str, default='HandGestures')
parser.add_argument("--test", action='store_true')
# whether or not to save model
parser.add_argument("-save", action="store_true")
parser.add_argument("--filename", type=str, default=timestamp)
args = parser.parse_args()
args.save = True
if args.save and dist.is_primary():
print('Results will be saved in ./results/vqvae_' + args.filename + '.pth')
args.n_gpu = torch.cuda.device_count()
port = (
2 ** 15
+ 2 ** 14
+ hash(os.getuid() if sys.platform != "win32" else 1) % 2 ** 14
)+1
print(f'port: {port}')
print(args)
dist.launch(main, args.n_gpu, 1, 0, f"tcp://127.0.0.1:{port}", args=(args,))
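# Hedged usage sketch (not part of the original file): an illustrative command line for this
# script, using only the flags defined above; values are arbitrary and assume the repo's
# utils, models and distributed modules are importable.
#   python main.py --batch_size 32 --n_updates 20000 --dataset HandGestures -save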
| 30.204461
| 113
| 0.606769
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,329
| 0.163569
|
2221af1a0ee8e71a36084e82816e4e484658018d
| 1,245
|
py
|
Python
|
api/voters/tests/test_models.py
|
citizenlabsgr/voter-engagement
|
2d33eac1531471988543c6c3781b95ac73ec6dd9
|
[
"MIT"
] | 6
|
2017-11-10T00:50:17.000Z
|
2018-03-25T02:26:19.000Z
|
api/voters/tests/test_models.py
|
citizenlabsgr/voter-engagement
|
2d33eac1531471988543c6c3781b95ac73ec6dd9
|
[
"MIT"
] | 40
|
2017-10-25T16:16:55.000Z
|
2018-08-15T05:27:36.000Z
|
api/voters/tests/test_models.py
|
citizenlabsgr/voter-engagement
|
2d33eac1531471988543c6c3781b95ac73ec6dd9
|
[
"MIT"
] | 3
|
2017-11-22T01:50:41.000Z
|
2018-04-17T23:33:08.000Z
|
# pylint: disable=unused-variable,unused-argument,expression-not-assigned
from django.forms.models import model_to_dict
import arrow
import pytest
from expecter import expect
from api.elections.models import Election
from .. import models
@pytest.fixture
def info():
return models.Identity(
first_name="John",
last_name="Doe",
birth_date=arrow.get("1985-06-19"),
)
@pytest.fixture
def voter(info):
return models.Voter(
email="john@example.com",
**model_to_dict(info),
)
@pytest.fixture
def status(voter):
return models.Status(
voter=voter,
election=Election(name="Sample Election"),
)
def describe_registration_info():
def describe_birth_month():
def is_parsed_from_date(info):
expect(info.birth_month) == "June"
def describe_birth_year():
def is_parsed_from_date(info):
expect(info.birth_year) == 1985
def describe_voter():
def describe_str():
def is_based_on_name(voter):
expect(str(voter)) == "John Doe"
def describe_status():
def describe_str():
def is_based_on_voter_and_election(status):
expect(str(status)) == "Sample Election: John Doe"
| 18.863636
| 73
| 0.658635
| 0
| 0
| 0
| 0
| 423
| 0.339759
| 0
| 0
| 174
| 0.139759
|
22231fc92c2080f08011cca9cc3a336ee11e707e
| 404
|
py
|
Python
|
leetcode/easy/strobogrammatic-number.py
|
vtemian/interviews-prep
|
ddef96b5ecc699a590376a892a804c143fe18034
|
[
"Apache-2.0"
] | 8
|
2019-05-14T12:50:29.000Z
|
2022-03-01T09:08:27.000Z
|
leetcode/easy/strobogrammatic-number.py
|
vtemian/interviews-prep
|
ddef96b5ecc699a590376a892a804c143fe18034
|
[
"Apache-2.0"
] | 46
|
2019-03-24T20:59:29.000Z
|
2019-04-09T16:28:43.000Z
|
leetcode/easy/strobogrammatic-number.py
|
vtemian/interviews-prep
|
ddef96b5ecc699a590376a892a804c143fe18034
|
[
"Apache-2.0"
] | 1
|
2022-01-28T12:46:29.000Z
|
2022-01-28T12:46:29.000Z
|
class Solution:
def isStrobogrammatic(self, num: str) -> bool:
strobogrammatic = {
'1': '1',
'0': '0',
'6': '9',
'9': '6',
'8': '8'
}
for idx, digit in enumerate(num):
if digit not in strobogrammatic or strobogrammatic[digit] != num[len(num) - idx -1]:
return False
return True
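# Hedged usage sketch (not part of the original file):
#   Solution().isStrobogrammatic("69")   # -> True  (rotated 180 degrees it still reads "69")
#   Solution().isStrobogrammatic("962")  # -> False ('2' has no valid rotation)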
| 25.25
| 96
| 0.445545
| 403
| 0.997525
| 0
| 0
| 0
| 0
| 0
| 0
| 30
| 0.074257
|
22259d4822670697f3a83a96fc5c76baa093e86f
| 992
|
py
|
Python
|
app/main/helpers/direct_award_helpers.py
|
uk-gov-mirror/alphagov.digitalmarketplace-buyer-frontend
|
ec3751b6d24842cc53febb20391ae340c0fea756
|
[
"MIT"
] | 4
|
2017-10-12T16:15:01.000Z
|
2020-11-28T03:41:15.000Z
|
app/main/helpers/direct_award_helpers.py
|
uk-gov-mirror/alphagov.digitalmarketplace-buyer-frontend
|
ec3751b6d24842cc53febb20391ae340c0fea756
|
[
"MIT"
] | 615
|
2015-02-27T15:45:43.000Z
|
2021-07-01T10:09:55.000Z
|
app/main/helpers/direct_award_helpers.py
|
uk-gov-mirror/alphagov.digitalmarketplace-buyer-frontend
|
ec3751b6d24842cc53febb20391ae340c0fea756
|
[
"MIT"
] | 15
|
2015-06-30T14:35:20.000Z
|
2021-04-10T18:06:36.000Z
|
from operator import itemgetter
def is_direct_award_project_accessible(project, user_id):
return any([user['id'] == user_id for user in project['users']])
def get_direct_award_projects(data_api_client, user_id, return_type="all", sort_by_key=None, latest_first=None):
projects = data_api_client.find_direct_award_projects(user_id, latest_first=latest_first).get('projects', [])
res = {
"open_projects": [],
"closed_projects": [],
}
for project in projects:
if project['lockedAt'] is None:
res['open_projects'].append(project)
else:
res['closed_projects'].append(project)
if return_type == "all":
if sort_by_key:
res['open_projects'].sort(key=itemgetter(sort_by_key))
res['closed_projects'].sort(key=itemgetter(sort_by_key))
return res
else:
if sort_by_key:
res[return_type].sort(key=itemgetter(sort_by_key))
return res[return_type]
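# Hedged usage sketch (not part of the original file): `api_client` stands in for a data API
# client exposing find_direct_award_projects, and the "name" sort key is illustrative.
#   open_projects = get_direct_award_projects(api_client, user_id=123,
#                                              return_type="open_projects", sort_by_key="name")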
| 31
| 113
| 0.658266
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 137
| 0.138105
|
2225c8037c0f830751bb9a95f4a1c6bd17ba29de
| 824
|
py
|
Python
|
403-Frog-Jump/solution.py
|
Tanych/CodeTracking
|
86f1cb98de801f58c39d9a48ce9de12df7303d20
|
[
"MIT"
] | null | null | null |
403-Frog-Jump/solution.py
|
Tanych/CodeTracking
|
86f1cb98de801f58c39d9a48ce9de12df7303d20
|
[
"MIT"
] | null | null | null |
403-Frog-Jump/solution.py
|
Tanych/CodeTracking
|
86f1cb98de801f58c39d9a48ce9de12df7303d20
|
[
"MIT"
] | null | null | null |
class Solution(object):
def dfs(self,stones,graph,curpos,lastjump):
if curpos==stones[-1]:
return True
# the next jump length is based on lastjump (it may be lastjump-1, lastjump or lastjump+1);
# the frog only moves forward, so skip jumps that would stay at the same position
rstart=max(curpos+lastjump-1,curpos+1)
rend=min(curpos+lastjump+1,stones[-1])+1
for nextpos in xrange(rstart,rend):
if nextpos in graph and self.dfs(stones,graph,nextpos,nextpos-curpos):
return True
return False
def canCross(self, stones):
"""
:type stones: List[int]
:rtype: bool
"""
if not stones:
return True
if stones[1]!=1:
return False
graph={val:idx for idx,val in enumerate(stones)}
return self.dfs(stones,graph,1,1)
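# Hedged usage sketch (not part of the original file): the two classic LeetCode 403 examples
# (under Python 3, replace xrange above with range).
#   Solution().canCross([0, 1, 3, 5, 6, 8, 12, 17])   # -> True
#   Solution().canCross([0, 1, 2, 3, 4, 8, 9, 11])    # -> False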
| 32.96
| 82
| 0.566748
| 815
| 0.989078
| 0
| 0
| 0
| 0
| 0
| 0
| 157
| 0.190534
|
2227aded77b3fc2c225e7b80658dcf4702936914
| 2,261
|
py
|
Python
|
daemon/api/endpoints/partial/pod.py
|
vishalbelsare/jina
|
ae72cc5ce1f7e7f4c662e72e96ea21dddc28bf43
|
[
"Apache-2.0"
] | 2
|
2021-06-28T16:25:09.000Z
|
2021-06-28T16:26:41.000Z
|
daemon/api/endpoints/partial/pod.py
|
vishalbelsare/jina
|
ae72cc5ce1f7e7f4c662e72e96ea21dddc28bf43
|
[
"Apache-2.0"
] | null | null | null |
daemon/api/endpoints/partial/pod.py
|
vishalbelsare/jina
|
ae72cc5ce1f7e7f4c662e72e96ea21dddc28bf43
|
[
"Apache-2.0"
] | null | null | null |
from typing import Optional, Dict, Any
from fastapi import APIRouter
from jina.helper import ArgNamespace
from jina.parsers import set_pod_parser
from ....excepts import PartialDaemon400Exception
from ....models import PodModel
from ....models.partial import PartialStoreItem
from ....stores import partial_store as store
router = APIRouter(prefix='/pod', tags=['pod'])
@router.get(
path='',
summary='Get status of a running Pod',
response_model=PartialStoreItem,
)
async def _status():
"""
.. #noqa: DAR101
.. #noqa: DAR201"""
return store.item
@router.post(
path='',
summary='Create a Pod',
description='Create a Pod and add it to the store',
status_code=201,
response_model=PartialStoreItem,
)
async def _create(pod: 'PodModel'):
"""
.. #noqa: DAR101
.. #noqa: DAR201"""
try:
args = ArgNamespace.kwargs2namespace(pod.dict(), set_pod_parser())
return store.add(args)
except Exception as ex:
raise PartialDaemon400Exception from ex
@router.put(
path='/rolling_update',
summary='Run a rolling_update operation on the Pod object',
response_model=PartialStoreItem,
)
async def rolling_update(uses_with: Optional[Dict[str, Any]] = None):
"""
.. #noqa: DAR101
.. #noqa: DAR201
"""
try:
return await store.rolling_update(uses_with=uses_with)
except ValueError as ex:
raise PartialDaemon400Exception from ex
@router.put(
path='/scale',
summary='Run a scale operation on the Pod object',
response_model=PartialStoreItem,
)
async def scale(replicas: int):
"""
.. #noqa: DAR101
.. #noqa: DAR201
"""
try:
return await store.scale(replicas=replicas)
except ValueError as ex:
raise PartialDaemon400Exception from ex
@router.delete(
path='',
summary='Terminate the running Pod',
description='Terminate a running Pod and release its resources',
)
async def _delete():
"""
.. #noqa: DAR101
.. #noqa: DAR201"""
try:
store.delete()
except Exception as ex:
raise PartialDaemon400Exception from ex
@router.on_event('shutdown')
def _shutdown():
"""
.. #noqa: DAR101
.. #noqa: DAR201"""
store.delete()
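# Hedged usage sketch (not part of the original file): the router defined above can be mounted
# on a FastAPI application in the usual way.
#   from fastapi import FastAPI
#   app = FastAPI()
#   app.include_router(router)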
| 21.951456
| 74
| 0.657231
| 0
| 0
| 0
| 0
| 1,871
| 0.82751
| 1,063
| 0.470146
| 616
| 0.272446
|
2227e03ea94ec70b6e6c0445734948d8034414f4
| 708
|
py
|
Python
|
runehistory_api/app/config.py
|
RuneHistory/runehistory-api
|
4e857c7fdbdf585d57cf4c7fe6214b565ac37a22
|
[
"MIT"
] | null | null | null |
runehistory_api/app/config.py
|
RuneHistory/runehistory-api
|
4e857c7fdbdf585d57cf4c7fe6214b565ac37a22
|
[
"MIT"
] | 6
|
2018-06-14T13:58:43.000Z
|
2018-07-16T14:02:24.000Z
|
runehistory_api/app/config.py
|
RuneHistory/runehistory-api
|
4e857c7fdbdf585d57cf4c7fe6214b565ac37a22
|
[
"MIT"
] | null | null | null |
import yaml
class Config:
def __init__(self, path: str):
self.path = path
self.cfg = {}
self.parse()
def parse(self):
with open(self.path, 'r') as f:
# safe_load avoids constructing arbitrary Python objects and needs no explicit Loader
self.cfg = yaml.safe_load(f)
@property
def secret(self) -> str:
return self.cfg.get('secret')
@property
def db_connection_string(self) -> str:
return self.cfg.get('db_connection_string')
@property
def db_host(self) -> str:
return self.cfg.get('db_host', '127.0.0.1')
@property
def db_port(self) -> int:
return self.cfg.get('db_port', 27017)
@property
def db_name(self) -> str:
return self.cfg.get('db_name', 'runehistory')
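# Hedged usage sketch (not part of the original file); 'config.yml' is a hypothetical path:
#   cfg = Config('config.yml')
#   print(cfg.db_host, cfg.db_port, cfg.db_name)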
| 21.454545
| 53
| 0.572034
| 693
| 0.978814
| 0
| 0
| 449
| 0.634181
| 0
| 0
| 84
| 0.118644
|
222831d9b44232f8e0bea417d43c813b4bde54d1
| 4,379
|
py
|
Python
|
csv_filter/__init__.py
|
mooore-digital/csv_filter
|
80afed0e4b366d195c5a90fb96ab2bf01620e3bf
|
[
"MIT"
] | 1
|
2018-08-13T05:51:21.000Z
|
2018-08-13T05:51:21.000Z
|
csv_filter/__init__.py
|
mooore-digital/csv_filter
|
80afed0e4b366d195c5a90fb96ab2bf01620e3bf
|
[
"MIT"
] | null | null | null |
csv_filter/__init__.py
|
mooore-digital/csv_filter
|
80afed0e4b366d195c5a90fb96ab2bf01620e3bf
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import argparse
import csv
import logging
import os
import re
import sys
DELIMITER = ','
class CsvFilter:
def __init__(
self,
file=None,
deduplicate=False,
filter_query=None,
filter_inverse=False,
ignore_case=False,
verbose=False,
delimiter=DELIMITER
):
self.file = file
self.deduplicate = deduplicate
self.filter = filter_query
self.filter_inverse = filter_inverse
self.ignore_case = ignore_case
self.verbose = verbose
self.delimiter = delimiter
self.logger = logging.getLogger('deduplicate')
if self.verbose:
self.logger.setLevel(logging.DEBUG)
def apply(self):
base_path = os.getcwd()
source_path = base_path + '/' + self.file
destination_path = source_path.replace('.csv', '.filtered.csv')
filtered_items = self.filter_items(source_path)
self.store_items(destination_path, filtered_items)
def filter_items(self, file_path):
result = []
deduplicate_column_index = False
deduplicate_key_values = []
filter_column = False
filter_column_index = False
filter_pattern = False
counter = 0
re_flags = 0
if self.ignore_case:
re_flags = re.IGNORECASE
if self.verbose:
print('* Filtering file', file_path)
if self.filter:
filter_match = re.match('^(.+)=(.+)$', self.filter)
if filter_match:
filter_column = filter_match.group(1)
filter_pattern = filter_match.group(2)
with open(file_path, 'rt') as csv_file:
for row in csv.reader(csv_file, delimiter=self.delimiter):
if counter == 0:
if self.deduplicate:
deduplicate_column_index = row.index(self.deduplicate)
if filter_column:
filter_column_index = row.index(filter_column)
counter += 1
result.append(row)
continue
valid = False
if self.deduplicate and deduplicate_column_index is not False:
value = row[deduplicate_column_index]
if self.ignore_case:
value = value.lower()
if value in deduplicate_key_values:
valid = False
else:
deduplicate_key_values.append(value)
if filter_column_index is not False:
value = row[filter_column_index]
if bool(re.match(filter_pattern, value, re_flags)) is not self.filter_inverse:
valid = True
if valid:
result.append(row)
counter += 1
if self.verbose:
print('* Filtered', counter, 'items to', len(result))
return result
def store_items(self, file_path, items):
if self.verbose:
print('* Storing items to', file_path)
with open(file_path, 'wt') as csv_file:
writer = csv.writer(csv_file, delimiter=self.delimiter)
for row in items:
writer.writerow(row)
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument('--file', '-f', help='File to filter')
parser.add_argument('--deduplicate', help='Deduplication column to be applied', default=False)
parser.add_argument('--filter', help='Filter to be applied', default=False)
parser.add_argument('--filter_inverse', action='store_true', help='Inverse filter matches', default=False)
parser.add_argument('--ignore_case', '-i', action='store_true', help='Match values case insensitive', default=False)
parser.add_argument('--verbose', '-v', action='store_true', help='Enable verbose')
return parser.parse_args()
def main():
args = parse_arguments()
CsvFilter(
file=args.file,
deduplicate=args.deduplicate,
filter_query=args.filter,
filter_inverse=args.filter_inverse,
ignore_case=args.ignore_case,
verbose=args.verbose
).apply()
return 0
if __name__ == '__main__':
sys.exit(main())
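# Hedged usage sketch (not part of the original file): an illustrative invocation; the CSV file
# and its 'email'/'status' columns are hypothetical. Output is written next to the source file
# as contacts.filtered.csv.
#   python __init__.py --file contacts.csv --deduplicate email --filter "status=active" -i -v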
| 31.731884
| 120
| 0.577529
| 3,265
| 0.745604
| 0
| 0
| 0
| 0
| 0
| 0
| 423
| 0.096597
|
2228fe251332d78679abc83f54675d08f42975f5
| 1,435
|
py
|
Python
|
scientist/__init__.py
|
boxed/scientist
|
aadaf5713a4092abc51f1978474ce74d94e56799
|
[
"BSD-3-Clause"
] | null | null | null |
scientist/__init__.py
|
boxed/scientist
|
aadaf5713a4092abc51f1978474ce74d94e56799
|
[
"BSD-3-Clause"
] | null | null | null |
scientist/__init__.py
|
boxed/scientist
|
aadaf5713a4092abc51f1978474ce74d94e56799
|
[
"BSD-3-Clause"
] | null | null | null |
def check_candidate(a, candidate, callback_when_different, *args, **kwargs):
control_result = None
candidate_result = None
control_exception = None
candidate_exception = None
reason = None
try:
control_result = a(*args, **kwargs)
except BaseException as e:
control_exception = e
try:
candidate_result = candidate(*args, **kwargs)
if control_exception is not None:
reason = 'old code raised, new did not'
elif control_result != candidate_result:
reason = 'different results'
except BaseException as e:
candidate_exception = e
if control_exception is None:
reason = 'new code raised, old did not'
else:
if type(control_exception) != type(candidate_exception):
reason = 'new and old both raised exception, but different types'
elif control_exception.args != candidate_exception.args:
reason = 'new and old both raised exception, but with different data'
if reason is not None:
callback_when_different(
control_result=control_result,
candidate_result=candidate_result,
control_exception=control_exception,
candidate_exception=candidate_exception,
reason=reason,
)
if control_exception is not None:
raise control_exception
return control_result
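# Hedged usage sketch (not part of the original file): run an old and a new implementation side
# by side and report any divergence; the functions below are illustrative.
#   def old_total(xs): return sum(xs)
#   def new_total(xs): return sum(x for x in xs)
#   def on_mismatch(**kwargs): print("mismatch:", kwargs["reason"])
#   check_candidate(old_total, new_total, on_mismatch, [1, 2, 3])  # -> 6, callback not called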
| 32.613636
| 85
| 0.643206
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 195
| 0.135889
|
22298775a674168a052235a68913e1eaa95ece94
| 9,883
|
py
|
Python
|
bluebottle/impact/tests/test_api.py
|
terrameijar/bluebottle
|
b4f5ba9c4f03e678fdd36091b29240307ea69ffd
|
[
"BSD-3-Clause"
] | 10
|
2015-05-28T18:26:40.000Z
|
2021-09-06T10:07:03.000Z
|
bluebottle/impact/tests/test_api.py
|
terrameijar/bluebottle
|
b4f5ba9c4f03e678fdd36091b29240307ea69ffd
|
[
"BSD-3-Clause"
] | 762
|
2015-01-15T10:00:59.000Z
|
2022-03-31T15:35:14.000Z
|
bluebottle/impact/tests/test_api.py
|
terrameijar/bluebottle
|
b4f5ba9c4f03e678fdd36091b29240307ea69ffd
|
[
"BSD-3-Clause"
] | 9
|
2015-02-20T13:19:30.000Z
|
2022-03-08T14:09:17.000Z
|
# coding=utf-8
from builtins import str
import json
from django.contrib.auth.models import Group, Permission
from django.urls import reverse
from rest_framework import status
from bluebottle.impact.models import ImpactGoal
from bluebottle.impact.tests.factories import (
ImpactTypeFactory, ImpactGoalFactory
)
from bluebottle.time_based.tests.factories import DateActivityFactory
from bluebottle.members.models import MemberPlatformSettings
from bluebottle.test.factory_models.accounts import BlueBottleUserFactory
from bluebottle.test.utils import BluebottleTestCase, JSONAPITestClient
class ImpactTypeListAPITestCase(BluebottleTestCase):
def setUp(self):
super(ImpactTypeListAPITestCase, self).setUp()
self.client = JSONAPITestClient()
self.types = ImpactTypeFactory.create_batch(10)
self.url = reverse('impact-type-list')
self.user = BlueBottleUserFactory()
def test_get(self):
response = self.client.get(self.url, user=self.user)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.json()['data']), len(self.types))
resource = response.json()['data'][0]['attributes']
self.assertTrue('slug' in resource)
self.assertTrue('name' in resource)
self.assertTrue('unit' in resource)
self.assertTrue('text' in resource)
self.assertTrue('text-with-target' in resource)
self.assertTrue('text-passed' in resource)
resource_type = response.json()['data'][0]['type']
self.assertEqual(resource_type, 'activities/impact-types')
def test_get_anonymous(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.json()['data']), len(self.types))
def test_get_only_active(self):
self.types[0].active = False
self.types[0].save()
response = self.client.get(self.url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.json()['data']), len(self.types) - 1)
def test_get_closed(self):
MemberPlatformSettings.objects.update(closed=True)
response = self.client.get(self.url, user=self.user)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_get_closed_anonymous(self):
MemberPlatformSettings.objects.update(closed=True)
response = self.client.get(self.url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_post(self):
response = self.client.post(self.url, user=self.user)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
class ImpactGoalListAPITestCase(BluebottleTestCase):
def setUp(self):
super(ImpactGoalListAPITestCase, self).setUp()
self.client = JSONAPITestClient()
self.activity = DateActivityFactory.create()
self.type = ImpactTypeFactory.create()
self.url = reverse('impact-goal-list')
self.data = {
'data': {
'type': 'activities/impact-goals',
'attributes': {
'target': 1.5
},
'relationships': {
'activity': {
'data': {
'type': 'activities/time-based/dates',
'id': self.activity.pk
},
},
'type': {
'data': {
'type': 'activities/impact-types',
'id': self.type.pk
},
}
}
}
}
def test_create(self):
response = self.client.post(
self.url,
json.dumps(self.data),
user=self.activity.owner
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
resource_type = response.json()['data']['type']
self.assertEqual(resource_type, 'activities/impact-goals')
goal = ImpactGoal.objects.get(pk=response.json()['data']['id'])
self.assertEqual(
goal.target, self.data['data']['attributes']['target']
)
self.assertEqual(goal.type, self.type)
self.assertEqual(goal.activity, self.activity)
def test_create_no_target(self):
del self.data['data']['attributes']['target']
response = self.client.post(
self.url,
json.dumps(self.data),
user=self.activity.owner
)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
resource_type = response.json()['data']['type']
self.assertEqual(resource_type, 'activities/impact-goals')
goal = ImpactGoal.objects.get(pk=response.json()['data']['id'])
self.assertEqual(
goal.target, None
)
self.assertEqual(goal.type, self.type)
self.assertEqual(goal.activity, self.activity)
def test_create_non_owner(self):
response = self.client.post(
self.url,
json.dumps(self.data),
user=BlueBottleUserFactory.create()
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_create_anonymous(self):
response = self.client.post(
self.url,
json.dumps(self.data),
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
class ImpactGoalDetailsAPITestCase(BluebottleTestCase):
def setUp(self):
super(ImpactGoalDetailsAPITestCase, self).setUp()
self.client = JSONAPITestClient()
self.activity = DateActivityFactory.create()
self.type = ImpactTypeFactory.create()
self.goal = ImpactGoalFactory(type=self.type, activity=self.activity)
self.url = reverse('impact-goal-details', args=(self.goal.pk, ))
self.data = {
'data': {
'type': 'activities/impact-goals',
'id': self.goal.pk,
'attributes': {
'target': 1.5
},
}
}
def test_get(self):
response = self.client.get(
self.url,
user=self.activity.owner
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
data = response.json()['data']
self.assertEqual(data['type'], 'activities/impact-goals')
self.assertEqual(
data['attributes']['target'], self.goal.target
)
self.assertEqual(
data['relationships']['type']['data']['id'],
str(self.goal.type.pk)
)
self.assertEqual(
data['relationships']['activity']['data']['id'],
str(self.goal.activity.pk)
)
def test_get_incomplete(self):
self.goal.target = None
self.goal.save()
response = self.client.get(
self.url,
user=self.activity.owner
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
data = response.json()['data']
self.assertEqual(data['meta']['required'], [])
def test_get_non_owner(self):
response = self.client.get(
self.url,
user=BlueBottleUserFactory.create()
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_get_anonymous(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_get_closed_anonymous(self):
anonymous = Group.objects.get(name='Anonymous')
anonymous.permissions.remove(
Permission.objects.get(codename='api_read_dateactivity')
)
MemberPlatformSettings.objects.update(closed=True)
response = self.client.get(self.url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_update(self):
response = self.client.patch(
self.url,
data=json.dumps(self.data),
user=self.activity.owner
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
data = response.json()['data']
self.assertEqual(data['type'], 'activities/impact-goals')
self.assertEqual(
data['attributes']['target'],
self.data['data']['attributes']['target']
)
self.goal.refresh_from_db()
self.assertEqual(
self.goal.target,
self.data['data']['attributes']['target']
)
def test_update_other_user(self):
response = self.client.patch(
self.url,
data=json.dumps(self.data),
user=BlueBottleUserFactory.create()
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_update_anonymous(self):
response = self.client.patch(
self.url,
data=json.dumps(self.data)
)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_delete(self):
response = self.client.delete(
self.url,
user=self.activity.owner
)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
with self.assertRaises(ImpactGoal.DoesNotExist):
ImpactGoal.objects.get(pk=self.goal.pk)
def test_delete_other_user(self):
response = self.client.delete(
self.url,
user=BlueBottleUserFactory.create()
)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_delete_anonymous(self):
response = self.client.delete(self.url)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
| 33.16443
| 82
| 0.610746
| 9,281
| 0.939087
| 0
| 0
| 0
| 0
| 0
| 0
| 868
| 0.087828
|
222b145f8daf822353fc31ee9861239abfadffb3
| 11,613
|
py
|
Python
|
dotmotif/parsers/v2/test_v2_parser.py
|
aplbrain/dotmotif
|
db093ddad7308756e9cf7ee01199f0dca1369872
|
[
"Apache-2.0"
] | 28
|
2020-06-12T20:46:15.000Z
|
2022-02-05T18:33:46.000Z
|
dotmotif/parsers/v2/test_v2_parser.py
|
aplbrain/dotmotif
|
db093ddad7308756e9cf7ee01199f0dca1369872
|
[
"Apache-2.0"
] | 26
|
2020-06-09T20:09:32.000Z
|
2022-02-01T18:22:20.000Z
|
dotmotif/parsers/v2/test_v2_parser.py
|
aplbrain/dotmotif
|
db093ddad7308756e9cf7ee01199f0dca1369872
|
[
"Apache-2.0"
] | 4
|
2021-03-08T02:47:49.000Z
|
2021-09-13T19:16:29.000Z
|
from . import ParserV2
import dotmotif
import unittest
_THREE_CYCLE = """A -> B\nB -> C\nC -> A\n"""
_THREE_CYCLE_NEG = """A !> B\nB !> C\nC !> A\n"""
_THREE_CYCLE_INH = """A -| B\nB -| C\nC -| A\n"""
_THREE_CYCLE_NEG_INH = """A !| B\nB !| C\nC !| A\n"""
_ABC_TO_D = """\nA -> D\nB -> D\nC -> D\n"""
_THREE_CYCLE_CSV = """\nA,B\nB,C\nC,A\n"""
_THREE_CYCLE_NEG_CSV = """\nA,B\nB,C\nC,A\n"""
class TestDotmotif_Parserv2_DM(unittest.TestCase):
def test_sanity(self):
self.assertEqual(1, 1)
def test_dm_parser(self):
dm = dotmotif.Motif(_THREE_CYCLE)
self.assertEqual(len(dm._g.edges()), 3)
self.assertEqual(len(dm._g.nodes()), 3)
def test_dm_parser_actions(self):
dm = dotmotif.Motif(_THREE_CYCLE)
self.assertEqual([e[2]["action"] for e in dm._g.edges(data=True)], ["SYN"] * 3)
dm = dotmotif.Motif(_THREE_CYCLE_INH)
self.assertEqual([e[2]["action"] for e in dm._g.edges(data=True)], ["INH"] * 3)
def test_dm_parser_edge_exists(self):
dm = dotmotif.Motif(_THREE_CYCLE)
self.assertEqual([e[2]["exists"] for e in dm._g.edges(data=True)], [True] * 3)
dm = dotmotif.Motif(_THREE_CYCLE_NEG)
self.assertEqual([e[2]["exists"] for e in dm._g.edges(data=True)], [False] * 3)
dm = dotmotif.Motif(_THREE_CYCLE_NEG_INH)
self.assertEqual([e[2]["exists"] for e in dm._g.edges(data=True)], [False] * 3)
class TestDotmotif_Parserv2_DM_Macros(unittest.TestCase):
def test_macro_not_added(self):
exp = """\
edge(A, B) {
A -> B
}
"""
dm = dotmotif.Motif(exp)
self.assertEqual(len(dm._g.edges()), 0)
def test_simple_macro(self):
exp = """\
edge(A, B) {
A -> B
}
edge(C, D)
"""
dm = dotmotif.Motif(exp)
self.assertEqual(len(dm._g.edges()), 1)
def test_simple_macro_construction(self):
exp = """\
edge(A, B) {
A -> B
}
edge(C, D)
"""
dm = dotmotif.Motif(exp)
exp_edge = list(dm._g.edges(data=True))[0]
self.assertEqual(exp_edge[0], "C")
self.assertEqual(exp_edge[1], "D")
def test_multiline_macro_construction(self):
exp = """\
dualedge(A, B) {
A -> B
B -> A
}
dualedge(C, D)
"""
dm = dotmotif.Motif(exp)
exp_edge = list(dm._g.edges(data=True))[0]
self.assertEqual(exp_edge[0], "C")
self.assertEqual(exp_edge[1], "D")
def test_undefined_macro(self):
exp = """\
dualedge(A, B) {
A -> B
B -> A
}
foo(C, D)
"""
# with self.assertRaises(ValueError):
with self.assertRaises(Exception):
dotmotif.Motif(exp)
def test_wrong_args_macro(self):
exp = """\
edge(A, B) {
A -> B
B -> A
}
edge(C, D, E)
"""
# with self.assertRaises(ValueError):
with self.assertRaises(Exception):
dotmotif.Motif(exp)
def test_more_complex_macro(self):
exp = """\
tri(A, B, C) {
A -> B
B -> C
C -> A
}
tri(C, D, E)
"""
dm = dotmotif.Motif(exp)
edges = list(dm._g.edges(data=True))
self.assertEqual(len(edges), 3)
def test_macro_reuse(self):
exp = """\
tri(A, B, C) {
A -> B
B -> C
C -> A
}
tri(C, D, E)
tri(F, G, H)
"""
dm = dotmotif.Motif(exp)
edges = list(dm._g.edges(data=True))
self.assertEqual(len(edges), 6)
def test_conflicting_macro_invalid_edge_throws(self):
exp = """\
tri(A, B, C) {
A -> B
B -> C
C -> A
}
nontri(A, B, C) {
A !> B
B !> C
C !> A
}
tri(C, D, E)
nontri(D, E, F)
"""
# with self.assertRaises(dotmotif.validators.DisagreeingEdgesValidatorError):
with self.assertRaises(Exception):
dotmotif.Motif(exp)
def test_nested_macros(self):
exp = """\
dualedge(A, B) {
A -> B
B -> A
}
dualtri(A, B, C) {
dualedge(A, B)
dualedge(B, C)
dualedge(C, A)
}
dualtri(foo, bar, baz)
"""
dm = dotmotif.Motif(exp)
edges = list(dm._g.edges(data=True))
self.assertEqual(len(edges), 6)
def test_deeply_nested_macros(self):
exp = """\
edge(A, B) {
A -> B
}
dualedge(A, B) {
edge(A, B)
edge(B, A)
}
dualtri(A, B, C) {
dualedge(A, B)
dualedge(B, C)
dualedge(C, A)
}
dualtri(foo, bar, baz)
"""
dm = dotmotif.Motif(exp)
edges = list(dm._g.edges(data=True))
self.assertEqual(len(edges), 6)
def test_clustercuss_macros_no_repeats(self):
exp = """\
edge(A, B) {
A -> B
}
dualedge(A, B) {
edge(A, B)
edge(B, A)
}
dualtri(A, B, C) {
dualedge(A, B)
dualedge(B, C)
dualedge(C, A)
}
dualtri(foo, bar, baz)
dualtri(foo, bar, baf)
"""
dm = dotmotif.Motif(exp)
edges = list(dm._g.edges(data=True))
self.assertEqual(len(edges), 10)
def test_comment_in_macro(self):
exp = """\
# Outside comment
edge(A, B) {
# Inside comment
A -> B
}
dualedge(A, B) {
# Nested-inside comment
edge(A, B)
edge(B, A)
}
dualedge(foo, bar)
"""
dm = dotmotif.Motif(exp)
edges = list(dm._g.edges(data=True))
self.assertEqual(len(edges), 2)
def test_combo_macro(self):
exp = """\
edge(A, B) {
A -> B
}
dualedge(A, B) {
# Nested-inside comment!
edge(A, B)
B -> A
}
dualedge(foo, bar)
"""
dm = dotmotif.Motif(exp)
edges = list(dm._g.edges(data=True))
self.assertEqual(len(edges), 2)
def test_comment_macro_inline(self):
exp = """\
edge(A, B) {
A -> B
}
dualedge(A, B) {
# Nested-inside comment!
edge(A, B) # inline comment
B -> A
}
dualedge(foo, bar) # inline comment
# standalone comment
foo -> bar # inline comment
"""
dm = dotmotif.Motif(exp)
edges = list(dm._g.edges(data=True))
self.assertEqual(len(edges), 2)
def test_alphanumeric_variables(self):
exp = """\
edge(A, B) {
A -> B
}
dualedge(A1, B) {
# Nested-inside comment!
edge(A1, B) # inline comment
B -> A1
}
dualedge(foo_1, bar_2) # inline comment
# standalone comment
foo_1 -> bar_2 # inline comment
"""
dm = dotmotif.Motif(exp)
edges = list(dm._g.edges(data=True))
self.assertEqual(len(edges), 2)
self.assertEqual(list(dm._g.nodes()), ["foo_1", "bar_2"])
self.assertEqual(type(list(dm._g.nodes())[0]), str)
new_exp = """
L1 -> Mi1
L1 -> Tm3
L3 -> Mi9
"""
dm = dotmotif.Motif(new_exp)
self.assertEqual(list(dm._g.nodes()), ["L1", "Mi1", "Tm3", "L3", "Mi9"])
class TestDotmotif_Parserv2_DM_EdgeAttributes(unittest.TestCase):
def test_basic_edge_attr(self):
exp = """\
Aa -> Ba [type == 1]
"""
dm = dotmotif.Motif(exp)
self.assertEqual(len(dm._g.edges()), 1)
u, v, d = list(dm._g.edges(["Aa", "Bb"], data=True))[0]
self.assertEqual(type(list(dm._g.nodes())[0]), str)
self.assertEqual(type(list(dm._g.nodes())[1]), str)
self.assertEqual(d["constraints"]["type"], {"==": [1]})
def test_edge_multi_attr(self):
exp = """\
Aa -> Ba [type != 1, type != 12]
"""
dm = dotmotif.Motif(exp)
self.assertEqual(len(dm._g.edges()), 1)
u, v, d = list(dm._g.edges(data=True))[0]
self.assertEqual(d["constraints"]["type"], {"!=": [1, 12]})
def test_edge_macro_attr(self):
exp = """\
macro(Aa, Ba) {
Aa -> Ba [type != 1, type != 12]
}
macro(X, Y)
"""
dm = dotmotif.Motif(exp)
self.assertEqual(len(dm._g.edges()), 1)
u, v, d = list(dm._g.edges(data=True))[0]
self.assertEqual(d["constraints"]["type"], {"!=": [1, 12]})
class TestDotmotif_Parserv2_DM_NodeAttributes(unittest.TestCase):
def test_basic_node_attr(self):
exp = """\
Aa -> Ba
Aa.type = "excitatory"
"""
dm = dotmotif.Motif(exp)
self.assertEqual(len(dm.list_node_constraints()), 1)
self.assertEqual(list(dm.list_node_constraints().keys()), ["Aa"])
def test_node_multi_attr(self):
exp = """\
Aa -> Ba
Aa.type = "excitatory"
Aa.size = 4.5
"""
dm = dotmotif.Motif(exp)
self.assertEqual(len(dm.list_node_constraints()), 1)
self.assertEqual(len(dm.list_node_constraints()["Aa"]), 2)
self.assertEqual(dm.list_node_constraints()["Aa"]["type"]["="], ["excitatory"])
self.assertEqual(dm.list_node_constraints()["Aa"]["size"]["="], [4.5])
self.assertEqual(list(dm.list_node_constraints().keys()), ["Aa"])
def test_multi_node_attr(self):
exp = """\
Aa -> Ba
Aa.type = "excitatory"
Ba.size=4.0
"""
dm = dotmotif.Motif(exp)
self.assertEqual(len(dm.list_node_constraints()), 2)
self.assertEqual(list(dm.list_node_constraints().keys()), ["Aa", "Ba"])
def test_node_macro_attr(self):
exp = """\
macro(A) {
A.type = "excitatory"
A.size >= 4.0
}
Aaa -> Ba
macro(Aaa)
"""
dm = dotmotif.Motif(exp)
self.assertEqual(len(dm.list_node_constraints()), 1)
self.assertEqual(list(dm.list_node_constraints().keys()), ["Aaa"])
exp = """\
macro(A) {
A.type = "excitatory"
A.size >= 4.0
}
Aaa -> Ba
macro(Aaa)
macro(Ba)
"""
dm = dotmotif.Motif(exp)
self.assertEqual(len(dm.list_node_constraints()), 2)
self.assertEqual(list(dm.list_node_constraints().keys()), ["Aaa", "Ba"])
class TestDynamicNodeConstraints(unittest.TestCase):
def test_dynamic_constraints(self):
"""
Test that comparisons may be made between variables, e.g.:
A.type != B.type
"""
exp = """\
A -> B
A.radius < B.radius
"""
dm = dotmotif.Motif(exp)
self.assertEqual(len(dm.list_dynamic_node_constraints()), 1)
def test_dynamic_constraints_in_macro(self):
"""
Test that comparisons may be made between variables in a macro, e.g.:
A.type != B.type
"""
exp = """\
macro(A, B) {
A.radius > B.radius
}
macro(A, B)
A -> B
"""
dm = dotmotif.Motif(exp)
self.assertEqual(len(dm.list_dynamic_node_constraints()), 1)
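# Hedged usage sketch (not part of the original file), mirroring the tests above: a three-node
# feed-forward motif built directly from the DotMotif DSL.
#   m = dotmotif.Motif("A -> B\nB -> C\nA -> C\n")
#   len(m._g.edges())  # -> 3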
| 26.819861
| 87
| 0.490313
| 11,205
| 0.964867
| 0
| 0
| 0
| 0
| 0
| 0
| 4,870
| 0.419358
|
222b699bb098dde76e50ecba30e5ab86e3537dcc
| 1,382
|
py
|
Python
|
examples/classification_mnist/main.py
|
yassersouri/fandak
|
2bbadb6d78fcf73dc39f5342aa34c53fa3341c5a
|
[
"MIT"
] | 15
|
2019-07-12T14:04:46.000Z
|
2020-08-04T12:30:30.000Z
|
examples/classification_mnist/main.py
|
yassersouri/fandak
|
2bbadb6d78fcf73dc39f5342aa34c53fa3341c5a
|
[
"MIT"
] | 2
|
2019-07-12T17:06:56.000Z
|
2019-07-17T12:05:32.000Z
|
examples/classification_mnist/main.py
|
yassersouri/fandak
|
2bbadb6d78fcf73dc39f5342aa34c53fa3341c5a
|
[
"MIT"
] | null | null | null |
from typing import List
import click
import torch
from fandak.utils import common_config
from fandak.utils import set_seed
from fandak.utils.config import update_config
from proj.config import get_config_defaults
from proj.datasets import MNISTClassification
from proj.evaluators import ValidationEvaluator
from proj.models import MLPModel
from proj.trainers import SimpleTrainer
@click.command()
@common_config
@click.option("--exp-name", default="")
def main(file_configs: List[str], set_configs: List[str], exp_name: str):
cfg = update_config(
default_config=get_config_defaults(),
file_configs=file_configs,
set_configs=set_configs,
)
if exp_name != "":
cfg.defrost()
cfg.experiment_name = exp_name
cfg.freeze()
print(cfg)
# set_seed(cfg.system.seed)
device = torch.device(cfg.system.device)
train_db = MNISTClassification(cfg, train=True)
test_db = MNISTClassification(cfg, train=False)
if cfg.model.name == "MLP":
model = MLPModel(cfg)
else:
raise Exception("Invalid model name (%s)" % cfg.model.name)
evaluators = [ValidationEvaluator(cfg, test_db, model, device)]
trainer = SimpleTrainer(
cfg, cfg.experiment_name, train_db, model, device, evaluators
)
trainer.train()
trainer.save_training()
if __name__ == "__main__":
main()
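# Hedged usage sketch (not part of the original file): an illustrative command line; any extra
# config flags contributed by fandak's common_config decorator are omitted here.
#   python main.py --exp-name mnist_mlp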
| 26.075472
| 73
| 0.708394
| 0
| 0
| 0
| 0
| 957
| 0.692475
| 0
| 0
| 83
| 0.060058
|
222b80300db760788fdc862f944935f9de93f40f
| 864
|
py
|
Python
|
tests/test_problem_solving_algorithms_sorting.py
|
mxdzi/hackerrank
|
4455f73e4479a4204b2e1167253f6a02351aa5b7
|
[
"MIT"
] | null | null | null |
tests/test_problem_solving_algorithms_sorting.py
|
mxdzi/hackerrank
|
4455f73e4479a4204b2e1167253f6a02351aa5b7
|
[
"MIT"
] | null | null | null |
tests/test_problem_solving_algorithms_sorting.py
|
mxdzi/hackerrank
|
4455f73e4479a4204b2e1167253f6a02351aa5b7
|
[
"MIT"
] | null | null | null |
from problem_solving.algorithms.sorting import *
def test_q1_big_sorting(capsys, monkeypatch):
inputs = ["6",
"31415926535897932384626433832795",
"1",
"3",
"10",
"3",
"5"]
monkeypatch.setattr('builtins.input', lambda: inputs.pop(0))
q1_big_sorting.main()
captured = capsys.readouterr()
output = ("1\n"
"3\n"
"3\n"
"5\n"
"10\n"
"31415926535897932384626433832795\n")
assert captured.out == output
def test_q12_find_the_median(capsys, monkeypatch):
inputs = ["7",
"0 1 2 4 6 5 3"]
monkeypatch.setattr('builtins.input', lambda: inputs.pop(0))
q12_find_the_median.main()
captured = capsys.readouterr()
output = "3\n"
assert captured.out == output
| 25.411765
| 64
| 0.545139
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 170
| 0.196759
|
222dd53901bfb2ab9baf636ea45e6459defef6a1
| 9,975
|
py
|
Python
|
runOtakuBot.py
|
Eagleheardt/otakuBot
|
6f8576423bb1b0701d5a60095bed7552b2711bab
|
[
"Unlicense"
] | null | null | null |
runOtakuBot.py
|
Eagleheardt/otakuBot
|
6f8576423bb1b0701d5a60095bed7552b2711bab
|
[
"Unlicense"
] | null | null | null |
runOtakuBot.py
|
Eagleheardt/otakuBot
|
6f8576423bb1b0701d5a60095bed7552b2711bab
|
[
"Unlicense"
] | null | null | null |
import sqlite3
from sqlite3 import Error
import os
import time
import datetime
import re
import random
import schedule
import cryptography
from apscheduler.schedulers.background import BackgroundScheduler
from slackclient import SlackClient
from cryptography.fernet import Fernet
conn = sqlite3.connect('/home/ubuntu/otakuBot/data/anime.db')
serverCursor = conn.cursor()
keyFile = open('/home/ubuntu/otakuBot/data/otakubot_token.key', 'rb')
key = keyFile.read()
keyFile.close()
f = Fernet(key)
encryptedTokenFile = open('/home/ubuntu/otakuBot/data/otakubot_token.encrypted', 'rb')
encryptedToken = encryptedTokenFile.read()
decryptedToken = f.decrypt(encryptedToken)
SLACK_BOT_TOKEN = decryptedToken.decode()
# instantiate Slack client
slack_client = SlackClient(SLACK_BOT_TOKEN)
# starterbot's user ID in Slack: value is assigned after the bot starts up
otakuBotID = None
# constants
RTM_READ_DELAY = 0.5 # 0.5 second delay in reading events
def stdOut(s):
curDate = datetime.datetime.today().strftime('%Y-%m-%d')
curTime = datetime.datetime.now().strftime('%H:%M:%S')
logFile = open((("/home/ubuntu/logs/{0}.log").format(curDate)),"a")
logFile.write(("{0}: {1}\n").format(curTime,s))
logFile.close()
return
def logIt():
curDate = datetime.datetime.today().strftime('%Y-%m-%d')
curTime = datetime.datetime.now().strftime('%H:%M:%S')
logFile = open((("/home/ubuntu/logs/{0}.log").format(curDate)),"a")
logFile.write(("{0}: Otaku 15 minute check in!\n").format(curTime))
logFile.close()
return
schedule.every(15).minutes.do(logIt)
def SQLReturn(aConn,sqlCmd):
reportCur = aConn.cursor()
reportCur.execute(sqlCmd)
SQLResults = reportCur.fetchall()
reportCur.close()
return SQLResults
def insertQuote (aUser,theQuote):
newCur = conn.cursor()
newCur.execute(("""
INSERT INTO
Quotes (User, Words)
VALUES
('{0}','{1}');
""").format(aUser,theQuote))
newCur.close()
conn.commit()
return
def insertAniMusic (aUser,theLink):
newCur = conn.cursor()
newCur.execute(("""
INSERT INTO
Music (Category, User, Link)
VALUES
('Anime','{0}','{1}');
""").format(aUser,theLink))
newCur.close()
conn.commit()
return
def insertEngMusic (aUser,theLink):
newCur = conn.cursor()
newCur.execute(("""
INSERT INTO
Music (Category, User, Link)
VALUES
('English','{0}','{1}');
""").format(aUser,theLink))
newCur.close()
conn.commit()
return
def insertIcon (aUser,theLink):
newCur = conn.cursor()
newCur.execute(("""
INSERT INTO
Music (Category, User, Link)
VALUES
('Iconic','{0}','{1}');
""").format(aUser,theLink))
newCur.close()
conn.commit()
return
def deleteQuote (quoteID):
newCur = conn.cursor()
newCur.execute(("""
DELETE
FROM
Quotes
WHERE
ID == {0};
""").format(quoteID))
newCur.close()
conn.commit()
return
def getQuote(aConn):
sqlCmd = "SELECT Words FROM Quotes;"
results = SQLReturn(aConn,sqlCmd)
allQuotes = []
for quote in results:
allQuotes.append(quote)
return (random.choice(allQuotes))
def getAniMusic(aConn):
sqlCmd = "SELECT Link FROM Music WHERE Category = 'Anime';"
results = SQLReturn(aConn,sqlCmd)
allQuotes = []
for quote in results:
allQuotes.append(quote)
return (random.choice(allQuotes))
def getEngMusic(aConn):
sqlCmd = "SELECT Link FROM Music WHERE Category = 'English';"
results = SQLReturn(aConn,sqlCmd)
allQuotes = []
for quote in results:
allQuotes.append(quote)
return (random.choice(allQuotes))
def getIconic(aConn):
sqlCmd = "SELECT Link FROM Music WHERE Category = 'Iconic';"
results = SQLReturn(aConn,sqlCmd)
allQuotes = []
for quote in results:
allQuotes.append(quote)
return (random.choice(allQuotes))
def getAllQuotes(aConn):
sqlCmd = "SELECT ID, Words FROM Quotes;"
results = SQLReturn(aConn,sqlCmd)
allQuotes = []
for quote in results:
allQuotes.append(quote)
newStr = "All the Quotes\n"
for item in allQuotes:
i = 1
for place in item:
if i == 1:
newStr += "ID: " + str(place) + "\n"
if i == 2:
newStr += "Words: " + str(place) + "\n\n"
i += 1
return newStr
def EODReportRange (date1, date2): # Gets a range summary of the VM number and status reported
cmd = (("""
SELECT
ServerNumber as [Server]
, ServerStatus as [Status]
, count(ServerStatus) as [Amount]
FROM
Status
WHERE
date(TimeStamp) BETWEEN '{0}' AND '{1}'
AND ServerNumber IN('1','2','3','4','17')
GROUP BY
ServerNumber
,ServerStatus
""").format(date1, date2))
results = SQLReturn(conn,cmd)
newStr = "Report for: " + date1 + " to " + date2 + "\n"
for row in results:
i = 1
for item in row:
if i == 1:
newStr += "VM" + str(item) + " - "
if i == 2:
newStr += "Status: " + str(item) + " - "
if i == 3:
if item != 1:
newStr += "Reported: " + str(item) + " times"
else:
newStr += "Reported: " + str(item) + " time"
i += 1
newStr += "\n"
return newStr
def parseSlackInput(aText):
    if aText and len(aText) > 0:
        item = aText[0]
        if 'text' in item:
            msg = item['text'].strip(' ')
            chn = item['channel']
            usr = item['user']
            stp = item['ts']
            return [str(msg),str(chn),str(usr),str(stp)]
    # No events, or an event without a 'text' payload: return placeholders so the
    # caller's four-way unpacking still succeeds instead of receiving None.
    return [None,None,None,None]
def inChannelResponse(channel,response):
slack_client.api_call(
"chat.postMessage",
channel=channel,
text=response,
as_user=True
)
return
def threadedResponse(channel,response,stamp):
slack_client.api_call(
"chat.postMessage",
channel=channel,
text=response,
thread_ts=stamp,
as_user=True
)
return
def directResponse(someUser,text):
slack_client.api_call(
"chat.postMessage",
channel=someUser,
text=text,
as_user=True
)
return
def parseQuote(someMsg):
starter,theQuote = someMsg.split(' ', 1)
return theQuote
def handle_command(command, channel, aUser, tStamp):
"""
Executes bot command if the command is known
"""
#command = command.lower()
response = None
# This is where you start to implement more commands!
if command.lower().startswith("!help"):
response = """I'm Otaku Bot!
I don't do a lot yet. But watch out! I'm just getting started!
!addquote[SPACE][A quote of your choice!] - I will remember your quote!
!quote - I will reply with a random quote!
!addAniMusic[SPACE][Link to a Japanese anime song] - I will remember your music!
!addEngMusic[SPACE][Link to an English anime song] - I will remember your music!
!addIconic[SPACE][Link to an iconic anime moment] - I will remember your moment!
!animusic - I will reply with a Japanese anime song from memory!
!engmusic - I will reply with an English anime song from memory!
!iconic - I will show you an iconic anime moment!
"""
inChannelResponse(channel,response)
return
if command.lower().startswith("!addquote"):
newQuote = str(command[10:])
insertQuote(aUser,newQuote)
threadedResponse(channel,"I'll try to remember: " + newQuote ,tStamp)
stdOut("Quote Added: " + newQuote)
return
if command.lower().startswith("!quote"):
aQuote = getQuote(conn)
inChannelResponse(channel,aQuote)
return
if command.lower().startswith("!animusic"):
aQuote = getAniMusic(conn)
inChannelResponse(channel,aQuote)
return
if command.lower().startswith("!engmusic"):
aQuote = getEngMusic(conn)
inChannelResponse(channel,aQuote)
return
if command.lower().startswith("!iconic"):
aQuote = getIconic(conn)
inChannelResponse(channel,aQuote)
return
if command.lower().startswith("!onepunch"):
inChannelResponse(channel,"https://www.youtube.com/watch?v=_TUTJ0klnKk")
return
if command.lower().startswith("!addanimusic"):
newQuote = str(command[13:])
insertAniMusic(aUser,newQuote)
threadedResponse(channel,"I'll add this to the Anime music section: " + newQuote ,tStamp)
stdOut("Anime Music Added: " + newQuote)
return
if command.lower().startswith("!addengmusic"):
newQuote = str(command[13:])
insertEngMusic(aUser,newQuote)
threadedResponse(channel,"I'll add this to the English music section: " + newQuote ,tStamp)
stdOut("English Music Added: " + newQuote)
return
if command.lower().startswith("!addiconic"):
newQuote = str(command[11:])
insertIcon(aUser,newQuote)
threadedResponse(channel,"I'll add this to the Iconic moments section: " + newQuote ,tStamp)
stdOut("Iconic Moment Added: " + newQuote)
return
if command.lower().startswith("!delquote"):
if aUser == "UC176R92M":
num = command[10:]
deleteQuote(num)
inChannelResponse(channel,"You have removed a quote.")
else:
inChannelResponse(channel,"You don't have permission to do that!")
return
if command.lower().startswith("!getquotes"):
if aUser == "UC176R92M":
inChannelResponse(channel,getAllQuotes(conn))
else:
inChannelResponse(channel,"You don't have permission to do that!")
return
if command.startswith("!test"):
return
response = (("""Text:{0}
Channel:{1}
TS:{2}
User:{3}
""").format(command,channel,tStamp,aUser))
inChannelResponse(channel,response)
return
return
# Sends the response back to the channel
if __name__ == "__main__":
if slack_client.rtm_connect(with_team_state=False):
stdOut("Otaku Bot connected and running!")
# Read bot's user ID by calling Web API method `auth.test`
otakuBotID = slack_client.api_call("auth.test")["user_id"]
while True:
try:
command, channel,usr,stp = parseSlackInput(slack_client.rtm_read())
if command:
handle_command(command, channel,usr,stp)
except:
pass
schedule.run_pending()
time.sleep(RTM_READ_DELAY)
else:
stdOut("Connection failed. Exception traceback printed above.")
| 27.631579
| 95
| 0.665063
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,305
| 0.331328
|
222fcf6d9b65f24912507aa874036c0ee4a1261b
| 1,555
|
py
|
Python
|
ocr.py
|
tunc2112/uet-img-processing
|
6a191fe6927d7d0844742240cf4a39587c965d16
|
[
"MIT"
] | null | null | null |
ocr.py
|
tunc2112/uet-img-processing
|
6a191fe6927d7d0844742240cf4a39587c965d16
|
[
"MIT"
] | null | null | null |
ocr.py
|
tunc2112/uet-img-processing
|
6a191fe6927d7d0844742240cf4a39587c965d16
|
[
"MIT"
] | null | null | null |
from PIL import Image
import cv2
import pytesseract
import tesserocr
from pyocr import pyocr
from pyocr import builders
import sys
import os
def get_image_filename(img_id):
filename = "img_src/src{0:0>3}".format(img_id)
for ext in [".png", ".jpg", ".jpeg"]:
if os.path.exists(os.path.join(os.getcwd(), filename + ext)):
return filename + ext
def write_output(libname, img_id, text):
filename = "./output/output_{1}_{0:0>3}.txt".format(img_id, libname)
with open(filename, "w") as f:
f.write(text)
def ocr_pytesseract(img_id):
img_filename = './' + get_image_filename(img_id)
img = cv2.imread(img_filename, cv2.IMREAD_COLOR)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv2.bilateralFilter(gray, 11, 17, 17)
original = pytesseract.image_to_string(gray, config='')
# write_output expects (libname, img_id, text): pass the image id, not the image array
write_output("pytesseract", img_id, original)
def ocr_pyocr(img_id):
img_filename = './' + get_image_filename(img_id)
img = cv2.imread(img_filename, cv2.IMREAD_COLOR)
tools = pyocr.get_available_tools()
if len(tools) == 0:
print("No OCR tool found")
sys.exit(1)
tool = tools[0]
langs = tool.get_available_languages()
lang = langs[0]
txt = tool.image_to_string(
Image.open(img_filename),
lang=lang,
builder=builders.TextBuilder()
)
write_output("pyocr", img_id, txt)
def ocr_tesseract(img_id):
img_filename = './' + get_image_filename(img_id)
txt = tesserocr.image_to_text(Image.open(img_filename))
write_output("tesserocr", img_id, txt)
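# Hedged usage sketch (not part of the original file): run one of the wrappers on image id 1,
# assuming img_src/src001.png (or .jpg/.jpeg) and an ./output directory exist.
#   ocr_pytesseract(1)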
| 27.280702
| 72
| 0.674598
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 108
| 0.069453
|
2230323f70e41224c93df8ff861946c38acbb05d
| 6,718
|
py
|
Python
|
object_detection/det_heads/retinaNet_head/retinanet_head.py
|
no-name-xiaosheng/PaddleViT
|
50226a3be5095b3727d3c62d2eab23ef1e9612ec
|
[
"Apache-2.0"
] | 993
|
2021-08-30T01:58:57.000Z
|
2022-03-31T14:03:07.000Z
|
object_detection/det_heads/retinaNet_head/retinanet_head.py
|
Dongsheng-Bi/PaddleViT
|
c90a6c8dc3787e69cef3a37b9a260bd59eeff1f7
|
[
"Apache-2.0"
] | 120
|
2021-09-03T13:05:32.000Z
|
2022-03-29T02:08:22.000Z
|
object_detection/det_heads/retinaNet_head/retinanet_head.py
|
Dongsheng-Bi/PaddleViT
|
c90a6c8dc3787e69cef3a37b9a260bd59eeff1f7
|
[
"Apache-2.0"
] | 253
|
2021-08-30T08:50:27.000Z
|
2022-03-26T09:21:08.000Z
|
# Copyright (c) 2021 PPViT Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import paddle
import paddle.nn as nn
from paddle.nn.initializer import Normal, Constant
from retinanet_loss import RetinaNetLoss
from post_process import RetinaNetPostProcess
from det_utils.generator_utils import AnchorGenerator
class RetinaNetHead(nn.Layer):
'''
The head used in RetinaNet for object classification and box regression.
It has two subnets for the two tasks, with a common structure but separate parameters.
'''
def __init__(self, config):
'''
Args:
input_shape (List[ShapeSpec]): input shape.
num_classes (int): number of classes. Used to label background proposals.
num_anchors (int): number of generated anchors.
conv_dims (List[int]): dimensions for each convolution layer.
norm (str or callable):
Normalization for conv layers except for the two output layers.
See :func:`detectron2.layers.get_norm` for supported types.
loss_func (class): the class is used to compute loss.
prior_prob (float): Prior weight for computing bias.
'''
super(RetinaNetHead, self).__init__()
num_convs = config.RETINANET.NUM_CONVS
input_channels = config.RETINANET.INPUT_CHANNELS
norm = config.RETINANET.NORM
prior_prob = config.RETINANET.PRIOR_PROB
self.num_classes = config.RETINANET.NUM_CLASSES
self.get_loss = RetinaNetLoss(
focal_loss_alpha=config.RETINANET.FOCAL_LOSS_ALPHA,
focal_loss_gamma=config.RETINANET.FOCAL_LOSS_GAMMA,
smoothl1_loss_delta=config.RETINANET.SMOOTHL1_LOSS_DELTA,
positive_thresh=config.RETINANET.POSITIVE_THRESH,
negative_thresh=config.RETINANET.NEGATIVE_THRESH,
allow_low_quality=config.RETINANET.ALLOW_LOW_QUALITY,
num_classes=config.RETINANET.NUM_CLASSES,
weights=config.RETINANET.WEIGHTS
)
self.postprocess = RetinaNetPostProcess(
score_threshold=config.RETINANET.SCORE_THRESH,
keep_top_k=config.RETINANET.KEEP_TOPK,
nms_top_k=config.RETINANET.NMS_TOPK,
nms_threshold=config.RETINANET.NMS_THRESH,
bbox_reg_weights=config.RETINANET.WEIGHTS
)
self.anchor_generator = AnchorGenerator(anchor_sizes=config.RETINANET.ANCHOR_SIZE,
aspect_ratios=config.RETINANET.ASPECT_RATIOS,
strides=config.RETINANET.STRIDES,
offset=config.RETINANET.OFFSET)
num_anchors = self.anchor_generator.num_anchors
conv_dims = [input_channels] * num_convs
cls_net = []
reg_net = []
for in_channels, out_channels in zip(
[input_channels] + list(conv_dims), conv_dims
):
cls_net.append(
nn.Conv2D(in_channels, out_channels, kernel_size=3, stride=1, padding=1,
weight_attr=paddle.ParamAttr(initializer=Normal(mean=0., std=0.01)))
)
if norm == "bn":
cls_net.append(nn.BatchNorm2D(out_channels))
cls_net.append(nn.ReLU())
reg_net.append(
nn.Conv2D(in_channels, out_channels, kernel_size=3, stride=1, padding=1,
weight_attr=paddle.ParamAttr(initializer=Normal(mean=0., std=0.01)))
)
if norm == "bn":
reg_net.append(nn.BatchNorm2D(out_channels))
reg_net.append(nn.ReLU())
self.cls_net = nn.Sequential(*cls_net)
self.reg_net = nn.Sequential(*reg_net)
bias_value = -math.log((1 - prior_prob) / prior_prob)
self.cls_score = nn.Conv2D(
conv_dims[-1], num_anchors * self.num_classes, kernel_size=3, stride=1, padding=1,
weight_attr=paddle.ParamAttr(initializer=Normal(mean=0., std=0.01)),
bias_attr=paddle.ParamAttr(initializer=Constant(bias_value))
)
self.bbox_pred = nn.Conv2D(
conv_dims[-1], num_anchors * 4, kernel_size=3, stride=1, padding=1,
weight_attr=paddle.ParamAttr(initializer=Normal(mean=0., std=0.01))
)
def forward(self, feats, inputs):
'''
Returns:
loss_dict (dict) | pred_result(tensor), bbox_num(tensor):
loss_dict: contains cls_losses and reg_losses.
pred_result: the shape is [M, 6], M is the number of final preds,
Each row has 6 values: [label, score, xmin, ymin, xmax, ymax]
bbox_num: the shape is [N], N is the num of batch_size,
bbox_num[i] means the i'th img have bbox_num[i] boxes.
'''
anchors = self.anchor_generator(feats)
pred_scores = []
pred_boxes = []
for feat in feats:
pred_scores.append(self.cls_score(self.cls_net(feat)))
pred_boxes.append(self.bbox_pred(self.reg_net(feat)))
pred_scores_list = [
transpose_to_bs_hwa_k(s, self.num_classes) for s in pred_scores
]
pred_boxes_list = [
transpose_to_bs_hwa_k(s, 4) for s in pred_boxes
]
if self.training:
anchors = paddle.concat(anchors)
loss_dict = self.get_loss(anchors, [pred_scores_list, pred_boxes_list], inputs)
return loss_dict
else:
img_whwh = paddle.concat([inputs["imgs_shape"][:, 1:2],
inputs["imgs_shape"][:, 0:1]], axis=-1)
pred_result, bbox_num = self.postprocess(
pred_scores_list,
pred_boxes_list,
anchors,
inputs["scale_factor_wh"],
img_whwh
)
return pred_result, bbox_num
def transpose_to_bs_hwa_k(tensor, k):
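    # Reshapes a (B, A*K, H, W) prediction map into (B, H*W*A, K): the channel axis is split
    # into A anchors x K values per anchor, the spatial dims are moved forward, and the result
    # is flattened so that every (location, anchor) pair becomes one row of K predictions.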
assert tensor.dim() == 4
bs, _, h, w = tensor.shape
tensor = tensor.reshape([bs, -1, k, h, w])
tensor = tensor.transpose([0, 3, 4, 1, 2])
return tensor.reshape([bs, -1, k])
| 40.227545
| 94
| 0.620125
| 5,631
| 0.838196
| 0
| 0
| 0
| 0
| 0
| 0
| 1,915
| 0.285055
|
223135074d80c82a77c3e9b47c439c7c3abe7792
| 632
|
py
|
Python
|
brp/formutils.py
|
chop-dbhi/biorepo-portal
|
7db13c40b2b9d62af43a28e4af08c2472b98fc96
|
[
"BSD-2-Clause"
] | 6
|
2016-10-26T19:51:11.000Z
|
2021-03-18T16:05:55.000Z
|
brp/formutils.py
|
chop-dbhi/biorepo-portal
|
7db13c40b2b9d62af43a28e4af08c2472b98fc96
|
[
"BSD-2-Clause"
] | 207
|
2015-09-24T17:41:37.000Z
|
2021-05-18T18:14:08.000Z
|
brp/formutils.py
|
chop-dbhi/biorepo-portal
|
7db13c40b2b9d62af43a28e4af08c2472b98fc96
|
[
"BSD-2-Clause"
] | 8
|
2016-04-27T19:04:50.000Z
|
2020-08-24T02:33:05.000Z
|
from django import template
from django.forms import widgets
register = template.Library()
@register.inclusion_tag('formfield.html')
def formfield(field):
widget = field.field.widget
type_ = None
    # CheckboxInput subclasses Input, so test it first or checkboxes would be reported as 'input'
    if isinstance(widget, widgets.CheckboxInput):
        type_ = 'checkbox'
    elif isinstance(widget, widgets.Input):
        type_ = 'input'
    elif isinstance(widget, widgets.Textarea):
        type_ = 'textarea'
    elif isinstance(widget, widgets.Select):
        type_ = 'select'
elif isinstance(widget, widgets.RadioInput):
type_ = 'radio'
return {'field': field, 'form': field.form, 'type': type_}
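# Usage sketch (assumed, not part of the original file): once this library is loadable from a
# templatetags package, each bound field can be rendered through the formfield.html include:
#
#     {% load formutils %}
#     {% for field in form %}{% formfield field %}{% endfor %}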
| 28.727273
| 62
| 0.675633
| 0
| 0
| 0
| 0
| 538
| 0.851266
| 0
| 0
| 77
| 0.121835
|
2231aae6662593f94c1874f0078bab296c0ac96f
| 2,104
|
py
|
Python
|
SGE/src/configs/rng_seeds.py
|
dabingrosewood/MasterThesisProj
|
7e40fa2395468a1bccef429362a61ed8515ecc11
|
[
"MIT"
] | null | null | null |
SGE/src/configs/rng_seeds.py
|
dabingrosewood/MasterThesisProj
|
7e40fa2395468a1bccef429362a61ed8515ecc11
|
[
"MIT"
] | null | null | null |
SGE/src/configs/rng_seeds.py
|
dabingrosewood/MasterThesisProj
|
7e40fa2395468a1bccef429362a61ed8515ecc11
|
[
"MIT"
] | null | null | null |
# CONFIG
seeds = [6598903756360202179, 2908409715321502665, 6126375328734039552, 1447957147463681860, 8611858271322161001, 1129180857020570158, 6362222119948958210, 7116573423379052515, 6183551438103583226, 4025455056998962241, 3253052445978017587, 8447055112402476503, 5958072666039141800, 704315598608973559, 1273141716491599966, 8030825590436937002, 6692768176035969914, 8405559442957414941, 5375803109627817298, 1491660193757141856, 3436611086188602011, 3271002097187013328, 4006294871837743001, 7473817498436254932, 7891796310200224764, 3130952787727334893, 697469171142516880, 133987617360269051, 1978176412643604703, 3541943493395593807, 5679145832406031548, 5942005640162452699, 5170695982942106620, 3168218038949114546, 9211443340810713278, 675545486074597116, 3672488441186673791, 6678020899892900267, 2416379871103035344, 8662874560817543122, 2122645477319220395, 2405200782555244715, 6145921643610737337, 5436563232962849112, 8616414727199277108, 3514934091557929937, 6828532625327352397, 4198622582999611227, 1404664771100695607, 2109913995355226572, 7499239331133290294, 1663854912663070382, 8773050872378084951, 847059168652279875, 2080440852605950627, 842456810578794799, 2969610112218411619, 8028963261673713765, 8849431138779094918, 6906452636298562639, 8279891918456160432, 3007521703390185509, 7384090506069372457, 2587992914778556505, 7951640286729988102, 812903075765965116, 4795333953396378316, 1140497104356211676, 8624839892588303806, 5867085452069993348, 8978621560802611959, 8687506047153117100, 1433098622112610322, 2329673189788559167, 1697681906179453583, 1151871187140419944, 7331838985682630168, 2010690807327394179, 8961362099735442061, 3782928183186245068, 8730275423842935904, 2250089307129376711, 6729072114456627667, 6426359511845339057, 1543504526754215874, 6764758859303816569, 438430728757175362, 850249168946095159, 7241624624529922339, 633139235530929889, 8443344843613690342, 5097223086273121, 3838826661110586915, 7425568686759148634, 5814866864074983273, 5375799982976616117, 6540402714944055605, 448708351215739494, 5101380446889426970, 8035666378249198606]
| 701.333333
| 2,094
| 0.901616
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 8
| 0.003802
|
223331808a66e2aa15f291c872b40388de56a8a3
| 2,793
|
py
|
Python
|
learning/modules/visitation_softmax.py
|
esteng/guiding-multi-step
|
3f0db0ba70b5851cc83878f4ed48cf82342a2ddf
|
[
"BSD-2-Clause"
] | null | null | null |
learning/modules/visitation_softmax.py
|
esteng/guiding-multi-step
|
3f0db0ba70b5851cc83878f4ed48cf82342a2ddf
|
[
"BSD-2-Clause"
] | null | null | null |
learning/modules/visitation_softmax.py
|
esteng/guiding-multi-step
|
3f0db0ba70b5851cc83878f4ed48cf82342a2ddf
|
[
"BSD-2-Clause"
] | null | null | null |
import torch
import torch.nn as nn
import numpy as np
class VisitationSoftmax(nn.Module):
def __init__(self, log=False):
super(VisitationSoftmax, self).__init__()
self.log = log
        self.logsoftmax = nn.LogSoftmax(dim=1)
self.softmax = nn.Softmax(dim=1)
def forward(self, visitation_distributions, goal_outside_score=None):
"""
Applies softmax on visitation distributions, while handling the case where we assign additional
probability of the goal being outside of the observed map region.
:param visitation_distributions:
:return: Nx3xHxW tensor where first channel is probability over visited locations, second channel is probability of stop locations,
third channel is a copy of the same value indicating the probability that goal location is not visible
"""
batch_size = visitation_distributions.size(0)
num_channels = visitation_distributions.size(1)
assert num_channels == 2, "Must have 2 channels: visitation distribution scores and goal distribution scores"
height = visitation_distributions.size(2)
width = visitation_distributions.size(3)
visitation_dist_scores = visitation_distributions[:, 0, :, :]
goal_inside_dist_scores = visitation_distributions[:, 1, :, :]
        softmax_func = self.logsoftmax if self.log else self.softmax
# Visitation distribution: Flatten, softmax, reshape back
visitation_dist = softmax_func(visitation_dist_scores.view(batch_size, width*height)).view(visitation_dist_scores.size())
# We are modelling OOB probability
if goal_outside_score is not None:
# Goal distribution: Flatten, append outside score, softmax, split off outside score, reshape back
goal_scores_full = torch.cat([goal_inside_dist_scores.view(batch_size, width*height),goal_outside_score[:, np.newaxis]], dim=1)
goal_dist_full = softmax_func(goal_scores_full)
goal_inside_partial_dist = goal_dist_full[:, :-1].view(goal_inside_dist_scores.size())
goal_outside_prob_or_logprob = goal_dist_full[:, -1]
# Re-assemble back into the Bx2xHxW tensor representation
visitation_prob_or_log_prob_out = torch.stack([visitation_dist, goal_inside_partial_dist], dim=1)
return visitation_prob_or_log_prob_out, goal_outside_prob_or_logprob
else:
goal_dist = softmax_func(goal_inside_dist_scores.view(batch_size, width * height)).view(
goal_inside_dist_scores.size())
# Re-assemble back into the Bx2xHxW tensor representation
visitation_prob_or_log_prob_out = torch.stack([visitation_dist, goal_dist], dim=1)
return visitation_prob_or_log_prob_out
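# Minimal usage sketch (assumed shapes: batch of 2, 32x32 map, random scores):
if __name__ == "__main__":
    scores = torch.rand(2, 2, 32, 32)        # Bx2xHxW visitation / goal location scores
    outside_scores = torch.rand(2)           # one "goal is outside the map" score per example
    module = VisitationSoftmax()
    probs, p_outside = module(scores, outside_scores)
    # the visitation map sums to 1; the goal map plus its outside mass sums to 1
    print(probs.shape, p_outside.shape)      # torch.Size([2, 2, 32, 32]) torch.Size([2])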
| 52.698113
| 139
| 0.713928
| 2,737
| 0.97995
| 0
| 0
| 0
| 0
| 0
| 0
| 871
| 0.311851
|
223355bd5379be3ac4c24bf1261412562ebdf029
| 96
|
py
|
Python
|
baekjoon/easy-math/17362-finger.py
|
honux77/algorithm
|
2ed8cef1fbee7ad96d8f2ae583666d52bd8892ee
|
[
"MIT"
] | 2
|
2019-02-08T01:23:07.000Z
|
2020-11-19T12:23:52.000Z
|
baekjoon/easy-math/17362-finger.py
|
honux77/algorithm
|
2ed8cef1fbee7ad96d8f2ae583666d52bd8892ee
|
[
"MIT"
] | null | null | null |
baekjoon/easy-math/17362-finger.py
|
honux77/algorithm
|
2ed8cef1fbee7ad96d8f2ae583666d52bd8892ee
|
[
"MIT"
] | null | null | null |
n = int(input()) % 8
if n == 0:
print(2)
elif n <= 5:
print(n)
else:
print(10 - n)
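# The counted finger repeats with period 8: 1 2 3 4 5 4 3 2 | 1 2 3 4 5 4 3 2 | ...
# hence n % 8 in 1..5 maps to itself, 6 -> 4, 7 -> 3, and 0 (a multiple of 8) -> 2.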
| 12
| 20
| 0.458333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
22350d67f0af834116a7d33c446043e6c69e8a30
| 908
|
py
|
Python
|
Module/nikodou_information.py
|
RyoTozawa/LineBot
|
14b34094f9a27650d412128334c3f09c7444ffc9
|
[
"MIT"
] | 1
|
2018-10-16T07:50:59.000Z
|
2018-10-16T07:50:59.000Z
|
Module/nikodou_information.py
|
RyoTozawa/LineBot
|
14b34094f9a27650d412128334c3f09c7444ffc9
|
[
"MIT"
] | 1
|
2018-04-09T11:03:25.000Z
|
2018-04-09T11:03:25.000Z
|
Module/nikodou_information.py
|
tozastation/Line-Bot
|
14b34094f9a27650d412128334c3f09c7444ffc9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# coding:utf-8
import urllib
from xml.etree import ElementTree
import xml.dom.minidom as md
class Niko(object):
def __init__(self):
self.url_ranking = 'http://www.nicovideo.jp/ranking/fav/hourly/sing?rss=2.0&lang=ja-jp'
self.url_news = 'http://news.nicovideo.jp/categories/10?rss=2.0'
def send_niko_list(self, tag, category):
        if category == 'ranking':
            response = urllib.request.urlopen(self.url_ranking)
        elif category == 'news':
            response = urllib.request.urlopen(self.url_news)
        else:
            raise ValueError("category must be 'ranking' or 'news'")
root = ElementTree.fromstring(response.read())
document = md.parseString(ElementTree.tostring(root, 'utf-8'))
send_list = []
for a in document.getElementsByTagName(tag):
            # slice off the surrounding <tag>...</tag> markers (rstrip/lstrip strip character sets, not the tag)
            xml = a.toxml()
            send_list.append(xml[len(tag) + 2:-(len(tag) + 3)])
del send_list[0]
return send_list
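# Usage sketch (assumption: network access to the nicovideo RSS feeds above):
#
#     niko = Niko()
#     titles = niko.send_niko_list('title', 'ranking')   # item titles from the hourly ranking feed
#     links = niko.send_niko_list('link', 'news')        # item links from the news feed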
| 29.290323
| 95
| 0.643172
| 787
| 0.86674
| 0
| 0
| 0
| 0
| 0
| 0
| 188
| 0.207048
|
2235a02d239f6f03f7a8b8fc3826bf3189e27e0c
| 927
|
py
|
Python
|
climbing/add_new_climbs.py
|
JiriKalvoda/slama.dev
|
4856d246858dd98a1852365b028873b61f5a6775
|
[
"MIT"
] | 7
|
2019-09-15T19:55:11.000Z
|
2021-12-27T11:40:20.000Z
|
climbing/add_new_climbs.py
|
JiriKalvoda/slama.dev
|
4856d246858dd98a1852365b028873b61f5a6775
|
[
"MIT"
] | 38
|
2020-05-18T12:49:13.000Z
|
2022-03-23T12:51:28.000Z
|
climbing/add_new_climbs.py
|
JiriKalvoda/slama.dev
|
4856d246858dd98a1852365b028873b61f5a6775
|
[
"MIT"
] | 17
|
2020-02-16T19:49:25.000Z
|
2022-02-06T14:28:35.000Z
|
#!/usr/bin/env python3
import os
import shutil
from subprocess import Popen, PIPE
from datetime import date
import yaml
os.chdir(os.path.dirname(os.path.realpath(__file__)))
CLIMBING_FOLDER = "."
CLIMBING_VIDEOS_FOLDER = os.path.join(CLIMBING_FOLDER, "videos")
CLIMBING_INFO = os.path.join(CLIMBING_FOLDER, "videos.yaml")
config = {}
if os.path.exists(CLIMBING_INFO):
with open(CLIMBING_INFO, "r") as f:
config = yaml.safe_load(f.read())
files = os.listdir(CLIMBING_VIDEOS_FOLDER)
for file in files:
if file.lower().endswith(".mp4") and file not in config:
print(f"adding new file {file}.")
config[file] = {
"color": "TODO",
"date": date.today(),
"zone": "TODO",
"new": None,
"rotate": "left",
"encode": None,
"trim": "TODO",
}
with open(CLIMBING_INFO, "w") as f:
f.write(yaml.dump(config))
| 24.394737
| 64
| 0.614887
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 154
| 0.166127
|
22369660633e2973cf659ea963259b0f27b54f98
| 952
|
py
|
Python
|
posts/models.py
|
dnetochaves/blog
|
e04fda385490b671540b671631726584a533369c
|
[
"MIT"
] | null | null | null |
posts/models.py
|
dnetochaves/blog
|
e04fda385490b671540b671631726584a533369c
|
[
"MIT"
] | null | null | null |
posts/models.py
|
dnetochaves/blog
|
e04fda385490b671540b671631726584a533369c
|
[
"MIT"
] | null | null | null |
from django.db import models
from categorias.models import Categoria
from django.contrib.auth.models import User
from django.utils import timezone
# Create your models here.
class Post(models.Model):
titulo_post = models.CharField(max_length=50, verbose_name='Titulo')
autor_post = models.ForeignKey(User, on_delete=models.DO_NOTHING, verbose_name='Autor')
data_post = models.DateTimeField(default=timezone.now, verbose_name='Data')
conteudo_post = models.TextField(verbose_name='Conteudo')
exerto_post = models.TextField(verbose_name='Exerto')
categoria_post = models.ForeignKey(
Categoria, on_delete=models.DO_NOTHING, blank=True, null=True, verbose_name='Categoria')
imagem_post = models.ImageField(upload_to='post_img', blank=True, null=True, verbose_name='Imagem')
publicacao_post = models.BooleanField(default=False, verbose_name='Publicado')
def __str__(self):
return self.titulo_post
| 43.272727
| 103
| 0.765756
| 774
| 0.813025
| 0
| 0
| 0
| 0
| 0
| 0
| 105
| 0.110294
|
2237660edc2b315c6d1a8e947bbdd55091f794e0
| 2,765
|
py
|
Python
|
src/ros_vision_interaction/examples/example_interaction.py
|
HaaaO/vision-project
|
72256af07834195cfe52ac344aee5effcd0da978
|
[
"MIT"
] | null | null | null |
src/ros_vision_interaction/examples/example_interaction.py
|
HaaaO/vision-project
|
72256af07834195cfe52ac344aee5effcd0da978
|
[
"MIT"
] | 21
|
2020-09-09T18:55:58.000Z
|
2021-07-26T19:42:46.000Z
|
src/ros_vision_interaction/examples/example_interaction.py
|
HaaaO/vision-project
|
72256af07834195cfe52ac344aee5effcd0da978
|
[
"MIT"
] | 6
|
2020-12-20T17:19:29.000Z
|
2021-08-09T22:33:04.000Z
|
#!/usr/bin/env python
import datetime
import logging
import os
import random
import rospy
import schedule
from interaction_engine.cordial_interface import CordialInterface
from interaction_engine.database import Database
from interaction_engine.int_engine import InteractionEngine
from interaction_engine.message import Message
from interaction_engine.state import State
from interaction_engine.state_collection import StateCollection
from cordial_msgs.msg import AskOnGuiAction, AskOnGuiGoal, MouseEvent
from std_msgs.msg import Bool
logging.basicConfig(level=logging.INFO)
class Keys:
GREETING = "greeting"
HOW_ARE_YOU = "how are you"
TAKE_CARE = "take care"
WHEN_TO_TALK = "when to talk"
greeting = State(
name=Keys.GREETING,
message_type=Message.Type.MULTIPLE_CHOICE_ONE_COLUMN,
content="Hello!",
next_states=[Keys.HOW_ARE_YOU],
transitions={"Hello!": Keys.HOW_ARE_YOU, "Hi!": Keys.HOW_ARE_YOU}
)
how_are_you = State(
name=Keys.HOW_ARE_YOU,
message_type=Message.Type.MULTIPLE_CHOICE_ONE_COLUMN,
content="How are you doing today?",
next_states=[Keys.TAKE_CARE],
transitions={
"Pretty good.": Keys.TAKE_CARE,
"Great!": Keys.TAKE_CARE,
"Not too good.": Keys.TAKE_CARE
}
)
take_care = State(
name=Keys.TAKE_CARE,
message_type=Message.Type.MULTIPLE_CHOICE_ONE_COLUMN,
content="Don't forget to drink enough water and get enough sleep!",
next_states=[Keys.WHEN_TO_TALK],
transitions={"Next": Keys.WHEN_TO_TALK}
)
when_to_talk = State(
name=Keys.WHEN_TO_TALK,
message_type=Message.Type.TIME_ENTRY,
content="When would you like to talk tomorrow?",
next_states=["exit"],
args=["15", "15:15"]
)
state_collection = StateCollection(
name="example interaction",
init_state_name=Keys.WHEN_TO_TALK,
states=[
greeting,
how_are_you,
take_care,
when_to_talk
]
)
cwd = os.getcwd()
database_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"example_interaction_database.json"
)
default_database_keys = [
Keys.GREETING,
Keys.HOW_ARE_YOU,
Keys.TAKE_CARE,
Keys.WHEN_TO_TALK
]
database_manager = Database(
database_file_name=database_file,
default_database_keys=default_database_keys
)
interface = CordialInterface(
action_name="cordial/say_and_ask_on_gui",
seconds_until_timeout=None
)
interaction_engine = InteractionEngine(
state_collection=state_collection,
database_manager=database_manager,
interface=interface
)
if __name__ == "__main__":
while not rospy.is_shutdown():
rospy.logdebug("Scheduled interaction running")
interaction_engine.run()
rospy.sleep(5)
| 24.469027
| 71
| 0.725859
| 131
| 0.047378
| 0
| 0
| 0
| 0
| 0
| 0
| 398
| 0.143942
|
2237956981da3e82e0d6350f1b78b20897718d48
| 2,441
|
py
|
Python
|
explicalib/distribution/multiclass_distribution.py
|
euranova/estimating_eces
|
9bfa81dd7a39ebe069c5b11b8e7a9bf9017e9350
|
[
"MIT"
] | 2
|
2021-11-30T18:44:11.000Z
|
2021-11-30T18:44:19.000Z
|
explicalib/distribution/multiclass_distribution.py
|
euranova/estimating_eces
|
9bfa81dd7a39ebe069c5b11b8e7a9bf9017e9350
|
[
"MIT"
] | null | null | null |
explicalib/distribution/multiclass_distribution.py
|
euranova/estimating_eces
|
9bfa81dd7a39ebe069c5b11b8e7a9bf9017e9350
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
@author: nicolas.posocco
"""
from abc import ABC
import numpy as np
class MulticlassDistribution(ABC):
def __init__(self):
"""
        Initializes the distribution, allowing later sampling and posterior probability calculations.
"""
pass
def sample(self, n_samples, return_posterior=True, reproducible=None):
"""
        Draws n_samples samples from the distribution, along with their labels.
        Also returns the array of posterior probabilities if return_posterior=True.
"""
# n_samples
assert type(n_samples) is int, "n_samples should be an integer."
assert n_samples > 0, "n_samples should be positive."
# return_posterior
assert type(return_posterior) is bool, "return_posterior should be a boolean."
# reproducible
assert type(reproducible) is np.random.RandomState, "reproducible should be a np.random.RandomState object."
raise NotImplementedError
def posteriors(self, X):
# X
assert type(X) is np.ndarray, "X should be a numpy array."
assert X.ndim == 2, "X should be of shape (n_samples, n_features), here is of shape {}".format(X.shape)
raise NotImplementedError
def get_bayes_classifier(self):
"""
        Instantiates the optimal Bayes classifier for this distribution.
"""
return BayesClassifier(distribution=self)
class BayesClassifier(ABC):
def __init__(self, distribution):
# distribution
assert isinstance(distribution,
MulticlassDistribution), "distribution should inherit from MulticlassDistribution."
self.distribution = distribution
def fit(self, X):
pass
def predict_proba(self, X):
# X
assert type(X) is np.ndarray, "X should be a numpy array, here is a {}.".format(type(X))
assert X.ndim == 2, "X should be of shape (n_samples, n_features), here is of shape {}".format(X.shape)
posteriors = self.distribution.posteriors(X)
return posteriors
def predict(self, X):
# X
assert type(X) is np.ndarray, "X should be a numpy array, here is a {}.".format(type(X))
assert X.ndim == 2, "X should be of shape (n_samples, n_features), here is of shape {}".format(X.shape)
posteriors = self.predict_proba(X)
return np.argmax(posteriors, axis=0)
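# A minimal concrete distribution (illustrative, not part of the original module): two equiprobable
# 1-D Gaussian classes with unit variance centred at -1 and +1. Posteriors are returned with shape
# (n_classes, n_samples), which matches the argmax over axis 0 used in BayesClassifier.predict.
class TwoGaussiansDistribution(MulticlassDistribution):

    def __init__(self, centers=(-1.0, 1.0)):
        super().__init__()
        self.centers = np.array(centers)

    def sample(self, n_samples, return_posterior=True, reproducible=None):
        rng = reproducible if reproducible is not None else np.random.RandomState(0)
        y = rng.randint(len(self.centers), size=n_samples)
        X = (self.centers[y] + rng.randn(n_samples)).reshape(-1, 1)
        return (X, y, self.posteriors(X)) if return_posterior else (X, y)

    def posteriors(self, X):
        # unnormalised Gaussian likelihoods per class, normalised over classes (axis 0)
        likelihoods = np.exp(-0.5 * (X.reshape(1, -1) - self.centers.reshape(-1, 1)) ** 2)
        return likelihoods / likelihoods.sum(axis=0, keepdims=True)

# Usage: clf = TwoGaussiansDistribution().get_bayes_classifier()
#        X, y, _ = TwoGaussiansDistribution().sample(5); print(clf.predict_proba(X))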
| 29.059524
| 116
| 0.643179
| 2,339
| 0.958214
| 0
| 0
| 0
| 0
| 0
| 0
| 1,029
| 0.421549
|
22386109daaa28a3082c4a5bbbaea3c931eb1b4c
| 3,169
|
py
|
Python
|
test.py
|
JulioPDX/ci_cd_dev
|
e9b72c1b16e9f05a5e93e22f045fda605aac509f
|
[
"MIT"
] | 6
|
2021-10-31T18:36:49.000Z
|
2022-03-14T02:26:37.000Z
|
test.py
|
JulioPDX/ci_cd_dev
|
e9b72c1b16e9f05a5e93e22f045fda605aac509f
|
[
"MIT"
] | null | null | null |
test.py
|
JulioPDX/ci_cd_dev
|
e9b72c1b16e9f05a5e93e22f045fda605aac509f
|
[
"MIT"
] | 2
|
2022-02-10T16:58:46.000Z
|
2022-03-07T05:00:57.000Z
|
#!/usr/bin/env python
"""Script used to test the network with batfish"""
from pybatfish.client.commands import *
from pybatfish.question import load_questions
from pybatfish.client.asserts import (
assert_no_duplicate_router_ids,
assert_no_incompatible_bgp_sessions,
assert_no_incompatible_ospf_sessions,
assert_no_unestablished_bgp_sessions,
assert_no_undefined_references,
)
from rich.console import Console
console = Console(color_system="truecolor")
def test_duplicate_rtr_ids(snap):
"""Testing for duplicate router IDs"""
console.print(
":white_exclamation_mark: [bold yellow]Testing for duplicate router IDs[/bold yellow] :white_exclamation_mark:"
)
assert_no_duplicate_router_ids(
snapshot=snap,
protocols={"ospf", "bgp"},
)
console.print(
":green_heart: [bold green]No duplicate router IDs found[/bold green] :green_heart:"
)
def test_bgp_compatibility(snap):
"""Testing for incompatible BGP sessions"""
console.print(
":white_exclamation_mark: [bold yellow]Testing for incompatible BGP sessions[/bold yellow] :white_exclamation_mark:"
)
assert_no_incompatible_bgp_sessions(
snapshot=snap,
)
console.print(
":green_heart: [bold green]All BGP sessions compatible![/bold green] :green_heart:"
)
def test_ospf_compatibility(snap):
"""Testing for incompatible OSPF sessions"""
console.print(
":white_exclamation_mark: [bold yellow]Testing for incompatible OSPF sessions[/bold yellow] :white_exclamation_mark:"
)
assert_no_incompatible_ospf_sessions(
snapshot=snap,
)
console.print(
":green_heart: [bold green]All OSPF sessions compatible![/bold green] :green_heart:"
)
def test_bgp_unestablished(snap):
"""Testing for BGP sessions that are not established"""
console.print(
":white_exclamation_mark: [bold yellow]Testing for unestablished BGP sessions[/bold yellow] :white_exclamation_mark:"
)
assert_no_unestablished_bgp_sessions(
snapshot=snap,
)
console.print(
":green_heart: [bold green]All BGP sessions are established![/bold green] :green_heart:"
)
def test_undefined_references(snap):
"""Testing for any undefined references"""
console.print(
":white_exclamation_mark: [bold yellow]Testing for undefined references[/bold yellow] :white_exclamation_mark:"
)
assert_no_undefined_references(
snapshot=snap,
)
console.print(
":green_heart: [bold green]No undefined refences found![/bold green] :green_heart:"
)
def main():
"""init all the things"""
NETWORK_NAME = "PDX_NET"
SNAPSHOT_NAME = "snapshot00"
SNAPSHOT_DIR = "./snapshots"
bf_session.host = "192.168.10.193"
bf_set_network(NETWORK_NAME)
init_snap = bf_init_snapshot(SNAPSHOT_DIR, name=SNAPSHOT_NAME, overwrite=True)
load_questions()
test_duplicate_rtr_ids(init_snap)
test_bgp_compatibility(init_snap)
test_ospf_compatibility(init_snap)
test_bgp_unestablished(init_snap)
test_undefined_references(init_snap)
if __name__ == "__main__":
main()
| 30.471154
| 125
| 0.716945
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,394
| 0.439886
|
22386711171a0a872717803582d333bc6bde0602
| 1,176
|
py
|
Python
|
algs15_priority_queue/circular_queue.py
|
zhubaiyuan/learning-algorithms
|
ea9ee674878d535a9e9987c0d948c0357e0ed4da
|
[
"MIT"
] | null | null | null |
algs15_priority_queue/circular_queue.py
|
zhubaiyuan/learning-algorithms
|
ea9ee674878d535a9e9987c0d948c0357e0ed4da
|
[
"MIT"
] | null | null | null |
algs15_priority_queue/circular_queue.py
|
zhubaiyuan/learning-algorithms
|
ea9ee674878d535a9e9987c0d948c0357e0ed4da
|
[
"MIT"
] | null | null | null |
"""
A fixed-capacity queue implemented as circular queue.
Queue can become full.
* enqueue is O(1)
* dequeue is O(1)
"""
class Queue:
"""
Implementation of a Queue using a circular buffer.
"""
def __init__(self, size):
self.size = size
self.storage = [None] * size
self.first = 0
self.last = 0
self.N = 0
def is_empty(self):
"""
Determine if queue is empty.
"""
return self.N == 0
def is_full(self):
"""
Determine if queue is full.
"""
return self.N == self.size
def enqueue(self, item):
"""
Enqueue new item to end of queue.
"""
if self.is_full():
raise RuntimeError('Queue is full')
self.storage[self.last] = item
self.N += 1
self.last = (self.last + 1) % self.size
def dequeue(self):
"""
Remove and return first item from queue.
"""
if self.is_empty():
raise RuntimeError('Queue is empty')
val = self.storage[self.first]
self.N -= 1
self.first = (self.first + 1) % self.size
return val
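# Minimal usage sketch (not part of the original module):
if __name__ == '__main__':
    q = Queue(3)
    for item in ('a', 'b', 'c'):
        q.enqueue(item)
    assert q.is_full()
    print(q.dequeue(), q.dequeue())    # a b
    q.enqueue('d')                     # wraps around inside the fixed-size buffer
    print(q.dequeue(), q.dequeue())    # c d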
| 22.188679
| 54
| 0.519558
| 1,052
| 0.894558
| 0
| 0
| 0
| 0
| 0
| 0
| 441
| 0.375
|
223a35394d8e357d916a263b18714241694b5330
| 4,300
|
py
|
Python
|
pos_evaluation/create_train_dev_old.py
|
ayyoobimani/GNN-POSTAG
|
47eb4bc6d64de565e87ee7cb8e9c5020d936138c
|
[
"MIT"
] | null | null | null |
pos_evaluation/create_train_dev_old.py
|
ayyoobimani/GNN-POSTAG
|
47eb4bc6d64de565e87ee7cb8e9c5020d936138c
|
[
"MIT"
] | null | null | null |
pos_evaluation/create_train_dev_old.py
|
ayyoobimani/GNN-POSTAG
|
47eb4bc6d64de565e87ee7cb8e9c5020d936138c
|
[
"MIT"
] | null | null | null |
"""
Create train and dev set from bronze data
Example call:
$ python3 create_train_dev.py --pos /mounts/work/ayyoob/results/gnn_align/yoruba/pos_tags_tam-x-bible-newworld_posfeatFalse_transformerFalse_trainWEFalse_maskLangTrue.pickle --bible tam-x-bible-newworld.txt --bronze 1 --lang tam
$ python3 create_train_dev.py --pos /mounts/work/ayyoob/results/gnn_align/yoruba/pos_tags_fin-x-bible-helfi_posfeatFalse_transformerFalse_trainWEFalse_maskLangTrue.pickle --bible fin-x-bible-helfi.txt --bronze 1 --lang fin
"""
import torch
import random
import argparse
def load_bible(filename):
bible = {}
with open(filename) as f:
for line in f:
if line.startswith("#"):
continue
l = line.strip().split("\t")
if len(l)<2:
continue
bible[l[0]] = l[1]
print("Len bible "+filename+" = "+str(len(bible)))
# print(bible)
return bible
def get_sets(pos_tags_file, bible_file, bronze, lang):
postag_map = {"ADJ": 0, "ADP": 1, "ADV": 2, "AUX": 3, "CCONJ": 4, "DET": 5, "INTJ": 6, "NOUN": 7, "NUM": 8, "PART": 9, "PRON": 10,
"PROPN": 11, "PUNCT": 12, "SCONJ": 13, "SYM": 14, "VERB": 15, "X": 16}
inv_postag_map = {v: k for k, v in postag_map.items()}
# pos_tags = torch.load('/mounts/work/ayyoob/results/gnn_align/yoruba/pos_tags_freeze-embedding_noLang.pickle')
# bronze 1
# pos_tags = torch.load('/mounts/work/ayyoob/results/gnn_align/yoruba/pos_tags_posfeatFalse_transformerFalse_trainWEFalse_maskLangTrue.pickle')
# bronze 2
# pos_tags = torch.load('/mounts/work/ayyoob/results/gnn_align/yoruba/pos_tags_posfeatTrue_transformerFalse_trainWEFalse_maskLangTrue.pickle')
# bronze 3
pos_tags = torch.load(pos_tags_file)
# bible = load_bible("/nfs/datc/pbc/"+bible_file)
bible = load_bible("/mounts/Users/student/ayyoob/Dokumente/code/pbc_utils/data/helfi/"+bible_file)
# train = open("yoruba_silver_train.txt", "w+")
# dev = open("yoruba_a_silver_train.txt", "w+")
# train = open("yoruba_bronze1_train.txt", "w+")
# dev = open("yoruba_bronze1_dev.txt", "w+")
train = open(lang+"_bronze"+bronze+"_train.txt", "w+")
dev = open(lang+"_bronze"+bronze+"_dev.txt", "w+")
ids_file = open(lang+"_ids.txt", "w+") ### To Fix: USE SAME IDS
print(len(pos_tags))
    # shuffle the pos_tags verse ids and take 90% for train, the remaining 10% for dev
ids = list(pos_tags.keys())
random.shuffle(ids)
n_train = int(len(ids)*90/100)
print("Len dev: ", len(ids)-n_train)
ids = ids[0:n_train]
print("Len train: ", n_train)
for verse_id in pos_tags:
# print(verse_id)
if verse_id in ids:
flag = 1
ids_file.write(verse_id+"\n")
else:
flag = 0
bible_verse = bible[verse_id].split(" ")
# get pairs word-tag
for i in range(len(bible_verse)):
if i in pos_tags[verse_id]:
if flag:
train.write(verse_id+"\t"+bible_verse[i]+"\t"+inv_postag_map[pos_tags[verse_id][i]]+"\n")
else:
dev.write(verse_id+"\t"+bible_verse[i]+"\t"+inv_postag_map[pos_tags[verse_id][i]]+"\n")
else:
if flag:
train.write(verse_id+"\t"+bible_verse[i]+"\t"+"X"+"\n")
else:
dev.write(verse_id+"\t"+bible_verse[i]+"\t"+"X"+"\n")
if flag: # add space in between sentences
train.write("\n")
else:
dev.write("\n")
train.close()
dev.close()
ids_file.close()
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--pos", default=None, type=str, required=True, help="Bronze pickle")
parser.add_argument("--bible", default=None, type=str, required=True, help="Bible file")
parser.add_argument("--bronze", default=None, type=int, required=True, help="Specify bronze number [1,2,3]")
parser.add_argument("--lang", default=None, type=str, required=True, help="Language (3 letters)")
args = parser.parse_args()
pos_tags_file = args.pos
bible_file = args.bible
bronze = str(args.bronze)
lang = args.lang
get_sets(pos_tags_file, bible_file, bronze, lang)
if __name__ == "__main__":
main()
| 38.738739
| 228
| 0.621163
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,803
| 0.419302
|
223b3e8319f85381a1c34694d9c35926bb3d9b11
| 1,076
|
py
|
Python
|
lib/spack/spack/test/permissions.py
|
SimeonEhrig/spack
|
7fe0230492ecf0e497a84d578ea163570cf460eb
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2
|
2016-01-12T20:14:40.000Z
|
2017-06-16T07:03:46.000Z
|
lib/spack/spack/test/permissions.py
|
SimeonEhrig/spack
|
7fe0230492ecf0e497a84d578ea163570cf460eb
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 75
|
2016-07-27T11:43:00.000Z
|
2020-12-08T15:56:53.000Z
|
lib/spack/spack/test/permissions.py
|
SimeonEhrig/spack
|
7fe0230492ecf0e497a84d578ea163570cf460eb
|
[
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 8
|
2015-10-16T13:51:49.000Z
|
2021-10-18T13:58:03.000Z
|
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import pytest
import stat
from spack.hooks.permissions_setters import (
chmod_real_entries, InvalidPermissionsError
)
import llnl.util.filesystem as fs
def test_chmod_real_entries_ignores_suid_sgid(tmpdir):
path = str(tmpdir.join('file').ensure())
mode = stat.S_ISUID | stat.S_ISGID | stat.S_ISVTX
os.chmod(path, mode)
mode = os.stat(path).st_mode # adds a high bit we aren't concerned with
perms = stat.S_IRWXU
chmod_real_entries(path, perms)
assert os.stat(path).st_mode == mode | perms & ~stat.S_IXUSR
def test_chmod_rejects_group_writable_suid(tmpdir):
path = str(tmpdir.join('file').ensure())
mode = stat.S_ISUID | stat.S_ISGID | stat.S_ISVTX
fs.chmod_x(path, mode)
perms = stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO
with pytest.raises(InvalidPermissionsError):
chmod_real_entries(path, perms)
| 29.888889
| 76
| 0.736059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 247
| 0.229554
|
223b618424ceff584aa410ff8121dcf69f5567f4
| 323
|
py
|
Python
|
data_statistics/variable_statistics.py
|
go-jugo/ml_event_prediction_trainer
|
0d644b737afdef078ad5b6fc2b7e2549b964b56f
|
[
"Apache-2.0"
] | null | null | null |
data_statistics/variable_statistics.py
|
go-jugo/ml_event_prediction_trainer
|
0d644b737afdef078ad5b6fc2b7e2549b964b56f
|
[
"Apache-2.0"
] | null | null | null |
data_statistics/variable_statistics.py
|
go-jugo/ml_event_prediction_trainer
|
0d644b737afdef078ad5b6fc2b7e2549b964b56f
|
[
"Apache-2.0"
] | null | null | null |
import dask.dataframe as dd
def describe_dataframe(df):
error_cols = [col for col in df.columns if 'errorCode' in col]
df = df.categorize(columns=error_cols)
statistics = df.describe(include='all').compute()
statistics = statistics.T
statistics.to_excel('../statistics/variables_statistics.xlsx')
| 40.375
| 67
| 0.718266
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 57
| 0.176471
|
223b7dfcfebf8324a056e2a31a6551d0d0397ac2
| 393
|
py
|
Python
|
code/com/caicongyang/python/study/base/pandas_sql.py
|
caicongyang/python-study
|
e5db4d1b033d183da7e9af6a8c930bcaba2962f7
|
[
"Apache-2.0"
] | null | null | null |
code/com/caicongyang/python/study/base/pandas_sql.py
|
caicongyang/python-study
|
e5db4d1b033d183da7e9af6a8c930bcaba2962f7
|
[
"Apache-2.0"
] | null | null | null |
code/com/caicongyang/python/study/base/pandas_sql.py
|
caicongyang/python-study
|
e5db4d1b033d183da7e9af6a8c930bcaba2962f7
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import numpy as np
import pandas as pd
'''
How to work with pandas using SQL-style queries (via pandasql)
'''
from pandas import DataFrame, Series
from pandasql import sqldf, load_meat, load_births
df1 = DataFrame({'name': ['jack', 'tony', 'pony'], 'data1': range(3)})
print(df1)
sql = "select * from df1 where name = 'jack'"
pysqldf = lambda sql: sqldf(sql, globals())
print(pysqldf(sql))
| 19.65
| 70
| 0.676845
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 151
| 0.369193
|
223ccb03fea812be5bc8d09d17bc6aa157ee7449
| 5,016
|
py
|
Python
|
tests/test_publish_parq.py
|
jacobtobias/s3parq
|
0a56fbc7d93168c68e823f05b073b8d03e67a665
|
[
"MIT"
] | null | null | null |
tests/test_publish_parq.py
|
jacobtobias/s3parq
|
0a56fbc7d93168c68e823f05b073b8d03e67a665
|
[
"MIT"
] | null | null | null |
tests/test_publish_parq.py
|
jacobtobias/s3parq
|
0a56fbc7d93168c68e823f05b073b8d03e67a665
|
[
"MIT"
] | null | null | null |
import pytest
from mock import patch
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import boto3
from string import ascii_lowercase
import random
from dfmock import DFMock
import s3parq.publish_parq as parq
import s3fs
from moto import mock_s3
@mock_s3
class Test:
def setup_s3(self):
def rand_string():
return ''.join([random.choice(ascii_lowercase) for x in range(0, 10)])
bucket = rand_string()
key = rand_string()
s3_client = boto3.client('s3')
s3_client.create_bucket(Bucket=bucket)
return bucket, key
def setup_df(self):
df = DFMock(count=100)
df.columns = {"grouped_col": {"option_count": 4, "option_type": "string"},
"text_col": "string",
"int_col": "integer",
"float_col": "float"}
df.generate_dataframe()
df.dataframe
return tuple(df.columns.keys()), df.dataframe
def test_works_without_partitions(self):
columns, dataframe = self.setup_df()
bucket, key = self.setup_s3()
partitions = []
parq.publish(bucket=bucket, key=key,
dataframe=dataframe, partitions=partitions)
def test_accepts_valid_partitions(self):
columns, dataframe = self.setup_df()
parq.check_partitions(columns, dataframe)
def test_reject_non_column_partitions(self):
columns, dataframe = self.setup_df()
with pytest.raises(ValueError):
parq.check_partitions(('banana',), dataframe)
def test_reject_timedelta_dataframes(self):
columns, dataframe = self.setup_df()
bucket, key = self.setup_s3()
partitions = ['text_col']
dataframe['time_col'] = pd.Timedelta('1 days')
with pytest.raises(NotImplementedError):
parq.publish(bucket=bucket, key=key,
dataframe=dataframe, partitions=partitions)
def test_reject_protected_name_partitions(self):
assert parq._check_partition_compatibility("shoe")
assert parq._check_partition_compatibility("all") is False
def test_generates_partitions_in_order(self):
columns, dataframe = self.setup_df()
bucket, key = self.setup_s3()
partitions = columns[:1]
with patch('s3parq.publish_parq.boto3', return_value=True) as mock_boto3:
with patch('s3parq.publish_parq.pq.write_to_dataset', return_value=None) as mock_method:
parq._gen_parquet_to_s3(bucket, key, dataframe, partitions)
arg, kwarg = mock_method.call_args
assert kwarg['partition_cols'] == partitions
def test_input_equals_output(self):
columns, dataframe = self.setup_df()
bucket, key = self.setup_s3()
s3_path = f"s3://{bucket}/{key}"
partitions = [columns[0]]
parq.publish(bucket=bucket, key=key,
dataframe=dataframe, partitions=partitions)
from_s3 = pq.ParquetDataset(s3_path, filesystem=s3fs.S3FileSystem())
s3pd = from_s3.read().to_pandas()
pre_df = dataframe
assert set(zip(s3pd.int_col, s3pd.float_col, s3pd.text_col, s3pd.grouped_col)) - \
set(zip(dataframe.int_col, dataframe.float_col,
dataframe.text_col, dataframe.grouped_col)) == set()
def test_reject_empty_dataframe(self):
dataframe = pd.DataFrame()
bucket, key = self.setup_s3()
s3_path = f"s3://{bucket}/{key}"
with pytest.raises(ValueError):
parq.publish(bucket=bucket, key=key,
dataframe=dataframe, partitions=[])
def test_set_metadata_correctly(self):
columns, dataframe = self.setup_df()
bucket, key = self.setup_s3()
s3_client = boto3.client('s3')
partitions = ['grouped_col']
parq.publish(bucket=bucket, key=key,
dataframe=dataframe, partitions=partitions)
for obj in s3_client.list_objects(Bucket=bucket)['Contents']:
if obj['Key'].endswith(".parquet"):
meta = s3_client.get_object(
Bucket=bucket, Key=obj['Key'])['Metadata']
assert meta['partition_data_types'] == str(
{"grouped_col": "string"})
'''
## timedeltas no good
def test_timedeltas_rejected(self):
bucket = MockHelper().random_name()
key = MockHelper().random_name()
s3_client = boto3.client('s3')
s3_client.create_bucket(Bucket=bucket)
df = DFMock(count=100)
df.columns = {"timedelta_col": "timedelta", "int_col": "int", "float_col": "float",
"bool_col": "boolean", "grouped_col": {"option_count": 4, "option_type": "string"}}
df.generate_dataframe()
with pytest.raises(NotImplementedError):
parq = pub_parq.S3PublishParq(
dataframe=df.dataframe, bucket=bucket, key=key, partitions=['grouped_col'])
'''
| 36.347826
| 105
| 0.622608
| 4,737
| 0.944378
| 0
| 0
| 4,746
| 0.946172
| 0
| 0
| 1,077
| 0.214713
|
223dcc250b1cfb0dc6bb2a2c2757035efcba4a73
| 2,936
|
py
|
Python
|
docs/examples/examplesCategorization.py
|
benstear/pyiomica
|
bc26032b610fc911cc03b54115d6abdf53a56fce
|
[
"MIT"
] | 12
|
2019-11-22T21:44:13.000Z
|
2022-03-06T21:46:31.000Z
|
docs/examples/examplesCategorization.py
|
benstear/pyiomica
|
bc26032b610fc911cc03b54115d6abdf53a56fce
|
[
"MIT"
] | null | null | null |
docs/examples/examplesCategorization.py
|
benstear/pyiomica
|
bc26032b610fc911cc03b54115d6abdf53a56fce
|
[
"MIT"
] | 5
|
2019-07-25T21:03:42.000Z
|
2021-06-09T02:14:30.000Z
|
#import sys
#sys.path.append("../..")
import pyiomica as pio
from pyiomica import categorizationFunctions as cf
if __name__ == '__main__':
# Unzip example data
with pio.zipfile.ZipFile(pio.os.path.join(pio.ConstantPyIOmicaExamplesDirectory, 'SLV.zip'), "r") as zipFile:
zipFile.extractall(path=pio.ConstantPyIOmicaExamplesDirectory)
# Process sample dataset SLV_Hourly1
    # Name of the first data set
dataName = 'SLV_Hourly1TimeSeries'
    # Define a directory name where results will be saved
saveDir = pio.os.path.join('results', dataName, '')
# Directory name where example data is (*.csv files)
dataDir = pio.os.path.join(pio.ConstantPyIOmicaExamplesDirectory, 'SLV')
# Read the example data into a DataFrame
df_data = pio.pd.read_csv(pio.os.path.join(dataDir, dataName + '.csv'), index_col=[0,1,2], header=0)
# Calculate time series categorization
cf.calculateTimeSeriesCategorization(df_data, dataName, saveDir, NumberOfRandomSamples = 10**5)
# Cluster the time series categorization results
cf.clusterTimeSeriesCategorization(dataName, saveDir)
# Make plots of the clustered time series categorization
cf.visualizeTimeSeriesCategorization(dataName, saveDir)
# Process sample dataset SLV_Hourly2, in the same way as SLV_Hourly1 above
dataName = 'SLV_Hourly2TimeSeries'
saveDir = pio.os.path.join('results', dataName, '')
dataDir = pio.os.path.join(pio.ConstantPyIOmicaExamplesDirectory, 'SLV')
df_data = pio.pd.read_csv(pio.os.path.join(dataDir, dataName + '.csv'), index_col=[0,1,2], header=0)
cf.calculateTimeSeriesCategorization(df_data, dataName, saveDir, NumberOfRandomSamples = 10**5)
cf.clusterTimeSeriesCategorization(dataName, saveDir)
cf.visualizeTimeSeriesCategorization(dataName, saveDir)
# Import data storage submodule to read results of processing sample datasets SLV_Hourly1 and SLV_Hourly2
from pyiomica import dataStorage as ds
# Use results from processing sample datasets SLV_Hourly1 and SLV_Hourly2 to calculate "Delta"
dataName = 'SLV_Hourly1TimeSeries'
df_data_processed_H1 = ds.read(dataName+'_df_data_transformed', hdf5fileName=pio.os.path.join('results',dataName,dataName+'.h5'))
dataName = 'SLV_Hourly2TimeSeries'
df_data_processed_H2 = ds.read(dataName+'_df_data_transformed', hdf5fileName=pio.os.path.join('results',dataName,dataName+'.h5'))
dataName = 'SLV_Delta'
saveDir = pio.os.path.join('results', dataName, '')
df_data = df_data_processed_H2.compareTwoTimeSeries(df_data_processed_H1, compareAllLevelsInIndex=False, mergeFunction=pio.np.median).fillna(0.)
cf.calculateTimeSeriesCategorization(df_data, dataName, saveDir, NumberOfRandomSamples = 10**5)
cf.clusterTimeSeriesCategorization(dataName, saveDir)
cf.visualizeTimeSeriesCategorization(dataName, saveDir)
| 47.354839
| 149
| 0.742166
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 947
| 0.322548
|
223df95481a53841ba1260a2ab6e7adbdff16f31
| 2,612
|
py
|
Python
|
tworaven_apps/eventdata_queries/initialization/icews_unique_count.py
|
TwoRavens/TwoRavens
|
e5f820557d6646df525ceed15e17d79f4159cf0a
|
[
"Apache-2.0"
] | 20
|
2017-12-11T07:26:06.000Z
|
2021-11-22T16:16:20.000Z
|
tworaven_apps/eventdata_queries/initialization/icews_unique_count.py
|
TwoRavens/TwoRavens
|
e5f820557d6646df525ceed15e17d79f4159cf0a
|
[
"Apache-2.0"
] | 849
|
2017-10-20T18:21:18.000Z
|
2022-02-18T02:45:44.000Z
|
tworaven_apps/eventdata_queries/initialization/icews_unique_count.py
|
TwoRavens/TwoRavens
|
e5f820557d6646df525ceed15e17d79f4159cf0a
|
[
"Apache-2.0"
] | 1
|
2020-05-18T06:02:13.000Z
|
2020-05-18T06:02:13.000Z
|
# returns number of unique records for icews with different filtering:
# -by rounded lat/lon (100,000)
# -by country, district, province, city (100,000)
# -by lat/lon, filtered by 2 or more matches (70,000)
from pymongo import MongoClient
import os
mongo_client = MongoClient(host='localhost', port=27017) # Default port
db = mongo_client.event_data
def icews_coordinates_rounded():
print(list(db.icews.aggregate([
{
"$project": {
"_id": 0,
"lat_rounded": {
"$divide": [
{
"$subtract": [
{
"$multiply": [
"$Latitude",
100
]
},
{
"$mod": [
{
"$multiply": [
"$Latitude",
100
]
},
1
]
}
]
},
100
]
},
"lon_rounded": {
"$divide": [
{
"$subtract": [
{
"$multiply": [
"$Longitude",
100
]
},
{
"$mod": [
{
"$multiply": [
"$Longitude",
100
]
},
1
]
}
]
},
100
]
}
}
},
{
"$group": {
"_id": {
"latitude": "$lat_rounded",
"longitude": "$lon_rounded"
}
}
},
{
"$count": "uniques"
}
])))
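# The multiply / mod / divide pipeline above truncates coordinates to two decimal places,
# e.g. 52.3789 -> 5237.89 -> 5237.89 - 0.89 = 5237 -> 52.37, so unique locations are counted
# on a roughly 0.01-degree grid rather than on exact coordinates.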
def icews_coordinates():
print(list(db.icews.aggregate([
{
"$group": {
"_id": {
"lat": "$Latitude",
"lon": "$Longitude"
}
}
},
{
"$count": "uniques"
}
])))
def icews_names():
print(list(db.icews.aggregate([
{
"$project": {
"country": {"$toLower": "$Country"},
"district": {"$toLower": "$District"},
"province": {"$toLower": "$Province"},
"city": {"$toLower": "$City"}
}
},
{
"$group": {
"_id": {
"Country": "$country",
"District": "$district",
"Province": "$province",
"City": "$city"
},
"total": {"$sum": 1}
}
},
{
"$match": {"total": {"$gt": 1}}
},
{
"$count": "uniques"
}
])))
# icews_coordinates_rounded()
icews_coordinates()
# icews_names()
| 20.092308
| 72
| 0.343415
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 858
| 0.328484
|
223edb2e5970e591f491deb0d0fde065371aadb5
| 7,576
|
py
|
Python
|
OcCo_Torch/models/pointnet_util.py
|
sun-pyo/OcCo
|
e2e12dbaa8f9b98fb8c42fc32682f49e99be302f
|
[
"MIT"
] | 158
|
2020-08-19T18:13:28.000Z
|
2022-03-30T13:55:32.000Z
|
OcCo_Torch/models/pointnet_util.py
|
sun-pyo/OcCo
|
e2e12dbaa8f9b98fb8c42fc32682f49e99be302f
|
[
"MIT"
] | 28
|
2020-05-30T04:02:33.000Z
|
2022-03-30T15:46:38.000Z
|
OcCo_Torch/models/pointnet_util.py
|
sun-pyo/OcCo
|
e2e12dbaa8f9b98fb8c42fc32682f49e99be302f
|
[
"MIT"
] | 18
|
2020-08-19T19:52:38.000Z
|
2022-02-06T11:42:26.000Z
|
# Copyright (c) 2020. Hanchen Wang, hw501@cam.ac.uk
# Ref: https://github.com/fxia22/pointnet.pytorch/pointnet/model.py
import torch, torch.nn as nn, numpy as np, torch.nn.functional as F
from torch.autograd import Variable
def feature_transform_regularizer(trans):
d = trans.size()[1]
I = torch.eye(d)[None, :, :]
if trans.is_cuda:
I = I.cuda()
    loss = torch.mean(torch.norm(torch.bmm(trans, trans.transpose(2, 1)) - I, dim=(1, 2)))
return loss
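# Usage sketch (assumptions: a hypothetical task_loss and the commonly used 0.001 weight):
#
#     points = torch.rand(8, 3, 1024)                                    # B x C x N point cloud
#     feat, trans, trans_feat = PointNetEncoder(feature_transform=True)(points)
#     loss = task_loss + 0.001 * feature_transform_regularizer(trans_feat)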
# STN -> Spatial Transformer Network
class STN3d(nn.Module):
def __init__(self, channel):
super(STN3d, self).__init__()
self.conv1 = nn.Conv1d(channel, 64, 1) # in-channel, out-channel, kernel size
self.conv2 = nn.Conv1d(64, 128, 1)
self.conv3 = nn.Conv1d(128, 1024, 1)
self.fc1 = nn.Linear(1024, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, 9)
self.relu = nn.ReLU()
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(1024)
self.bn4 = nn.BatchNorm1d(512)
self.bn5 = nn.BatchNorm1d(256)
def forward(self, x):
B = x.size()[0]
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = torch.max(x, 2, keepdim=False)[0] # global descriptors
x = F.relu(self.bn4(self.fc1(x)))
x = F.relu(self.bn5(self.fc2(x)))
x = self.fc3(x)
iden = Variable(torch.from_numpy(np.eye(3).flatten().astype(np.float32))).view(1, 9).repeat(B, 1)
if x.is_cuda:
iden = iden.cuda()
x = x + iden
x = x.view(-1, 3, 3)
return x
class STNkd(nn.Module):
def __init__(self, k=64):
super(STNkd, self).__init__()
self.conv1 = nn.Conv1d(k, 64, 1)
self.conv2 = nn.Conv1d(64, 128, 1)
self.conv3 = nn.Conv1d(128, 1024, 1)
self.fc1 = nn.Linear(1024, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, k * k)
self.relu = nn.ReLU()
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(1024)
self.bn4 = nn.BatchNorm1d(512)
self.bn5 = nn.BatchNorm1d(256)
self.k = k
def forward(self, x):
B = x.size()[0]
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = torch.max(x, 2, keepdim=False)[0]
x = F.relu(self.bn4(self.fc1(x)))
x = F.relu(self.bn5(self.fc2(x)))
x = self.fc3(x)
iden = Variable(torch.from_numpy(np.eye(self.k).flatten().astype(np.float32))).view(
1, self.k ** 2).repeat(B, 1)
if x.is_cuda:
iden = iden.cuda()
x = x + iden
x = x.view(-1, self.k, self.k)
return x
class PointNetEncoder(nn.Module):
def __init__(self, global_feat=True, feature_transform=False,
channel=3, detailed=False):
        # channel is 3 for plain xyz input and larger (e.g. 6) when the input includes normals
super(PointNetEncoder, self).__init__()
self.stn = STN3d(channel) # Batch * 3 * 3
self.conv1 = nn.Conv1d(channel, 64, 1)
self.conv2 = nn.Conv1d(64, 128, 1)
self.conv3 = nn.Conv1d(128, 1024, 1)
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(1024)
self.global_feat = global_feat
self.feature_transform = feature_transform
if self.feature_transform:
self.fstn = STNkd(k=64)
self.detailed = detailed
def forward(self, x):
_, D, N = x.size() # Batch Size, Dimension of Point Features, Num of Points
trans = self.stn(x)
x = x.transpose(2, 1)
if D > 3:
# pdb.set_trace()
x, feature = x.split([3, D-3], dim=2)
x = torch.bmm(x, trans)
# feature = torch.bmm(feature, trans) # feature -> normals
if D > 3:
x = torch.cat([x, feature], dim=2)
x = x.transpose(2, 1)
out1 = self.bn1(self.conv1(x))
x = F.relu(out1)
if self.feature_transform:
trans_feat = self.fstn(x)
x = x.transpose(2, 1)
x = torch.bmm(x, trans_feat)
x = x.transpose(2, 1)
else:
trans_feat = None
pointfeat = x
out2 = self.bn2(self.conv2(x))
x = F.relu(out2)
out3 = self.bn3(self.conv3(x))
# x = self.bn3(self.conv3(x))
x = torch.max(out3, 2, keepdim=False)[0]
if self.global_feat:
return x, trans, trans_feat
elif self.detailed:
return out1, out2, out3, x
else: # concatenate global and local feature together
x = x.view(-1, 1024, 1).repeat(1, 1, N)
return torch.cat([x, pointfeat], 1), trans, trans_feat
class PointNetPartSegEncoder(nn.Module):
def __init__(self, feature_transform=True, channel=3):
super(PointNetPartSegEncoder, self).__init__()
self.stn = STN3d(channel)
self.conv1 = nn.Conv1d(channel, 64, 1)
self.conv2 = nn.Conv1d(64, 128, 1)
self.conv3 = nn.Conv1d(128, 128, 1)
self.conv4 = nn.Conv1d(128, 512, 1)
self.conv5 = nn.Conv1d(512, 2048, 1)
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(128)
self.bn3 = nn.BatchNorm1d(128)
self.bn4 = nn.BatchNorm1d(512)
self.bn5 = nn.BatchNorm1d(2048)
self.feature_transform = feature_transform
if self.feature_transform:
self.fstn = STNkd(k=128)
def forward(self, point_cloud, label):
B, D, N = point_cloud.size()
trans = self.stn(point_cloud)
point_cloud = point_cloud.transpose(2, 1)
if D > 3:
point_cloud, feature = point_cloud.split(3, dim=2)
point_cloud = torch.bmm(point_cloud, trans)
if D > 3:
point_cloud = torch.cat([point_cloud, feature], dim=2)
point_cloud = point_cloud.transpose(2, 1)
out1 = F.relu(self.bn1(self.conv1(point_cloud)))
out2 = F.relu(self.bn2(self.conv2(out1)))
out3 = F.relu(self.bn3(self.conv3(out2)))
if self.feature_transform:
trans_feat = self.fstn(out3)
net_transformed = torch.bmm(out3.transpose(2, 1), trans_feat)
out3 = net_transformed.transpose(2, 1)
out4 = F.relu(self.bn4(self.conv4(out3)))
out5 = self.bn5(self.conv5(out4))
out_max = torch.max(out5, 2, keepdim=False)[0]
out_max = torch.cat([out_max, label.squeeze(1)], 1)
expand = out_max.view(-1, 2048 + 16, 1).repeat(1, 1, N)
concat = torch.cat([expand, out1, out2, out3, out4, out5], 1)
if self.feature_transform:
return concat, trans_feat
return concat
class encoder(nn.Module):
def __init__(self, num_channel=3, **kwargs):
super(encoder, self).__init__()
self.feat = PointNetEncoder(global_feat=True, channel=num_channel)
def forward(self, x):
feat, _, _ = self.feat(x)
return feat
class detailed_encoder(nn.Module):
def __init__(self, num_channel=3, **kwargs):
super(detailed_encoder, self).__init__()
self.feat = PointNetEncoder(global_feat=False,
channel=num_channel,
detailed=True)
def forward(self, x):
out1, out2, out3, x = self.feat(x)
return out1, out2, out3, x
| 33.522124
| 105
| 0.567318
| 7,044
| 0.929778
| 0
| 0
| 0
| 0
| 0
| 0
| 469
| 0.061906
|
2241d0bcc483df8ae598bcac2ffa98c9d73b4660
| 747
|
py
|
Python
|
Week#3__Assignment#3/join.py
|
P7h/IntroToDataScience__Coursera_Course
|
4f3d4073e552c7e6f5d1e31c0391eccec32d3786
|
[
"Apache-2.0"
] | 1
|
2015-10-26T00:32:09.000Z
|
2015-10-26T00:32:09.000Z
|
Week#3__Assignment#3/join.py
|
P7h/IntroToDataScience__Coursera_Course
|
4f3d4073e552c7e6f5d1e31c0391eccec32d3786
|
[
"Apache-2.0"
] | null | null | null |
Week#3__Assignment#3/join.py
|
P7h/IntroToDataScience__Coursera_Course
|
4f3d4073e552c7e6f5d1e31c0391eccec32d3786
|
[
"Apache-2.0"
] | null | null | null |
import MapReduce
import sys
"""
SQL style Joins in MapReduce
"""
mr = MapReduce.MapReduce()
# =============================
# Do not modify above this line
def mapper(record):
# key: document identifier
# value: document contents
value = str(record[0])
num = str(record[1])
mr.emit_intermediate(num, (value, record))
def reducer(key, list_of_values):
# key: word
# value: list of occurrence counts
x,y = list_of_values[0]
for val in range(len(list_of_values)):
f = []
a,b = list_of_values[val]
if a == 'line_item':
f += y
f += b
mr.emit(f)
# Do not modify below this line
# =============================
if __name__ == '__main__':
inputdata = open(sys.argv[1])
mr.execute(inputdata, mapper, reducer)
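# Illustrative record layout (assumed from the course assignment data, not defined in this file):
# each input record is a list whose first element is the table name and second the order id, e.g.
#   ['order', 1, ...order columns...]
#   ['line_item', 1, ...line item columns...]
# The mapper groups records by order id; the reducer treats the first value for a key as the
# order row and emits one combined [order columns + line_item columns] row per line item.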
| 20.189189
| 44
| 0.609103
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 278
| 0.372155
|
224336ed5538a9d61b10299e07b1b099064fc032
| 823
|
py
|
Python
|
tests/test_testresources.py
|
sjoerdk/anonapi
|
ade94da2b8eb2fb94f831ef77e563f750f88d0ba
|
[
"MIT"
] | null | null | null |
tests/test_testresources.py
|
sjoerdk/anonapi
|
ade94da2b8eb2fb94f831ef77e563f750f88d0ba
|
[
"MIT"
] | 408
|
2018-11-24T19:41:10.000Z
|
2022-03-31T23:48:54.000Z
|
tests/test_testresources.py
|
sjoerdk/anonapi
|
ade94da2b8eb2fb94f831ef77e563f750f88d0ba
|
[
"MIT"
] | 2
|
2018-11-11T12:56:03.000Z
|
2021-08-09T01:53:07.000Z
|
from anonapi.testresources import (
MockAnonClientTool,
JobInfoFactory,
RemoteAnonServerFactory,
JobStatus,
)
def test_mock_anon_client_tool():
some_responses = [
JobInfoFactory(status=JobStatus.DONE),
JobInfoFactory(status=JobStatus.ERROR),
JobInfoFactory(status=JobStatus.INACTIVE),
]
tool = MockAnonClientTool(responses=some_responses)
server = RemoteAnonServerFactory()
assert tool.get_job_info(server=server, job_id=1).status == JobStatus.DONE
assert [x.status for x in tool.get_job_info_list(server, [1, 2, 3])] == [
JobStatus.ERROR,
JobStatus.INACTIVE,
JobStatus.DONE,
]
# job id returned will always match whatever was requested
assert [x.job_id for x in tool.get_job_info_list(server, [1, 4, 6])] == [1, 4, 6]
| 30.481481
| 85
| 0.690158
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 58
| 0.070474
|
2243b6cc5ce377cffdd91e6609c4b0155421a8c1
| 13,442
|
py
|
Python
|
homeassistant/components/mikrotik/hub.py
|
jvitkauskas/home-assistant
|
3718b25bd9528188530f291f0810a1c7970abcdb
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/mikrotik/hub.py
|
jvitkauskas/home-assistant
|
3718b25bd9528188530f291f0810a1c7970abcdb
|
[
"Apache-2.0"
] | 7
|
2016-04-09T20:56:30.000Z
|
2016-04-19T21:28:46.000Z
|
homeassistant/components/mikrotik/hub.py
|
jvitkauskas/home-assistant
|
3718b25bd9528188530f291f0810a1c7970abcdb
|
[
"Apache-2.0"
] | null | null | null |
"""The Mikrotik router class."""
from datetime import timedelta
import logging
import socket
import ssl
import librouteros
from librouteros.login import plain as login_plain, token as login_token
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME, CONF_VERIFY_SSL
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.util import slugify
import homeassistant.util.dt as dt_util
from .const import (
ARP,
ATTR_DEVICE_TRACKER,
ATTR_FIRMWARE,
ATTR_MODEL,
ATTR_SERIAL_NUMBER,
CAPSMAN,
CONF_ARP_PING,
CONF_DETECTION_TIME,
CONF_FORCE_DHCP,
DEFAULT_DETECTION_TIME,
DHCP,
IDENTITY,
INFO,
IS_WIRELESS,
MIKROTIK_SERVICES,
NAME,
WIRELESS,
)
from .errors import CannotConnect, LoginError
_LOGGER = logging.getLogger(__name__)
class Device:
"""Represents a network device."""
def __init__(self, mac, params):
"""Initialize the network device."""
self._mac = mac
self._params = params
self._last_seen = None
self._attrs = {}
self._wireless_params = None
@property
def name(self):
"""Return device name."""
return self._params.get("host-name", self.mac)
@property
def mac(self):
"""Return device mac."""
return self._mac
@property
def last_seen(self):
"""Return device last seen."""
return self._last_seen
@property
def attrs(self):
"""Return device attributes."""
attr_data = self._wireless_params if self._wireless_params else self._params
for attr in ATTR_DEVICE_TRACKER:
if attr in attr_data:
self._attrs[slugify(attr)] = attr_data[attr]
self._attrs["ip_address"] = self._params.get("active-address")
return self._attrs
def update(self, wireless_params=None, params=None, active=False):
"""Update Device params."""
if wireless_params:
self._wireless_params = wireless_params
if params:
self._params = params
if active:
self._last_seen = dt_util.utcnow()
class MikrotikData:
"""Handle all communication with the Mikrotik API."""
def __init__(self, hass, config_entry, api):
"""Initialize the Mikrotik Client."""
self.hass = hass
self.config_entry = config_entry
self.api = api
self._host = self.config_entry.data[CONF_HOST]
self.all_devices = {}
self.devices = {}
self.available = True
self.support_wireless = bool(self.command(MIKROTIK_SERVICES[IS_WIRELESS]))
self.hostname = None
self.model = None
self.firmware = None
self.serial_number = None
@staticmethod
def load_mac(devices=None):
"""Load dictionary using MAC address as key."""
if not devices:
return None
mac_devices = {}
for device in devices:
if "mac-address" in device:
mac = device["mac-address"]
mac_devices[mac] = device
return mac_devices
@property
def arp_enabled(self):
"""Return arp_ping option setting."""
return self.config_entry.options[CONF_ARP_PING]
@property
def force_dhcp(self):
"""Return force_dhcp option setting."""
return self.config_entry.options[CONF_FORCE_DHCP]
def get_info(self, param):
"""Return device model name."""
cmd = IDENTITY if param == NAME else INFO
data = list(self.command(MIKROTIK_SERVICES[cmd]))
return data[0].get(param) if data else None
def get_hub_details(self):
"""Get Hub info."""
self.hostname = self.get_info(NAME)
self.model = self.get_info(ATTR_MODEL)
self.firmware = self.get_info(ATTR_FIRMWARE)
self.serial_number = self.get_info(ATTR_SERIAL_NUMBER)
def connect_to_hub(self):
"""Connect to hub."""
try:
self.api = get_api(self.hass, self.config_entry.data)
self.available = True
return True
except (LoginError, CannotConnect):
self.available = False
return False
def get_list_from_interface(self, interface):
"""Get devices from interface."""
result = list(self.command(MIKROTIK_SERVICES[interface]))
return self.load_mac(result) if result else {}
def restore_device(self, mac):
"""Restore a missing device after restart."""
self.devices[mac] = Device(mac, self.all_devices[mac])
def update_devices(self):
"""Get list of devices with latest status."""
arp_devices = {}
wireless_devices = {}
device_list = {}
try:
self.all_devices = self.get_list_from_interface(DHCP)
if self.support_wireless:
_LOGGER.debug("wireless is supported")
for interface in [CAPSMAN, WIRELESS]:
wireless_devices = self.get_list_from_interface(interface)
if wireless_devices:
_LOGGER.debug("Scanning wireless devices using %s", interface)
break
if self.support_wireless and not self.force_dhcp:
device_list = wireless_devices
else:
device_list = self.all_devices
_LOGGER.debug("Falling back to DHCP for scanning devices")
if self.arp_enabled:
arp_devices = self.get_list_from_interface(ARP)
# get new hub firmware version if updated
self.firmware = self.get_info(ATTR_FIRMWARE)
except (CannotConnect, socket.timeout, socket.error):
self.available = False
return
if not device_list:
return
for mac, params in device_list.items():
if mac not in self.devices:
self.devices[mac] = Device(mac, self.all_devices.get(mac, {}))
else:
self.devices[mac].update(params=self.all_devices.get(mac, {}))
if mac in wireless_devices:
# if wireless is supported then wireless_params are params
self.devices[mac].update(
wireless_params=wireless_devices[mac], active=True
)
continue
# for wired devices or when forcing dhcp check for active-address
if not params.get("active-address"):
self.devices[mac].update(active=False)
continue
# ping check the rest of active devices if arp ping is enabled
active = True
if self.arp_enabled and mac in arp_devices:
active = self.do_arp_ping(
params.get("active-address"), arp_devices[mac].get("interface")
)
self.devices[mac].update(active=active)
def do_arp_ping(self, ip_address, interface):
"""Attempt to arp ping MAC address via interface."""
_LOGGER.debug("pinging - %s", ip_address)
params = {
"arp-ping": "yes",
"interval": "100ms",
"count": 3,
"interface": interface,
"address": ip_address,
}
cmd = "/ping"
data = list(self.command(cmd, params))
if data is not None:
status = 0
for result in data:
if "status" in result:
status += 1
if status == len(data):
_LOGGER.debug(
"Mikrotik %s - %s arp_ping timed out", ip_address, interface
)
return False
return True
def command(self, cmd, params=None):
"""Retrieve data from Mikrotik API."""
try:
_LOGGER.info("Running command %s", cmd)
if params:
response = self.api(cmd=cmd, **params)
else:
response = self.api(cmd=cmd)
except (
librouteros.exceptions.ConnectionClosed,
socket.error,
socket.timeout,
) as api_error:
_LOGGER.error("Mikrotik %s connection error %s", self._host, api_error)
raise CannotConnect
except librouteros.exceptions.ProtocolError as api_error:
_LOGGER.warning(
"Mikrotik %s failed to retrieve data. cmd=[%s] Error: %s",
self._host,
cmd,
api_error,
)
return None
return response if response else None
def update(self):
"""Update device_tracker from Mikrotik API."""
if not self.available or not self.api:
if not self.connect_to_hub():
return
_LOGGER.debug("updating network devices for host: %s", self._host)
self.update_devices()
class MikrotikHub:
"""Mikrotik Hub Object."""
def __init__(self, hass, config_entry):
"""Initialize the Mikrotik Client."""
self.hass = hass
self.config_entry = config_entry
self._mk_data = None
self.progress = None
@property
def host(self):
"""Return the host of this hub."""
return self.config_entry.data[CONF_HOST]
@property
def hostname(self):
"""Return the hostname of the hub."""
return self._mk_data.hostname
@property
def model(self):
"""Return the model of the hub."""
return self._mk_data.model
@property
def firmware(self):
"""Return the firware of the hub."""
return self._mk_data.firmware
@property
def serial_num(self):
"""Return the serial number of the hub."""
return self._mk_data.serial_number
@property
def available(self):
"""Return if the hub is connected."""
return self._mk_data.available
@property
def option_detection_time(self):
"""Config entry option defining number of seconds from last seen to away."""
return timedelta(seconds=self.config_entry.options[CONF_DETECTION_TIME])
@property
def signal_update(self):
"""Event specific per Mikrotik entry to signal updates."""
return f"mikrotik-update-{self.host}"
@property
def api(self):
"""Represent Mikrotik data object."""
return self._mk_data
async def async_add_options(self):
"""Populate default options for Mikrotik."""
if not self.config_entry.options:
options = {
CONF_ARP_PING: self.config_entry.data.pop(CONF_ARP_PING, False),
CONF_FORCE_DHCP: self.config_entry.data.pop(CONF_FORCE_DHCP, False),
CONF_DETECTION_TIME: self.config_entry.data.pop(
CONF_DETECTION_TIME, DEFAULT_DETECTION_TIME
),
}
self.hass.config_entries.async_update_entry(
self.config_entry, options=options
)
async def request_update(self):
"""Request an update."""
if self.progress is not None:
await self.progress
return
self.progress = self.hass.async_create_task(self.async_update())
await self.progress
self.progress = None
async def async_update(self):
"""Update Mikrotik devices information."""
await self.hass.async_add_executor_job(self._mk_data.update)
async_dispatcher_send(self.hass, self.signal_update)
async def async_setup(self):
"""Set up the Mikrotik hub."""
try:
api = await self.hass.async_add_executor_job(
get_api, self.hass, self.config_entry.data
)
except CannotConnect:
raise ConfigEntryNotReady
except LoginError:
return False
self._mk_data = MikrotikData(self.hass, self.config_entry, api)
await self.async_add_options()
await self.hass.async_add_executor_job(self._mk_data.get_hub_details)
await self.hass.async_add_executor_job(self._mk_data.update)
self.hass.async_create_task(
self.hass.config_entries.async_forward_entry_setup(
self.config_entry, "device_tracker"
)
)
return True
def get_api(hass, entry):
"""Connect to Mikrotik hub."""
_LOGGER.debug("Connecting to Mikrotik hub [%s]", entry[CONF_HOST])
_login_method = (login_plain, login_token)
kwargs = {"login_methods": _login_method, "port": entry["port"]}
if entry[CONF_VERIFY_SSL]:
ssl_context = ssl.create_default_context()
ssl_context.check_hostname = False
ssl_context.verify_mode = ssl.CERT_NONE
_ssl_wrapper = ssl_context.wrap_socket
kwargs["ssl_wrapper"] = _ssl_wrapper
try:
api = librouteros.connect(
entry[CONF_HOST], entry[CONF_USERNAME], entry[CONF_PASSWORD], **kwargs,
)
_LOGGER.debug("Connected to %s successfully", entry[CONF_HOST])
return api
except (
librouteros.exceptions.LibRouterosError,
socket.error,
socket.timeout,
) as api_error:
_LOGGER.error("Mikrotik %s error: %s", entry[CONF_HOST], api_error)
if "invalid user name or password" in str(api_error):
raise LoginError
raise CannotConnect
| 32.468599
| 88
| 0.602515
| 11,467
| 0.853072
| 0
| 0
| 2,510
| 0.186728
| 1,887
| 0.140381
| 2,319
| 0.172519
|
22441b73e069c3528a08a1ed36f4c0850e6085f0
| 4,137
|
py
|
Python
|
gemlog_from_rss/spip/page.py
|
Hookz/Gemlog-from-RSS
|
b57a311db3008e8b0df2442236c4729a06d9b74d
|
[
"MIT"
] | 1
|
2021-02-19T16:06:07.000Z
|
2021-02-19T16:06:07.000Z
|
gemlog_from_rss/spip/page.py
|
Hookz/Gemlog-from-RSS
|
b57a311db3008e8b0df2442236c4729a06d9b74d
|
[
"MIT"
] | null | null | null |
gemlog_from_rss/spip/page.py
|
Hookz/Gemlog-from-RSS
|
b57a311db3008e8b0df2442236c4729a06d9b74d
|
[
"MIT"
] | null | null | null |
from bs4 import BeautifulSoup
import requests
import re
from lxml.html import fromstring
from lxml.etree import ParserError
from gemlog_from_rss.spip import SinglePost
def localize(lang):
if lang == "pl":
return {
"our_articles": "Nasze artykuły"
}
if lang == "fr":
return {
"our_articles": "Nos articles"
}
else:
return {
"our_articles": "Our articles"
}
class Page:
def __init__(self, title=None, content_class="article-texte"):
self.content = None
self.url = None
self.title = title
self.content_class = content_class
def download_html(self, url):
self.url = url
req = requests.get(self.url)
open(f'resources/{self.url.split("/")[-1].split("?")[0]}.html', 'wb').write(req.content)
def fetch_content(self):
file_content = open(f'resources/{self.url.split("/")[-1].split("?")[0]}.html', 'r').read()\
.replace("<strong>", "\n**").replace("</strong>", "**")
soup = BeautifulSoup(file_content, 'html.parser')
if self.title is None:
self.title = soup.title.get_text()
self.content = soup.find("div", class_=re.compile(self.content_class)).get_text().split("Credits")[0]
def output_gemini(self):
uit = f"""
##{self.title}
{self.content}
"""
class MainPage:
def __init__(self, post_list=None, root_dir="blog", title=None, pages_list=None, feed=None, lang="en"):
if post_list is None:
post_list = []
if pages_list is None:
pages_list = []
if root_dir[-1] == "/":
root_dir = root_dir[:-1]
self.post_list = post_list
self.title = title
self.root_dir = root_dir
self.pages_list = pages_list
self.feed = feed
if self.feed is not None:
self.parse_feed()
if self.title is None:
import xml.etree.ElementTree as ET
self.title = ET.parse("resources/all_posts.xml").getroot()[0].find("title").text
self.lang = lang
self.dict = localize(lang)
self.root = None
def parse_feed(self):
req = requests.get(self.feed)
open("resources/all_posts.xml", "wb").write(req.content)
return req.content
def add_posts(self):
for child in self.root[0].findall('item'):
if child.find('{http://purl.org/rss/1.0/modules/content/}encoded') is not None:
substituting = re.sub(' class=".*?"', '',
child.find('{http://purl.org/rss/1.0/modules/content/}encoded').text)
substituting = re.sub('<h3>', '\n### ', substituting)
substituting = re.sub('</h3>', '\n', substituting)
substituting = re.sub('<img.*?>', '', substituting)
try:
self.post_list.append(SinglePost(
child.find("title").text,
fromstring(substituting).text_content(),
child.find('{http://purl.org/dc/elements/1.1/}creator').text,
child.find('{http://purl.org/dc/elements/1.1/}date').text.split("T")[0])
)
except ParserError:
continue
def create_files(self):
for post in self.post_list:
with open(f'{self.root_dir}/{post.link}', 'w') as f:
f.write(post.get_gemini(blog_name=self.title))
def add_page(self, page):
self.pages_list.append(page)
def add_pages(self, pages):
for page in pages:
self.add_page(page)
def add_pages_to_main(self):
content = f""
for page in self.pages_list:
content += f"## {page.title}\n\n" \
f"{page.content}\n\n"
return content
def make_main_page(self):
content = f"""
# {self.title}
{self.add_pages_to_main()}
## {self.dict["our_articles"]}:
"""
for post in self.post_list:
content += f"=> {post.link} {post.title}\n\n"
return content
| 32.069767
| 109
| 0.547982
| 3,677
| 0.888594
| 0
| 0
| 0
| 0
| 0
| 0
| 854
| 0.20638
|
2245fe03d22e27328780ecdf4fb5f3ecb80912ed
| 1,421
|
py
|
Python
|
simulation/aivika/modeler/arrival_timer.py
|
dsorokin/aivika-modeler
|
5c112015f9af6ba1974a3b208842da01e705f9ac
|
[
"BSD-3-Clause"
] | null | null | null |
simulation/aivika/modeler/arrival_timer.py
|
dsorokin/aivika-modeler
|
5c112015f9af6ba1974a3b208842da01e705f9ac
|
[
"BSD-3-Clause"
] | null | null | null |
simulation/aivika/modeler/arrival_timer.py
|
dsorokin/aivika-modeler
|
5c112015f9af6ba1974a3b208842da01e705f9ac
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2017 David Sorokin <david.sorokin@gmail.com>
#
# Licensed under BSD3. See the LICENSE.txt file in the root of this distribution.
from simulation.aivika.modeler.model import *
from simulation.aivika.modeler.port import *
def create_arrival_timer(model, name, descr = None):
"""Return a new timer that allows measuring the processing time of transacts."""
y = ArrivalTimerPort(model, name = name, descr = descr)
code = 'newArrivalTimer'
y.write(code)
return y
def arrival_timer_stream(arrival_timer_port, stream_port):
"""Measure the processing time of transacts from the specified stream within the resulting stream."""
t = arrival_timer_port
s = stream_port
expect_arrival_timer(t)
expect_stream(s)
expect_same_model([t, s])
model = t.get_model()
item_data_type = s.get_item_data_type()
code = 'return $ runProcessor (arrivalTimerProcessor ' + t.read() + ') ' + s.read()
y = StreamPort(model, item_data_type)
y.write(code)
y.bind_to_input()
s.bind_to_output()
return y
def reset_arrival_timer(arrival_timer_port, reset_time):
"""Reset the arrival timer statistics at the specified modeling time."""
t = arrival_timer_port
expect_arrival_timer(t)
model = t.get_model()
code = 'runEventInStartTime $ enqueueEvent ' + str(reset_time)
code += ' $ resetArrivalTimer ' + t.read()
model.add_action(code)
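# Illustrative wiring of the three helpers above; `model` and `input_stream`
# are hypothetical objects created elsewhere with this package:
# timer = create_arrival_timer(model, 'proc_time', descr='processing time')
# timed_stream = arrival_timer_stream(timer, input_stream)
# reset_arrival_timer(timer, 1000.0)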
| 36.435897
| 105
| 0.712878
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 523
| 0.368051
|
2246b2f969806313a587b3933434d8a9f94f2b5f
| 106
|
py
|
Python
|
mundo 1/aula 7/exer8.py
|
jonatan098/cursopython
|
6e4cbaef6229e230fdbc66d80ec1b5a089887b0d
|
[
"MIT"
] | null | null | null |
mundo 1/aula 7/exer8.py
|
jonatan098/cursopython
|
6e4cbaef6229e230fdbc66d80ec1b5a089887b0d
|
[
"MIT"
] | null | null | null |
mundo 1/aula 7/exer8.py
|
jonatan098/cursopython
|
6e4cbaef6229e230fdbc66d80ec1b5a089887b0d
|
[
"MIT"
] | 1
|
2020-02-22T17:21:05.000Z
|
2020-02-22T17:21:05.000Z
|
m = float(input('digite o metro '))
print(f'{m} metros e igual {m*100} centimetros e {m*1000} milimetros')
| 53
| 70
| 0.688679
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 80
| 0.754717
|
2246b7989fa46d68e0d0a29597a7a6170e8898d9
| 859
|
py
|
Python
|
ros/src/airsim_ros_pkgs/scripts/readscan.py
|
juwangvsu/AirSim
|
fcf8c7d89821a9067d53f0d083c7bc6efac1776c
|
[
"MIT"
] | null | null | null |
ros/src/airsim_ros_pkgs/scripts/readscan.py
|
juwangvsu/AirSim
|
fcf8c7d89821a9067d53f0d083c7bc6efac1776c
|
[
"MIT"
] | null | null | null |
ros/src/airsim_ros_pkgs/scripts/readscan.py
|
juwangvsu/AirSim
|
fcf8c7d89821a9067d53f0d083c7bc6efac1776c
|
[
"MIT"
] | 1
|
2021-03-23T23:11:02.000Z
|
2021-03-23T23:11:02.000Z
|
#!/usr/bin/env python
import rospy
import rosbag
import os
import sys
import textwrap
import yaml
lidarmsg=None
################# read the lidar msg from yaml file and return ##############
def readlidardummy():
global lidarmsg
if lidarmsg==None:
lidarmsg= doreadlidar()
return lidarmsg
def doreadlidar():
global lidarmsg
print('lidardummy do read')
with open(r'/media/student/data5/AirSim/ros/src/airsim_ros_pkgs/scripts/lidar_dummy.txt') as file:
# The FullLoader parameter handles the conversion from YAML
        # scalar values to Python objects in the dictionary format
        lidarmsg = yaml.load(file, Loader=yaml.FullLoader)
#print(fruits_list)
#print(lidarmsg['range_max']+20)
#print(lidarmsg['header']['stamp']['secs'])
ranges=lidarmsg['ranges']
#print(len(ranges), ranges)
return lidarmsg
if __name__ == '__main__':
readlidardummy()
| 23.861111
| 100
| 0.694994
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 441
| 0.513388
|
2247d6e5fabfa0bfb68bd37d1e8736537d83e496
| 4,375
|
py
|
Python
|
flytekit/models/qubole.py
|
slai/flytekit
|
9d73d096b748d263a638e6865d15db4880845305
|
[
"Apache-2.0"
] | null | null | null |
flytekit/models/qubole.py
|
slai/flytekit
|
9d73d096b748d263a638e6865d15db4880845305
|
[
"Apache-2.0"
] | 2
|
2021-06-26T04:32:43.000Z
|
2021-07-14T04:47:52.000Z
|
flytekit/models/qubole.py
|
slai/flytekit
|
9d73d096b748d263a638e6865d15db4880845305
|
[
"Apache-2.0"
] | null | null | null |
from flyteidl.plugins import qubole_pb2 as _qubole
from flytekit.models import common as _common
class HiveQuery(_common.FlyteIdlEntity):
def __init__(self, query, timeout_sec, retry_count):
"""
Initializes a new HiveQuery.
:param Text query: The query string.
:param int timeout_sec:
:param int retry_count:
"""
self._query = query
self._timeout_sec = timeout_sec
self._retry_count = retry_count
@property
def query(self):
"""
The query string.
:rtype: str
"""
return self._query
@property
def timeout_sec(self):
"""
:rtype: int
"""
return self._timeout_sec
@property
def retry_count(self):
"""
:rtype: int
"""
return self._retry_count
def to_flyte_idl(self):
"""
:rtype: _qubole.HiveQuery
"""
return _qubole.HiveQuery(query=self.query, timeout_sec=self.timeout_sec, retryCount=self.retry_count)
@classmethod
def from_flyte_idl(cls, pb2_object):
"""
:param _qubole.HiveQuery pb2_object:
:return: HiveQuery
"""
return cls(
query=pb2_object.query,
timeout_sec=pb2_object.timeout_sec,
retry_count=pb2_object.retryCount,
)
class HiveQueryCollection(_common.FlyteIdlEntity):
def __init__(self, queries):
"""
Initializes a new HiveQueryCollection.
:param list[HiveQuery] queries: Queries to execute.
"""
self._queries = queries
@property
def queries(self):
"""
:rtype: list[HiveQuery]
"""
return self._queries
def to_flyte_idl(self):
"""
:rtype: _qubole.HiveQueryCollection
"""
return _qubole.HiveQueryCollection(
queries=[query.to_flyte_idl() for query in self.queries] if self.queries else None
)
@classmethod
def from_flyte_idl(cls, pb2_object):
"""
:param _qubole.HiveQuery pb2_object:
:rtype: HiveQueryCollection
"""
return cls(queries=[HiveQuery.from_flyte_idl(query) for query in pb2_object.queries])
class QuboleHiveJob(_common.FlyteIdlEntity):
def __init__(self, query, cluster_label, tags, query_collection=None):
"""
Initializes a HiveJob.
:param HiveQuery query: Single query to execute
:param Text cluster_label: The qubole cluster label to execute the query on
:param list[Text] tags: User tags for the queries
:param HiveQueryCollection query_collection: Deprecated Queries to execute.
"""
self._query = query
self._cluster_label = cluster_label
self._tags = tags
self._query_collection = query_collection
@property
def query_collection(self):
"""
The queries to be executed
:rtype: HiveQueryCollection
"""
return self._query_collection
@property
def query(self):
"""
The query to be executed
:rtype: HiveQuery
"""
return self._query
@property
def cluster_label(self):
"""
The cluster label where the query should be executed
:rtype: Text
"""
return self._cluster_label
@property
def tags(self):
"""
User tags for the queries
:rtype: list[Text]
"""
return self._tags
def to_flyte_idl(self):
"""
:rtype: _qubole.QuboleHiveJob
"""
return _qubole.QuboleHiveJob(
query_collection=self._query_collection.to_flyte_idl() if self._query_collection else None,
query=self._query.to_flyte_idl() if self._query else None,
cluster_label=self._cluster_label,
tags=self._tags,
)
@classmethod
def from_flyte_idl(cls, p):
"""
:param _qubole.QuboleHiveJob p:
:rtype: QuboleHiveJob
"""
return cls(
query_collection=HiveQueryCollection.from_flyte_idl(p.query_collection)
if p.HasField("query_collection")
else None,
query=HiveQuery.from_flyte_idl(p.query) if p.HasField("query") else None,
cluster_label=p.cluster_label,
tags=p.tags,
)
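# Illustrative round trip through the IDL representation (values are made up):
# q = HiveQuery(query="SELECT 1", timeout_sec=60, retry_count=3)
# assert HiveQuery.from_flyte_idl(q.to_flyte_idl()).query == "SELECT 1"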
| 26.355422
| 109
| 0.597257
| 4,268
| 0.975543
| 0
| 0
| 2,140
| 0.489143
| 0
| 0
| 1,580
| 0.361143
|
224b518501a4fc21242f8b7e19fdfbb05fcec01d
| 1,583
|
py
|
Python
|
app/app.py
|
SogoKato/mecab-parser
|
402f22b4da283dfc74935fc66f092452158beb56
|
[
"MIT"
] | null | null | null |
app/app.py
|
SogoKato/mecab-parser
|
402f22b4da283dfc74935fc66f092452158beb56
|
[
"MIT"
] | null | null | null |
app/app.py
|
SogoKato/mecab-parser
|
402f22b4da283dfc74935fc66f092452158beb56
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from dataclasses import asdict
from logging import DEBUG
import os
from flask import Flask, jsonify, request
from werkzeug.exceptions import HTTPException
from mecab_parser import MecabParserFactory
app = Flask(__name__)
app.config["JSON_AS_ASCII"] = False
@app.route("/parse", methods=["POST"])
def parse():
if not request.is_json:
return jsonify({
"ok": False,
"code": 400,
"name": "Invalid Content-Type",
"description": "Content-Type must be application/json.",
})
content = request.get_json()
if (
content is None or
not isinstance(content, dict) or
"texts" not in content.keys() or
not isinstance(content["texts"], list) or
[x for x in content["texts"] if not isinstance(x, str)]
):
return jsonify({
"ok": False,
"code": 400,
"name": "Invalid JSON Format",
"description": 'Valid JSON format is {"texts": ["Ipsum dolore eirmod autem et"]}',
})
parser = MecabParserFactory.create("mecab-ipadic-neologd")
    results = [[asdict(dc) for dc in parser(t)] for t in content["texts"]]
return jsonify({"ok": True, "results": results})
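# Illustrative client call for the endpoint above (assumes the service runs on
# localhost:5000 and the `requests` package is available; not part of this app):
# import requests
# r = requests.post("http://localhost:5000/parse", json={"texts": ["some text"]})
# print(r.json())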
@app.errorhandler(HTTPException)
def handle_exception(e):
return jsonify({
"ok": False,
"code": e.code,
"name": e.name,
"description": e.description,
})
if __name__ == "__main__":
port = int(os.getenv("PORT", 5000))
app.logger.level = DEBUG
app.run(host="0.0.0.0", port=port)
| 27.77193
| 94
| 0.599495
| 0
| 0
| 0
| 0
| 1,156
| 0.730259
| 0
| 0
| 376
| 0.237524
|
224e634019de5a0413838cff34d92fa96b8463fb
| 1,402
|
py
|
Python
|
src/data_migrator/emitters/__init__.py
|
schubergphilis/data-migrator
|
b5ced9abd6bc2c60e9c115951fee38c2fd08923f
|
[
"MIT"
] | 18
|
2017-04-27T08:57:40.000Z
|
2021-04-01T22:39:40.000Z
|
src/data_migrator/emitters/__init__.py
|
schubergphilis/data-migrator
|
b5ced9abd6bc2c60e9c115951fee38c2fd08923f
|
[
"MIT"
] | 1,077
|
2017-04-13T20:56:44.000Z
|
2022-03-31T01:23:50.000Z
|
src/data_migrator/emitters/__init__.py
|
schubergphilis/data-migrator
|
b5ced9abd6bc2c60e9c115951fee38c2fd08923f
|
[
"MIT"
] | 6
|
2017-04-17T14:14:45.000Z
|
2020-05-05T10:05:23.000Z
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""Emitters are used to export models to output format.
This module contains all classes for emitters: base and actuals. Currently
the system has two emitters: :class:`~.CSVEmitter` and :class:`~.MySQLEmitter`
implemented, of which the last is the default emitter. An emitter provides the
export format for the scanned and cleaned datasets. It also provides preambles
and postambles in the output files, for example to clean the target table
before loading it.
The following classes are defined in this module:
* :class:`~.BaseEmitter`
* :class:`~.MySQLEmitter`
* :class:`~.CSVEmitter`
* :class:`~.JSONEmitter`
* :class:`~.UpdateEmitter`
The basic structure for emitting is a combination between
:class:`~.BaseManager` and :class:`~.BaseEmitter`:
.. code-block:: python
e = Emitter(manager=Model.objects)
print e.preamble(header=[..my header lines to add..])
for l in Model.objects.all():
print e.emit(l) # emit is returning a list of strings!
.. note::
At this moment *data-migrator* does not an actively take part in schema
migrations of any sort. It is purely about cleaning, anonymizing and
transforming data (yet!).
"""
from .update import UpdateEmitter # noqa
from .mysql import MySQLEmitter # noqa
from .csv import CSVEmitter # noqa
from .json_emit import JSONEmitter # noqa
from .singer import SingerEmitter # noqa
| 32.604651
| 78
| 0.738231
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,229
| 0.876605
|
22529ca1da0ceee85ccd01a18946c0340e79ffbb
| 330
|
py
|
Python
|
cycleshare/migrations/0013_remove_cycle_toprofile.py
|
vasundhara7/College-EWallet
|
0a4c32bc08218650635a04fb9a9e28446fd4f3e1
|
[
"Apache-2.0"
] | 2
|
2019-07-28T00:34:09.000Z
|
2020-06-18T11:58:03.000Z
|
cycleshare/migrations/0013_remove_cycle_toprofile.py
|
vasundhara7/College-EWallet
|
0a4c32bc08218650635a04fb9a9e28446fd4f3e1
|
[
"Apache-2.0"
] | null | null | null |
cycleshare/migrations/0013_remove_cycle_toprofile.py
|
vasundhara7/College-EWallet
|
0a4c32bc08218650635a04fb9a9e28446fd4f3e1
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.0.9 on 2018-12-12 08:18
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('cycleshare', '0012_cycle_toprofile'),
]
operations = [
migrations.RemoveField(
model_name='cycle',
name='toprofile',
),
]
| 18.333333
| 47
| 0.593939
| 245
| 0.742424
| 0
| 0
| 0
| 0
| 0
| 0
| 99
| 0.3
|
2252c6b7f19070373e0889abae519f050d93b6aa
| 5,495
|
py
|
Python
|
projects/aFPGA/10_python/npu/generate_npu_sram_init_file.py
|
helloworld1983/NPU_on_FPGA
|
1d0626dbed463cfacd47805a3214a43662f5b28c
|
[
"BSD-2-Clause"
] | 63
|
2018-08-05T14:30:47.000Z
|
2022-03-31T09:41:55.000Z
|
projects/aFPGA/10_python/npu/generate_npu_sram_init_file.py
|
cxdzyq1110/NPU_on_FPGA
|
1d0626dbed463cfacd47805a3214a43662f5b28c
|
[
"BSD-2-Clause"
] | null | null | null |
projects/aFPGA/10_python/npu/generate_npu_sram_init_file.py
|
cxdzyq1110/NPU_on_FPGA
|
1d0626dbed463cfacd47805a3214a43662f5b28c
|
[
"BSD-2-Clause"
] | 22
|
2018-11-06T13:01:28.000Z
|
2022-03-09T08:52:27.000Z
|
import math
import numpy as np
from scipy import signal
#%% Fully automatic data generation
M = 16; N = 100;
AddImm = 1000;
MAT_M = 3; MAT_N = 5; MAT_P = 7;
Pm = 5; Pn = 5;
Km = 5; Kn = 5;
MODE = 1;
fpp = open("./npu_verification_para.list", "w")
fpp.write("%d\n"%(M))
fpp.write("%d\n"%(N))
fpp.write("%d\n"%(AddImm))
fpp.write("%d\n"%(MAT_M))
fpp.write("%d\n"%(MAT_N))
fpp.write("%d\n"%(MAT_P))
fpp.write("%d\n"%(Km))
fpp.write("%d\n"%(Kn))
fpp.write("%d\n"%(Pm))
fpp.write("%d\n"%(Pn))
fpp.write("%d\n"%(MODE))
fpp.close()
#%%
fpd = open("./source_sram_dq.list", "w")
#%% First, the raw image data
Dollar1 = np.random.randint(-50,-1, size=(M,N))*2**16
fpd.write("@%X\n"%(2*0x010000))
for i in range(0, M):
for j in range(0, N):
tmp_v = int(Dollar1[i, j])
if tmp_v<0:
tmp_v = tmp_v + 0x100000000
fpd.write("%04X\n"%((tmp_v >> 0)&0xFFFF))
fpd.write("%04X\n"%((tmp_v >> 16)&0xFFFF))
#%% Data for a second image
Dollar22 = np.random.randint(-15,-10, size=(M,N))*2**16
fpd.write("@%X\n"%(2*0x020000))
for i in range(0, M):
for j in range(0, N):
tmp_v = int(Dollar22[i, j])
if tmp_v<0:
tmp_v = tmp_v + 0x100000000
fpd.write("%04X\n"%((tmp_v >> 0)&0xFFFF))
fpd.write("%04X\n"%((tmp_v >> 16)&0xFFFF))
#%% Then image addition, subtraction and multiplication
fp_add = open("./fp_add_test.txt", "w")
fp_addi = open("./fp_addi_test.txt", "w")
fp_sub = open("./fp_sub_test.txt", "w")
fp_dot = open("./fp_dot_test.txt", "w")
# Write the results to text files
for i in range(0, len(Dollar1)):
for j in range(0, len(Dollar1[0])):
add_value = int((Dollar1[i, j]+Dollar22[i, j]))
addi_value = int((Dollar1[i, j]+AddImm))
sub_value = int((Dollar1[i, j]-Dollar22[i, j]))
dot_value = int((Dollar1[i, j]/2**16*Dollar22[i, j]))
fp_add.write("%d\n"%(add_value))
fp_sub.write("%d\n"%(sub_value))
fp_dot.write("%d\n"%(dot_value))
fp_addi.write("%d\n"%(addi_value))
fp_add.close()
fp_addi.close()
fp_sub.close()
fp_dot.close()
#%% Matrix transpose
fp_tran = open("./fp_tran_test.txt", "w")
# Write the result to a text file
for j in range(0, len(Dollar1[0])):
for i in range(0, len(Dollar1)):
tran_value = int((Dollar1[i, j]))
fp_tran.write("%d\n"%(tran_value))
fp_tran.close()
#%% Convolution kernel for the convolution operation
kernel = np.random.randint(-15,10, size=(Km,Kn))*2**16
fpd.write("@%X\n"%(2*0x030000))
for i in range(0, len(kernel)):
for j in range(0, len(kernel[0])):
tmp_v = int(kernel[i, j])
if tmp_v<0:
tmp_v = tmp_v + 0x100000000
fpd.write("%04X\n"%((tmp_v >> 0)&0xFFFF))
fpd.write("%04X\n"%((tmp_v >> 16)&0xFFFF))
d1 = Dollar1
d2 = kernel
d1x = d1/2**16;
d2x = d2/2**16;
dcx = (signal.convolve2d(d1x, d2x, 'valid') * 2**16).astype(int)
# Write the result to a text file
fp_conv = open("./fp_conv_test.txt", "w")
for i in range(0, len(dcx)):
for j in range(0, len(dcx[0])):
conv_value = int(dcx[i, j])
fp_conv.write("%d\n"%(conv_value))
fp_conv.close()
#%% Then the pooling computation
fp_pool = open("./fp_pool_test.txt", "w")
dpx = np.zeros((M//Pm, N//Pn))
for i in range(0, M//Pm):
for j in range(0, N//Pn):
if MODE==0:
dpx[i, j] = np.mean(d1x[Pm*i:Pm*i+Pm, Pn*j:Pn*j+Pn])
elif MODE==1:
dpx[i, j] = np.max(d1x[Pm*i:Pm*i+Pm, Pn*j:Pn*j+Pn])
pool_value = int(2**16*dpx[i, j])
fp_pool.write("%d\n"%(pool_value))
fp_pool.close()
#%% Then verify the MULT matrix-multiplication instruction
mat1 = np.random.randint(-1,2, size=(MAT_M,MAT_N))
mat2 = np.random.randint(-2,-1, size=(MAT_N,MAT_P))
mat1_216 = 2**16*mat1
mat2_216 = 2**16*mat2
mat3 = np.dot(mat1, mat2)
fpd.write("@%X\n"%(2*0x040000))
# Source data for the matrix multiplication
for i in range(0, len(mat1)):
for j in range(0, len(mat1[0])):
mult_value = int(2**16*mat1[i, j])
fpd.write("%04X\n"%((mult_value >> 0)&0xFFFF))
fpd.write("%04X\n"%((mult_value >> 16)&0xFFFF))
fpd.write("@%X\n"%(2*0x050000))
for i in range(0, len(mat2)):
for j in range(0, len(mat2[0])):
mult_value = int(2**16*mat2[i, j])
fpd.write("%04X\n"%((mult_value >> 0)&0xFFFF))
fpd.write("%04X\n"%((mult_value >> 16)&0xFFFF))
# Write the result to a text file
fp_mult = open("./fp_mult_test.txt", "w")
for i in range(0, len(mat3)):
for j in range(0, len(mat3[0])):
mult_value = int(2**16*mat3[i, j])
fp_mult.write("%d\n"%(mult_value))
fp_mult.close()
#%%
######################
fp_tanh = open("./fp_tanh_test.txt", "w")
Dollar2 = np.random.randn(M,N)*2**16
fpd.write("@%X\n"%(2*0x060000))
for i in range(0, M):
for j in range(0, N):
tmp_v = int(Dollar2[i, j])
if tmp_v<0:
tmp_v = tmp_v + 0x100000000
fpd.write("%04X\n"%((tmp_v >> 0)&0xFFFF))
fpd.write("%04X\n"%((tmp_v >> 16)&0xFFFF))
tanh_value = int(2**16*math.tanh(Dollar2[i, j]/(2**16)))
fp_tanh.write("%d\n"%(tanh_value))
fp_tanh.close()
#%% Matrix plus/minus scalar operation
fp_adds = open("./fp_adds_test.txt", "w")
Dollar2_ini = Dollar2[0, 0]
# Write the result to a text file
for i in range(0, len(Dollar1)):
for j in range(0, len(Dollar1[0])):
adds_value = int((Dollar1[i, j] + Dollar2_ini))
fp_adds.write("%d\n"%(adds_value))
fp_adds.close()
#%% RGB565-to-grayscale conversion
fp_gray = open("./fp_gray_test.txt", "w")
fpd.write("@%X\n"%(2*0x070000))
red = np.random.randint(0,2**5, size=(M,N))
green = np.random.randint(0,2**6, size=(M,N))
blue = np.random.randint(0,2**5, size=(M,N))
rgb565 = red*2**11 + green*2**5 + blue
# Write the results to the output files
for i in range(0, len(rgb565)):
for j in range(0, len(rgb565[0])):
r = ((rgb565[i][j]>>11) & 0x1F) *8
g = ((rgb565[i][j]>>5) & 0x3F) *4
b = ((rgb565[i][j]>>0) & 0x1F) *8
gray_value = int((r*66 + g*129 + b*25)/256) + 16
if gray_value<16:
gray_value = 16
elif gray_value>235:
gray_value = 235
        # Write into the output files
fpd.write("%04X\n"%((rgb565[i][j] >> 0)&0xFFFF))
fpd.write("%04X\n"%((rgb565[i][j] >> 16)&0xFFFF))
fp_gray.write("%d\n"%(gray_value))
fp_gray.close()
#%% Close all files
fpd.close()
| 25.67757
| 67
| 0.601274
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,106
| 0.192214
|
225438e5c2b8551e69ccb321df71b6704ae2b4d5
| 17
|
py
|
Python
|
2.py
|
modianor/git_project
|
21d664bfa31d6f3e584ffc594514ea4342b6bc3f
|
[
"MIT"
] | null | null | null |
2.py
|
modianor/git_project
|
21d664bfa31d6f3e584ffc594514ea4342b6bc3f
|
[
"MIT"
] | null | null | null |
2.py
|
modianor/git_project
|
21d664bfa31d6f3e584ffc594514ea4342b6bc3f
|
[
"MIT"
] | null | null | null |
A = 1
B = 2
C = 4
| 5.666667
| 5
| 0.352941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
2255fb2ff207d881f927e1b321a4dc62c8ca610a
| 17
|
py
|
Python
|
src/ixu/commands/server/__init__.py
|
luanguimaraesla/ixu
|
f213bdf27fc7336a76110cf3f89e30ae1d5a64fb
|
[
"Apache-2.0"
] | 2
|
2021-05-14T17:14:09.000Z
|
2021-06-13T21:35:04.000Z
|
src/ixu/commands/server/__init__.py
|
luanguimaraesla/ixu
|
f213bdf27fc7336a76110cf3f89e30ae1d5a64fb
|
[
"Apache-2.0"
] | null | null | null |
src/ixu/commands/server/__init__.py
|
luanguimaraesla/ixu
|
f213bdf27fc7336a76110cf3f89e30ae1d5a64fb
|
[
"Apache-2.0"
] | null | null | null |
from . import up
| 8.5
| 16
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
225632b6786bc6319f8c49ffcd364075da52275b
| 247
|
py
|
Python
|
pythoncev/exercicios/ex096.py
|
gustavobelloni/Python
|
156a99d10f460f0fcaea18a87d35d4b0e3dba493
|
[
"MIT"
] | null | null | null |
pythoncev/exercicios/ex096.py
|
gustavobelloni/Python
|
156a99d10f460f0fcaea18a87d35d4b0e3dba493
|
[
"MIT"
] | null | null | null |
pythoncev/exercicios/ex096.py
|
gustavobelloni/Python
|
156a99d10f460f0fcaea18a87d35d4b0e3dba493
|
[
"MIT"
] | null | null | null |
def área(larg, comp):
a = larg * comp
print(f'A área de um terreno {larg}x{comp} é de {a}m²')
print('Controle de Terrenos')
print('--------------------')
l = float(input('Largura (m): '))
c = float(input('Comprimento (m): '))
área(l, c)
| 22.454545
| 59
| 0.554656
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 129
| 0.511905
|
22567858f90706f357ad018d732e41ca1cb74961
| 2,678
|
py
|
Python
|
readthedocs/proxito/views/decorators.py
|
tkoyama010/readthedocs.org
|
aac8fb39586db902d9fbb51b639dd281c819dae2
|
[
"MIT"
] | 2
|
2021-05-16T06:57:15.000Z
|
2021-05-16T06:57:18.000Z
|
readthedocs/proxito/views/decorators.py
|
tkoyama010/readthedocs.org
|
aac8fb39586db902d9fbb51b639dd281c819dae2
|
[
"MIT"
] | 12
|
2021-03-21T13:16:50.000Z
|
2022-03-12T00:55:05.000Z
|
readthedocs/proxito/views/decorators.py
|
mondeja/readthedocs.org
|
fb01c6d9d78272e3f4fd146697e8760c04e4fbb6
|
[
"MIT"
] | 1
|
2021-07-09T14:02:39.000Z
|
2021-07-09T14:02:39.000Z
|
import logging
from functools import wraps
from django.http import Http404
from readthedocs.projects.models import Project, ProjectRelationship
log = logging.getLogger(__name__) # noqa
def map_subproject_slug(view_func):
"""
A decorator that maps a ``subproject_slug`` URL param into a Project.
:raises: Http404 if the Project doesn't exist
.. warning:: Does not take into account any kind of privacy settings.
"""
@wraps(view_func)
def inner_view( # noqa
request, subproject=None, subproject_slug=None, *args, **kwargs
):
if subproject is None and subproject_slug:
# Try to fetch by subproject alias first, otherwise we might end up
# redirected to an unrelated project.
# Depends on a project passed into kwargs
rel = ProjectRelationship.objects.filter(
parent=kwargs['project'],
alias=subproject_slug,
).first()
if rel:
subproject = rel.child
else:
rel = ProjectRelationship.objects.filter(
parent=kwargs['project'],
child__slug=subproject_slug,
).first()
if rel:
subproject = rel.child
else:
log.warning(
'The slug is not subproject of project. subproject_slug=%s project_slug=%s',
subproject_slug, kwargs['project'].slug
)
raise Http404('Invalid subproject slug')
return view_func(request, subproject=subproject, *args, **kwargs)
return inner_view
def map_project_slug(view_func):
"""
A decorator that maps a ``project_slug`` URL param into a Project.
:raises: Http404 if the Project doesn't exist
.. warning:: Does not take into account any kind of privacy settings.
"""
@wraps(view_func)
def inner_view( # noqa
request, project=None, project_slug=None, *args, **kwargs
):
if project is None:
# Get a slug from the request if it can't be found in the URL
if not project_slug:
project_slug = request.host_project_slug
log.debug(
'Inserting project slug from request slug=[%s]',
project_slug
)
try:
project = Project.objects.get(slug=project_slug)
except Project.DoesNotExist:
raise Http404('Project does not exist.')
return view_func(request, project=project, *args, **kwargs)
return inner_view
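# Illustrative composition of the two decorators above on a proxito-style view;
# the view name and signature are assumptions, not part of this module:
# @map_project_slug
# @map_subproject_slug
# def serve_docs(request, project, subproject=None):
#     ...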
| 33.475
| 100
| 0.581404
| 0
| 0
| 0
| 0
| 1,929
| 0.720314
| 0
| 0
| 842
| 0.314414
|
2256a0c4684d099dc813eeba74a7fcc9133e772a
| 4,217
|
py
|
Python
|
tortue_geniale/tg_channel_events.py
|
vavalm/discord_bot_tortue_geniale
|
2fa2865166dd109b1138b77ed7f21d8e59efd8ab
|
[
"MIT"
] | null | null | null |
tortue_geniale/tg_channel_events.py
|
vavalm/discord_bot_tortue_geniale
|
2fa2865166dd109b1138b77ed7f21d8e59efd8ab
|
[
"MIT"
] | null | null | null |
tortue_geniale/tg_channel_events.py
|
vavalm/discord_bot_tortue_geniale
|
2fa2865166dd109b1138b77ed7f21d8e59efd8ab
|
[
"MIT"
] | null | null | null |
import discord
import asyncio
import re
import logging
from data.groups_name import free_random_name
logging.basicConfig(level=logging.INFO)
client = discord.Client()
class ClientEvents(discord.Client):
'''
Classe initialization
'''
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# create the background task and run it in the background
self.bg_task = self.loop.create_task(self.my_background_task())
async def my_background_task(self):
await self.wait_until_ready()
counter = 0
channel = self.get_channel(1234567) # channel ID goes here
while not self.is_closed():
counter += 1
await channel.send(counter)
await asyncio.sleep(60) # task runs every 60 seconds
async def on_ready(self):
print('Logged in as')
print(self.user.name)
print(self.user.id)
print('------')
''' ############### EVENTS ABOUT CHANNELS AND SERVERS MANAGEMENT ###############'''
async def on_member_join(self, member):
guild = member.guild
if guild.system_channel is not None:
to_send = 'Welcome {0.mention} to {1.name}!'.format(member, guild)
await guild.system_channel.send(to_send)
'''
Permet la création et la suppression automatique de channels
'''
async def on_voice_state_update(self, member: discord.Member, before: discord.VoiceState,
after: discord.VoiceState):
await self.wait_until_ready()
after_channel: discord.VoiceChannel = after.channel
before_channel: discord.VoiceChannel = before.channel
# We enter in a channel
if type(after_channel) is discord.VoiceChannel:
category: discord.CategoryChannel = after_channel.category
guild: discord.guild = member.guild
if "Escouade".lower() in str(category.name).lower() and (
"Créer channel").lower() == after_channel.name.lower():
team_size = re.findall(r'\d+', category.name)
if len(team_size) == 0:
return
else:
team_size = int(re.findall(r'\d+', category.name)[0])
print("Création nouveau Channel")
new_name = free_random_name(team_size, guild)
new_channel: discord.VoiceChannel = await guild.create_voice_channel(
new_name,
category=category,
user_limit=int(team_size))
await member.move_to(new_channel)
# If we quit a channel and no one else is in, deletion of the channel
if type(before_channel) is discord.VoiceChannel \
and ("Créer channel").lower() != before_channel.name.lower():
if len(before_channel.members) == 0:
await before_channel.delete(reason="Channel empty")
''' ############### EVENTS ABOUT REPLIES ON MESSAGE ###############'''
@client.event
async def on_message(self, message):
# we do not want the bot to reply to itself
if message.author.id == self.user.id:
return
if message.content.startswith('!hello'):
await message.channel.send('Hello {0.author.mention} sur le serveur {1.guild}'.format(message, message))
class CommandsClient(discord.Client):
async def on_ready(self):
print('Logged in as')
print(self.user.name)
print(self.user.id)
print('------')
async def on_member_join(self, member):
guild = member.guild
if guild.system_channel is not None:
to_send = 'Welcome {0.mention} to {1.name}!'.format(member, guild)
await guild.system_channel.send(to_send)
    async def on_message(self, message):
if message.content.startswith('$greet'):
channel = message.channel
await channel.send('Say hello!')
def check(m):
return m.content == 'hello' and m.channel == channel
            msg = await self.wait_for('message', check=check)
await channel.send('Hello {.author}!'.format(msg))
| 35.436975
| 116
| 0.596396
| 4,047
| 0.958778
| 0
| 0
| 338
| 0.080076
| 3,393
| 0.803838
| 834
| 0.197584
|
22574ae53ea97f421f17d51078183bbb695cb068
| 566
|
py
|
Python
|
middleware/login_required.py
|
ahmetelgun/flask-boilerplate
|
56bc0235b5f00a7aaca6a9536a1744442863b8b6
|
[
"Apache-2.0"
] | 2
|
2021-12-01T20:48:44.000Z
|
2022-02-04T16:33:33.000Z
|
middleware/login_required.py
|
ahmetelgun/flask_authentication_boilerplate
|
56bc0235b5f00a7aaca6a9536a1744442863b8b6
|
[
"Apache-2.0"
] | null | null | null |
middleware/login_required.py
|
ahmetelgun/flask_authentication_boilerplate
|
56bc0235b5f00a7aaca6a9536a1744442863b8b6
|
[
"Apache-2.0"
] | null | null | null |
from flask import request, make_response, jsonify, g
from datetime import datetime
from functools import wraps
import jwt
from models import DBContext, User
from settings import SECRET_KEY
from service import is_token_valid
def login_required(func):
@wraps(func)
def wrapper(*args, **kwargs):
not_authenticated_response = make_response(
jsonify({"message": "Login required"}),
401
)
if g.user:
return func(g.user, *args, **kwargs)
return not_authenticated_response
return wrapper
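# Illustrative use on a Flask route (route path and view name are assumptions;
# the wrapper passes g.user as the first positional argument):
# @app.route("/me")
# @login_required
# def me(user):
#     return jsonify({"email": user.email})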
| 22.64
| 52
| 0.681979
| 0
| 0
| 0
| 0
| 288
| 0.508834
| 0
| 0
| 25
| 0.04417
|
22574e100d92910a2acaa1fb84cd7d78b47e8242
| 2,189
|
py
|
Python
|
libs/sqlservice/utils.py
|
smartadvising/smartadvising-api
|
74cfcc336c87523fcb011a96bc4506ecdef93afe
|
[
"MIT"
] | null | null | null |
libs/sqlservice/utils.py
|
smartadvising/smartadvising-api
|
74cfcc336c87523fcb011a96bc4506ecdef93afe
|
[
"MIT"
] | null | null | null |
libs/sqlservice/utils.py
|
smartadvising/smartadvising-api
|
74cfcc336c87523fcb011a96bc4506ecdef93afe
|
[
"MIT"
] | null | null | null |
"""
Utilities
---------
The utilities module.
"""
from collections.abc import Mapping, Sequence
from functools import wraps
import types
class FrozenDict(Mapping):
"""A frozen dictionary implementation that prevents the object from being
mutated. This is primarily used when defining a dict-like object as a class
attribute that shouldn't be mutated by subclasses.
"""
def __init__(self, *args, **kwargs):
self._dict = dict(*args, **kwargs)
def copy(self):
return self._dict.copy()
def __getitem__(self, key):
return self._dict.__getitem__(key)
def __contains__(self, item):
return self._dict.__contains__(item)
def __iter__(self):
return self._dict.__iter__()
def __len__(self):
return self._dict.__len__()
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, self._dict)
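# Illustrative use as an immutable class-level default (names are assumptions):
# class Service:
#     DEFAULTS = FrozenDict(timeout=30, retries=3)
# Service.DEFAULTS['timeout']        # -> 30
# Service.DEFAULTS['timeout'] = 5    # fails: FrozenDict defines no __setitem__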
def classonce(meth):
"""Decorator that executes a class method once, stores the results at the
class level, and subsequently returns those results for every future method
call.
"""
@wraps(meth)
def decorated(cls, *args, **kargs):
cached_attr = '__{0}'.format(meth.__name__)
if not hasattr(cls, cached_attr):
result = meth(cls, *args, **kargs)
setattr(cls, cached_attr, result)
return getattr(cls, cached_attr)
return decorated
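# Illustrative behaviour of classonce (class and helper are hypothetical; the
# wrapped method runs once and its result is cached on the class):
# class Catalog:
#     @classmethod
#     @classonce
#     def tables(cls):
#         return expensive_scan()     # executed only on the first call
# Catalog.tables(); Catalog.tables()  # second call returns the cached result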
def flatten(seq):
"""Flatten `seq` a single level deep."""
for item in seq:
if is_sequence(item):
for itm in item:
yield itm
else:
yield item
def is_sequence(value):
"""Test if `value` is a sequence but ``str``. This function is mainly used
to determine if `value` can be treated like a ``list`` for iteration
purposes.
"""
return (is_generator(value) or
(isinstance(value, Sequence) and not isinstance(value, str)))
def is_generator(value):
"""Return whether `value` is a generator or generator-like."""
return (isinstance(value, types.GeneratorType) or
(hasattr(value, '__iter__') and hasattr(value, '__next__') and
not hasattr(value, '__getitem__')))
| 27.708861
| 79
| 0.640018
| 753
| 0.343993
| 205
| 0.09365
| 280
| 0.127912
| 0
| 0
| 757
| 0.34582
|
2257918164befc9b4532377573ee36b973df3e73
| 1,817
|
py
|
Python
|
tests/test_normals.py
|
foobarbecue/trimesh
|
db2c649ebc577874702644248964b3295bd6ed5b
|
[
"MIT"
] | null | null | null |
tests/test_normals.py
|
foobarbecue/trimesh
|
db2c649ebc577874702644248964b3295bd6ed5b
|
[
"MIT"
] | null | null | null |
tests/test_normals.py
|
foobarbecue/trimesh
|
db2c649ebc577874702644248964b3295bd6ed5b
|
[
"MIT"
] | 1
|
2019-05-31T03:37:21.000Z
|
2019-05-31T03:37:21.000Z
|
try:
from . import generic as g
except BaseException:
import generic as g
class NormalsTest(g.unittest.TestCase):
def test_vertex_normal(self):
mesh = g.trimesh.creation.icosahedron()
# the icosahedron is centered at zero, so the true vertex
# normal is just a unit vector of the vertex position
truth = g.trimesh.util.unitize(mesh.vertices)
# force fallback to loop normal summing by passing None as the sparse
# matrix
normals = g.trimesh.geometry.mean_vertex_normals(len(mesh.vertices),
mesh.faces,
mesh.face_normals,
sparse=None)
self.assertTrue(g.np.allclose(normals - truth, 0.0))
# make sure the automatic sparse matrix generation works as well
normals = g.trimesh.geometry.mean_vertex_normals(len(mesh.vertices),
mesh.faces,
mesh.face_normals)
self.assertTrue(g.np.allclose(normals - truth, 0.0))
# make sure the Trimesh normals- related attributes are wired correctly
self.assertTrue(mesh.faces_sparse is not None)
self.assertTrue(mesh.vertex_normals.shape == mesh.vertices.shape)
self.assertTrue(g.np.allclose(mesh.vertex_normals - truth, 0.0))
def test_face_normals(self):
mesh = g.trimesh.creation.icosahedron()
self.assertTrue(mesh.face_normals.shape == mesh.faces.shape)
mesh.face_normals = None
self.assertTrue(mesh.face_normals.shape == mesh.faces.shape)
if __name__ == '__main__':
g.trimesh.util.attach_to_log()
g.unittest.main()
| 41.295455
| 79
| 0.587782
| 1,646
| 0.905889
| 0
| 0
| 0
| 0
| 0
| 0
| 332
| 0.182719
|
2257acdf6a3fd8c165f7da9ebcd60fa9bce30be5
| 14,692
|
py
|
Python
|
flagmaker/settings.py
|
google/sa360-bigquery-bootstrapper
|
ca25e9d9f4d00f392970e5e942942f9acb21952b
|
[
"Apache-2.0"
] | 4
|
2020-03-14T03:26:46.000Z
|
2020-12-13T14:43:22.000Z
|
flagmaker/settings.py
|
google/sa360-bigquery-bootstrapper
|
ca25e9d9f4d00f392970e5e942942f9acb21952b
|
[
"Apache-2.0"
] | 1
|
2020-11-17T16:26:50.000Z
|
2020-11-17T16:26:50.000Z
|
flagmaker/settings.py
|
google/sa360-bigquery-bootstrapper
|
ca25e9d9f4d00f392970e5e942942f9acb21952b
|
[
"Apache-2.0"
] | 1
|
2020-10-13T17:02:23.000Z
|
2020-10-13T17:02:23.000Z
|
# /***********************************************************************
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Note that these code samples being shared are not official Google
# products and are not formally supported.
# ************************************************************************/
import os
from typing import Union
import yaml
from yaml.parser import ParserError
from collections.abc import Iterable
from enum import EnumMeta
from typing import ClassVar
from typing import Dict
from typing import Generic
from typing import List
from typing import TypeVar
from absl import flags
from prompt_toolkit import ANSI
from prompt_toolkit import prompt
from prompt_toolkit.shortcuts import CompleteStyle
from termcolor import colored
from termcolor import cprint
from flagmaker.building_blocks import list_to_string_list
from flagmaker.exceptions import FlagMakerPromptInterruption
from flagmaker.validators import ChoiceValidator
from .building_blocks import SettingOptionInterface
from .building_blocks import SettingsInterface
from .building_blocks import Value
from .exceptions import FlagMakerConfigurationError
from .exceptions import FlagMakerInputError
from .hints import StringKeyDict
from .sanity import Validator
FLAGS = flags.FLAGS
T = TypeVar('T', bound=SettingsInterface)
class SettingConfig(object):
cache_file: str = '{}/.sa360bq'.format(os.environ['HOME'])
cache_dict: dict = {}
class SettingOption(SettingOptionInterface, Generic[T]):
settings: T = None
default = None
cache = None
help = None
method: callable = None
_value: Value = None
required: bool = False
validation: callable = None
conditional: callable = None
after: callable = None
prompt: Union[callable, str]
custom_data: StringKeyDict
include_in_interactive: bool = True
called: dict
_options: EnumMeta = None
_error: bool = False
attrs: dict
def __init__(self):
self._value = Value()
self.called = {}
@classmethod
def create(cls, settings: T, helptext=None, default=None,
method=flags.DEFINE_string, required=True, validation=None,
conditional=None, after=None, prompt=None,
include_in_interactive=True, options=None, attrs=None):
if options is None:
options = []
fl = cls()
fl.settings = settings
fl.default = default
fl.help = helptext
fl.method = method
fl.required = required
fl.validation = validation
fl.conditional = conditional
fl.after = after
fl.prompt = prompt
fl.include_in_interactive = include_in_interactive
fl._options = options
fl.attrs = attrs or {}
return fl
@property
def options(self):
return (list(map(lambda x: x.value, self._options))
if self._options is not None else None)
def get_prompt(self, k):
d = self.get_default_or_cache()
default = ' [default={0}]'.format(
d if not isinstance(d, bool) else int(d)
) if d is not None else ''
prompt_val = ''
if self.prompt is not None:
prompt_val += '\n'
if self.prompt is str:
prompt_val += self.prompt
if callable(self.prompt):
prompt_val += self.prompt(self)
prompt_val += '\nInput'
if self.method != flags.DEFINE_enum:
method = self.get_basic_prompt
else:
method = self.get_option_prompt
return method(k, default, prompt_val)
def get_option_prompt(self, k, default, prompt_val):
if not isinstance(self._options, EnumMeta):
raise FlagMakerConfigurationError('Need to add options for ' + k)
options = list_to_string_list(self.options)
return (
'{0}\n'
'{1}\n'
'{2}\n'
'Choices{3}: '
).format(
k,
colored('Options:', attrs=['underline']),
options,
default, prompt_val
)
def get_basic_prompt(self, k, default, prompt_val):
return '{}{}{}'.format(k, default, prompt_val)
@property
def value(self):
return self._value.get_val()
@value.setter
def value(self, value):
while True:
try:
self._set_value(value)
break
except FlagMakerConfigurationError as err:
cprint(str(err), 'red')
def _set_value(self, value):
if value is None:
self._value.set_val(None)
return
if self.method == flags.DEFINE_boolean:
if value in ['1', 'true', 'True', True]:
value = True
elif value in ['0', 'false', 'False', False]:
value = False
elif self.method == flags.DEFINE_integer:
value = int(value)
elif self.method == flags.DEFINE_enum:
options = self.options
is_iterable = isinstance(options, Iterable)
if not (is_iterable and value in options):
raise FlagMakerInputError(
'Need to choose one of [{}]'.format(', '.join(options))
)
self._value.set_val(value)
# perform actions
if self.after is None:
self._error = False
return
in_called = (self, self.after) not in self.called
if in_called:
self.called[(self, self.after)] = True
self.after(self)
def get_default_or_cache(self) -> str:
if self.cache is not None:
default_or_cache = self.cache
else:
default_or_cache = self.default
return default_or_cache
def set_value(self, key: str = '', value: str = '',
ask: str = '', init: str = ''):
while True:
num_opts = int(value != '') + int(ask != '') + int(init != '')
if num_opts != 1:
raise FlagMakerInputError('Need to choose either '
'init, value or ask')
default_or_cache = self.get_default_or_cache()
if init is None:
return
elif init != '':
self.value = init
return
if ask != '':
if ask is None:
# we intentionally set ask to None. A conditional prompt
# doesn't want this to continue
return
kwargs = {
'bottom_toolbar': ANSI(self.help)
}
if self.method == flags.DEFINE_enum:
choices = [str(i[0])
for i in
enumerate(self.options, start=1)]
kwargs['validator'] = ChoiceValidator(choices)
kwargs['complete_style'] = CompleteStyle.READLINE_LIKE
selection = prompt(ANSI(ask), **kwargs)
if selection == '':
val = default_or_cache
else:
val = self.options[int(selection)-1]
elif self.method == flags.DEFINE_multi_string:
val = []
i = 0
while True:
i += 1
res = prompt(ANSI(
"{} #{} (Empty Value to finish): ".format(ask, i)
), **kwargs)
if res == '':
break
val.append(res)
else:
val = prompt(ANSI(ask + ": "), **kwargs)
if val == '' and default_or_cache is not None:
self.value = default_or_cache
else:
self.value = val
if self.value is not None:
SettingConfig.cache_dict[key] = self.value
else:
self.value = value
if not Validator.validate(self) or self._error:
continue
if self.value_explicitly_set() or not self.required:
return
else:
cprint('Required Field', 'red')
def value_explicitly_set(self) -> bool:
return bool(self._value)
def maybe_needs_input(self):
return not self.value_explicitly_set() and (
self.conditional is None or self.conditional(self.settings))
def get_method(self):
return self.method
def __str__(self):
return self.value or ''
def __repr__(self):
return '[{0}: {1}]'.format(
self.default,
str(self.value) if self.value else '',
)
def __bool__(self):
return bool(self.value)
def __index__(self):
return self.value
def __getitem__(self, item) -> SettingOptionInterface:
return self.value.__getitem__(item)
class SettingBlock:
def __init__(self, block: str,
settings: Dict[str, SettingOption],
conditional: callable = None):
self.name = block
self.settings = settings
self.conditional = conditional
def get(self):
cprint('{}'.format(self.name), None, attrs=['bold'])
cprint('==========================', attrs=['bold'])
class AbstractSettings(SettingsInterface):
"""Settings Base Class
Loaded from the Config class. Used to generate flags for an app.
"""
args: List[SettingBlock] = None
flattened_args: dict = {}
def start(self):
"""Bootstraps the settings loading process
Called from load_settings. Should not be called directly.
"""
for block in self.args:
for k, s in block.settings.items():
s.set_value(init=FLAGS.get_flag_value(k, None))
self.flattened_args[k] = s
def load_settings(self):
self.start()
first = True
interactive_mode = self.args[0].settings.pop('interactive')
cache: dict = {}
if os.path.exists(SettingConfig.cache_file):
try:
with open(SettingConfig.cache_file, 'r') as fh:
cache = yaml.load(
fh.read(), Loader=yaml.Loader
) or {}
except ParserError:
cache = {}
os.remove(SettingConfig.cache_file)
for block in self.args:
header_shown = False
if block.conditional is not None and not block.conditional(self):
continue
for k, setting in block.settings.items():
setting.cache = cache[k] if k in cache else None
if setting.maybe_needs_input():
if not interactive_mode and setting.default:
setting.set_value(k, init=setting.default)
continue
if first:
cprint('Interactive Setup', attrs=['bold', 'underline'])
cprint(
'===============================',
attrs=['bold'],
)
first = False
if not header_shown:
cprint(block.name, attrs=['underline'])
header_shown = True
if setting.include_in_interactive:
try:
setting.set_value(k, ask=setting.get_prompt(k))
except FlagMakerPromptInterruption as err:
setting.set_value(k, value=err.value)
with open(SettingConfig.cache_file, 'w+') as fh:
fh.write(yaml.dump(
SettingConfig.cache_dict, Dumper=yaml.Dumper
))
return self
def assign_flags(self) -> flags:
for block in self.args:
for k, setting in block.settings.items():
kwargs = {
'default': None,
}
if not setting.include_in_interactive:
kwargs['default'] = setting.default
if setting.method == flags.DEFINE_enum:
kwargs['enum_values'] = setting.options
setting.method(k, help=setting.help, **kwargs)
self.flattened_args[k] = setting
return FLAGS
def __getitem__(self, item):
return self.flattened_args[item]
def get_settings(self):
# system settings
settings = [SettingBlock('System Settings', {
'interactive': SettingOption.create(
self,
'Enter Interactive Mode even to verify default values',
default=False,
include_in_interactive=False,
method=flags.DEFINE_bool,
),
})]
settings += self.settings()
return settings
def install(self):
self.args = self.get_settings()
self.assign_flags()
def __repr__(self):
return str(self.args)
def __enter__(self):
return self
def __exit__(self, err, value, traceback):
with open(SettingConfig.cache_file, 'a+') as fh:
fh.write(yaml.dump(
SettingConfig.cache_dict, Dumper=yaml.Dumper
))
AbstractSettingsClass = ClassVar[T]
class Config(Generic[T]):
"""The entry point for setting the Settings class for an app.
Example: Config(MySettingsClass)
This will bootstrap the settings class correctly.
"""
def __init__(self, s: ClassVar[T]):
self.s: ClassVar[T] = s
self.instance = s()
self.instance.install()
def get(self) -> T:
if not FLAGS.is_parsed():
raise FlagMakerConfigurationError(
'Do not call this '
'method until after app.run()'
)
with self.instance as instance:
instance.load_settings()
return self.instance
| 33.239819
| 80
| 0.544854
| 12,799
| 0.871154
| 0
| 0
| 1,207
| 0.082154
| 0
| 0
| 2,039
| 0.138783
|
225800b91896c420ce99d82a83f56df5a8a804aa
| 19,390
|
py
|
Python
|
cli/sub.py
|
mylovage/GolemQT
|
7560d4e67d564022fbbdc00c069a51c673bfe267
|
[
"MIT"
] | null | null | null |
cli/sub.py
|
mylovage/GolemQT
|
7560d4e67d564022fbbdc00c069a51c673bfe267
|
[
"MIT"
] | null | null | null |
cli/sub.py
|
mylovage/GolemQT
|
7560d4e67d564022fbbdc00c069a51c673bfe267
|
[
"MIT"
] | null | null | null |
# coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2018-2020 azai/Rgveda/GolemQuant base on QUANTAXIS/yutiansut
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import json
import sys
import websocket
from datetime import datetime as dt, timezone, timedelta, date
import datetime
import time as timer
import numba as nb
import traceback
from collections import namedtuple  # used by formater_l1_ticks_jit below
try:
import easyquotation
easyquotation_not_install = False
except:
easyquotation_not_install = True
try:
import QUANTAXIS as QA
from QUANTAXIS.QAUtil.QAParameter import ORDER_DIRECTION
from QUANTAXIS.QAUtil.QASql import QA_util_sql_mongo_sort_ASCENDING
from QUANTAXIS.QAUtil.QADate_trade import (
QA_util_if_tradetime,
QA_util_get_pre_trade_date,
QA_util_get_real_date,
trade_date_sse
)
from QUANTAXIS.QAData.QADataStruct import (
QA_DataStruct_Index_min,
QA_DataStruct_Index_day,
QA_DataStruct_Stock_day,
QA_DataStruct_Stock_min
)
from QUANTAXIS.QAIndicator.talib_numpy import *
from QUANTAXIS.QAUtil.QADate_Adv import (
QA_util_timestamp_to_str,
QA_util_datetime_to_Unix_timestamp,
QA_util_print_timestamp
)
from QUANTAXIS.QAUtil import (
DATABASE,
QASETTING,
QA_util_log_info,
QA_util_log_debug,
QA_util_log_expection,
QA_util_to_json_from_pandas
)
except:
    print('PLEASE run "pip install QUANTAXIS" before calling the GolemQ.cli.sub module')
pass
try:
from GolemQ.utils.parameter import (
AKA,
INDICATOR_FIELD as FLD,
TREND_STATUS as ST,
)
except:
class AKA():
"""
        Trend-status constants and proper-name indicator fields; defining them as
        constants avoids typos from hand-typed strings.
        """
        # Candlestick (OHLCV) fields
CODE = 'code'
NAME = 'name'
OPEN = 'open'
HIGH = 'high'
LOW = 'low'
CLOSE = 'close'
VOLUME = 'volume'
VOL = 'vol'
DATETIME = 'datetime'
LAST_CLOSE = 'last_close'
PRICE = 'price'
SYSTEM_NAME = 'GolemQuant'
def __setattr__(self, name, value):
            raise Exception(u'Const class does not allow changing property values.')
from GolemQ.utils.symbol import (
normalize_code
)
def formater_l1_tick(code: str, l1_tick: dict) -> dict:
"""
    Normalize a distributed tick record: reconcile the field differences between Sina and TDX L1 ticks.
"""
if ((len(code) == 6) and code.startswith('00')):
l1_tick['code'] = normalize_code(code, l1_tick['now'])
else:
l1_tick['code'] = normalize_code(code)
l1_tick['servertime'] = l1_tick['time']
l1_tick['datetime'] = '{} {}'.format(l1_tick['date'], l1_tick['time'])
l1_tick['price'] = l1_tick['now']
l1_tick['vol'] = l1_tick['volume']
del l1_tick['date']
del l1_tick['time']
del l1_tick['now']
del l1_tick['name']
del l1_tick['volume']
# print(l1_tick)
return l1_tick
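# Illustrative sketch of the renaming performed above (the values and the
# normalized code format are invented for the example): an input tick such as
#   {'date': '2020-07-01', 'time': '10:30:00', 'now': 10.5, 'volume': 1200, 'name': ..., ...}
# comes out as
#   {'code': <normalized code>, 'servertime': '10:30:00',
#    'datetime': '2020-07-01 10:30:00', 'price': 10.5, 'vol': 1200, ...}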
def formater_l1_ticks(l1_ticks: dict, codelist: list = None, stacks=None, symbol_list=None) -> dict:
"""
    Process a batch of L1 tick records.
"""
if (stacks is None):
l1_ticks_data = []
symbol_list = []
else:
l1_ticks_data = stacks
for code, l1_tick_values in l1_ticks.items():
# l1_tick = namedtuple('l1_tick', l1_ticks[code])
# formater_l1_tick_jit(code, l1_tick)
if (codelist is None) or \
(code in codelist):
l1_tick = formater_l1_tick(code, l1_tick_values)
if (l1_tick['code'] not in symbol_list):
l1_ticks_data.append(l1_tick)
symbol_list.append(l1_tick['code'])
return l1_ticks_data, symbol_list
@nb.jit(nopython=True)
def formater_l1_ticks_jit(l1_ticks: dict) -> dict:
"""
    Author's note: this function is unfinished and has not been fully debugged.
    Normalize distributed tick data (Sina vs. TDX L1 field differences).
    L1 ticks must be processed within 2 seconds, so this explores optimization via Cython or JIT.
"""
l1_ticks_data = []
for code in l1_ticks:
l1_tick = namedtuple('l1_tick', l1_ticks[code])
# formater_l1_tick_jit(code, l1_tick)
# l1_tick = formater_l1_tick(code, l1_ticks[code])
# l1_data = pd.DataFrame(l1_tick, index=['datetime'])
# l1_data['code'] = code
# l1_data = l1_data.rename({'time':'servertime', 'now':'price'})
# l1_tick = namedtuple('l1_tick', l1_tick._fields+('code',))
l1_tick['code'] = code
l1_tick['servertime'] = l1_tick['time']
l1_tick['datetime'] = '{} {}'.format(l1_tick['date'], l1_tick['time'])
l1_tick['price'] = l1_tick['now']
l1_tick['vol'] = l1_tick['volume']
del l1_tick['date']
del l1_tick['time']
del l1_tick['now']
del l1_tick['name']
del l1_tick['volume']
# del l1_tick['name']
# print(l1_tick)
# return l1_tick
l1_ticks_data.append(l1_tick)
return l1_ticks_data
def sub_l1_from_sina():
"""
    Fetch L1 data from Sina, refreshed every 3 seconds. It is recommended to keep the
    MongoDB database on an enterprise-grade SSD (with an Intel DC P3600 800GB SSD and
    a Ryzen 3600, each tick saves in < 0.6s).
"""
client = QASETTING.client['QAREALTIME']
if (easyquotation_not_install == True):
        print(u'PLEASE run "pip install easyquotation" before calling the GolemQ.cli.sub module')
return
def collections_of_today():
database = client.get_collection('realtime_{}'.format(datetime.date.today()))
database.create_index([('code', QA_util_sql_mongo_sort_ASCENDING)])
database.create_index([('datetime', QA_util_sql_mongo_sort_ASCENDING)])
database.create_index([("code",
QA_util_sql_mongo_sort_ASCENDING),
("datetime",
QA_util_sql_mongo_sort_ASCENDING)],
# unique=True,
)
return database
    quotation = easyquotation.use('sina') # Sina: ['sina']; Tencent: ['tencent', 'qq']
sleep_time = 2.0
sleep = int(sleep_time)
_time1 = dt.now()
database = collections_of_today()
get_once = True
    # Market open/close times
end_time = dt.strptime(str(dt.now().date()) + ' 16:30',
'%Y-%m-%d %H:%M')
start_time = dt.strptime(str(dt.now().date()) + ' 09:15',
'%Y-%m-%d %H:%M')
day_changed_time = dt.strptime(str(dt.now().date()) + ' 01:00',
'%Y-%m-%d %H:%M')
while (dt.now() < end_time):
        # Market open/close times
end_time = dt.strptime(str(dt.now().date()) + ' 16:30',
'%Y-%m-%d %H:%M')
start_time = dt.strptime(str(dt.now().date()) + ' 09:15',
'%Y-%m-%d %H:%M')
day_changed_time = dt.strptime(str(dt.now().date()) + ' 01:00',
'%Y-%m-%d %H:%M')
_time = dt.now()
if QA_util_if_tradetime(_time) and \
(dt.now() < day_changed_time):
            # The date rolled over; the target collection changes accordingly
            # (this guards against a user who never exits and keeps it running).
            print(u'Current date updated~! {} '.format(datetime.date.today()))
            database = collections_of_today()
            print(u'Not Trading time - the Chinese A-share market is closed now {}'.format(_time))
timer.sleep(sleep)
continue
symbol_list = []
l1_ticks_data = []
if QA_util_if_tradetime(_time) or \
            (get_once): # if within trading hours
l1_ticks = quotation.market_snapshot(prefix=True)
l1_ticks_data, symbol_list = formater_l1_ticks(l1_ticks)
if (dt.now() < start_time) or \
((len(l1_ticks_data) > 0) and \
(dt.strptime(l1_ticks_data[-1]['datetime'],
'%Y-%m-%d %H:%M:%S') < dt.strptime(str(dt.now().date()) + ' 00:00',
'%Y-%m-%d %H:%M'))):
                print(u'Not Trading time - the Chinese A-share market is closed now {}'.format(_time))
timer.sleep(sleep)
continue
            # Fetch a second pass, which includes the Shanghai Composite index data
l1_ticks = quotation.market_snapshot(prefix=False)
l1_ticks_data, symbol_list = formater_l1_ticks(l1_ticks,
stacks=l1_ticks_data,
symbol_list=symbol_list)
            # Check whether this is a new tick
query_id = {
"code": {
'$in': list(set([l1_tick['code'] for l1_tick in l1_ticks_data]))
},
"datetime": sorted(list(set([l1_tick['datetime'] for l1_tick in l1_ticks_data])))[-1]
}
# print(sorted(list(set([l1_tick['datetime'] for l1_tick in
# l1_ticks_data])))[-1])
refcount = database.count_documents(query_id)
if refcount > 0:
if (len(l1_ticks_data) > 1):
                    # Delete the duplicated records
# print('Delete', refcount, list(set([l1_tick['datetime']
# for l1_tick in l1_ticks_data])))
database.delete_many(query_id)
database.insert_many(l1_ticks_data)
else:
                    # Continuous-update mode: replace the single existing record
database.replace_one(query_id, l1_ticks_data[0])
else:
                # New tick: insert the records
# print('insert_many', refcount)
database.insert_many(l1_ticks_data)
if (get_once != True):
                print(u'Trading time now - the Chinese A-share market is open {}\nProcessing ticks data cost:{:.3f}s'.format(dt.now(),
(
dt.now() - _time).total_seconds()))
if ((dt.now() - _time).total_seconds() < sleep):
timer.sleep(sleep - (dt.now() - _time).total_seconds())
print('Program Last Time {:.3f}s'.format((dt.now() - _time1).total_seconds()))
get_once = False
else:
            print(u'Not Trading time - the Chinese A-share market is closed now {}'.format(_time))
timer.sleep(sleep)
        # Execution reaches here around 5 p.m. every day; if necessary, re-run the end-of-day download (i.e. QUANTAXIS 'save X')
save_time = dt.strptime(str(dt.now().date()) + ' 17:00', '%Y-%m-%d %H:%M')
if (dt.now() > end_time) and \
(dt.now() < save_time):
            # Between 16:00 and 17:00 after the close, update the end-of-day data.
            # I do not recommend merging that in here: occasionally the program blocks,
            # and once this process is blocked with nobody watching, live quote collection is affected the next day.
pass
    # The while loop ends automatically at 5 p.m. every day; wait here about 13 hours, then exit around 6 a.m. so the program can restart.
    print(u'The while loop ends automatically at 5 p.m. every day; waiting about 13 hours here, it exits around 6 a.m. and is restarted, so as long as the window stays open it keeps collecting ticks every day.')
timer.sleep(40000)
def sub_codelist_l1_from_sina(codelist: list = None):
"""
    Fetch L1 data from Sina, refreshed every 3 seconds. It is recommended to keep the
    MongoDB database on an enterprise-grade SSD (with an Intel DC P3600 800GB SSD and
    a Ryzen 3600, each tick saves in < 0.6s).
"""
def collections_of_today():
database = DATABASE.get_collection('realtime_{}'.format(datetime.date.today()))
database.create_index([('code', QA_util_sql_mongo_sort_ASCENDING)])
database.create_index([('datetime', QA_util_sql_mongo_sort_ASCENDING)])
database.create_index([("code",
QA_util_sql_mongo_sort_ASCENDING),
("datetime",
QA_util_sql_mongo_sort_ASCENDING)],
# unique=True,
)
return database
    quotation = easyquotation.use('sina') # Sina: ['sina']; Tencent: ['tencent', 'qq']
sleep_time = 2.0
sleep = int(sleep_time)
_time1 = dt.now()
database = collections_of_today()
get_once = True
    # Market open/close times
end_time = dt.strptime(str(dt.now().date()) + ' 16:30', '%Y-%m-%d %H:%M')
start_time = dt.strptime(str(dt.now().date()) + ' 09:15', '%Y-%m-%d %H:%M')
day_changed_time = dt.strptime(str(dt.now().date()) + ' 01:00',
'%Y-%m-%d %H:%M')
while (dt.now() < end_time):
        # Market open/close times
end_time = dt.strptime(str(dt.now().date()) + ' 16:30', '%Y-%m-%d %H:%M')
start_time = dt.strptime(str(dt.now().date()) + ' 09:15', '%Y-%m-%d %H:%M')
day_changed_time = dt.strptime(str(dt.now().date()) + ' 01:00',
'%Y-%m-%d %H:%M')
_time = dt.now()
if QA_util_if_tradetime(_time) and \
(dt.now() < day_changed_time):
            # The date rolled over; the target collection changes accordingly
            # (this guards against a user who never exits and keeps it running).
            print(u'Current date updated~! {} '.format(datetime.date.today()))
            database = collections_of_today()
            print(u'Not Trading time - the Chinese A-share market is closed now {}'.format(_time))
timer.sleep(sleep)
continue
if QA_util_if_tradetime(_time) or \
            (get_once): # if within trading hours
l1_ticks = quotation.market_snapshot(prefix=True)
l1_ticks_data, symbol_list = formater_l1_ticks(l1_ticks, codelist=codelist)
if (dt.now() < start_time) or \
((len(l1_ticks_data) > 0) and \
(dt.strptime(l1_ticks_data[-1]['datetime'],
'%Y-%m-%d %H:%M:%S') < dt.strptime(str(dt.now().date()) + ' 00:00',
'%Y-%m-%d %H:%M'))):
                print(u'Not Trading time - the Chinese A-share market is closed now {}'.format(_time))
timer.sleep(sleep)
continue
            # Fetch a second pass, which includes the Shanghai Composite index data
l1_ticks = quotation.market_snapshot(prefix=False)
l1_ticks_data, symbol_list = formater_l1_ticks(l1_ticks,
codelist=codelist,
stacks=l1_ticks_data,
symbol_list=symbol_list)
            # Check whether this is a new tick
query_id = {
"code": {
'$in': list(set([l1_tick['code'] for l1_tick in l1_ticks_data]))
},
"datetime": sorted(list(set([l1_tick['datetime'] for l1_tick in l1_ticks_data])))[-1]
}
# print(symbol_list, len(symbol_list))
refcount = database.count_documents(query_id)
if refcount > 0:
if (len(l1_ticks_data) > 1):
                    # Delete the duplicated records
database.delete_many(query_id)
database.insert_many(l1_ticks_data)
else:
                    # Continuous-update mode: replace the single existing record
database.replace_one(query_id, l1_ticks_data[0])
else:
                # New tick: insert the records
database.insert_many(l1_ticks_data)
if (get_once != True):
                print(u'Trading time now - the Chinese A-share market is open {}\nProcessing ticks data cost:{:.3f}s'.format(dt.now(),
(
dt.now() - _time).total_seconds()))
if ((dt.now() - _time).total_seconds() < sleep):
timer.sleep(sleep - (dt.now() - _time).total_seconds())
print('Program Last Time {:.3f}s'.format((dt.now() - _time1).total_seconds()))
get_once = False
else:
            print(u'Not Trading time - the Chinese A-share market is closed now {}'.format(_time))
timer.sleep(sleep)
        # Execution reaches here around 5 p.m. every day; if necessary, re-run the end-of-day download (i.e. QUANTAXIS 'save X')
save_time = dt.strptime(str(dt.now().date()) + ' 17:00', '%Y-%m-%d %H:%M')
if (dt.now() > end_time) and \
(dt.now() < save_time):
            # Between 16:00 and 17:00 after the close, update the end-of-day data.
            # I do not recommend merging that in here: occasionally the program blocks,
            # and once this process is blocked with nobody watching, live quote collection is affected the next day.
# save_X_func()
pass
    # The while loop ends automatically at 5 p.m. every day; wait here about 13 hours, then exit around 6 a.m. so the program can restart.
    print(u'The while loop ends automatically at 5 p.m. every day; waiting about 13 hours here, it exits around 6 a.m. and is restarted, so as long as the window stays open it keeps collecting ticks every day.')
timer.sleep(40000)
def sub_1min_from_tencent_lru():
"""
    Author's note: this function is unfinished and has not been fully debugged.
    Fetch the current trading day's 1-minute K-line data from Tencent.
"""
    blocks = ['MSCI中国', 'MSCI成份', 'MSCI概念', '三网融合',
'上证180', '上证380', '沪深300', '上证380',
'深证300', '上证50', '上证电信', '电信等权',
'上证100', '上证150', '沪深300', '中证100',
'中证500', '全指消费', '中小板指', '创业板指',
'综企指数', '1000可选', '国证食品', '深证可选',
'深证消费', '深成消费', '中证酒', '中证白酒',
'行业龙头', '白酒', '证券', '消费100',
'消费电子', '消费金融', '富时A50', '银行',
'中小银行', '证券', '军工', '白酒', '啤酒',
'医疗器械', '医疗器械服务', '医疗改革', '医药商业',
'医药电商', '中药', '消费100', '消费电子',
'消费金融', '黄金', '黄金概念', '4G5G',
'5G概念', '生态农业', '生物医药', '生物疫苗',
'机场航运', '数字货币', '文化传媒']
all_stock_blocks = QA.QA_fetch_stock_block_adv()
for blockname in blocks:
if (blockname in all_stock_blocks.block_name):
codelist_300 = all_stock_blocks.get_block(blockname).code
            print(u'Constituent data for predefined QA block "{}"'.format(blockname))
print(codelist_300)
else:
            print(u'QA has no default constituent data for block "{}"'.format(blockname))
quotation = easyquotation.use("timekline")
    data = quotation.real(codelist_300, prefix=False)  # assumed: use the block constituent codes gathered above
while (True):
l1_tick = quotation.market_snapshot(prefix=False)
print(l1_tick)
return True
if __name__ == '__main__':
    # Fetch tick data from Sina Finance, with automatic start/stop.
    # Any endlessly polling program can lose its connection; exceptions are not handled here,
    # so a disconnect raises and the process exits. I think that is best: it releases accidentally
    # held half-open TCP/IP connections and keeps an unattended server from exhausting its port
    # resources. It is recommended to use my crude infinite-loop scripts, which retry after 3 seconds:
"""
    For example, suppose this sub.py script is saved under the
    D:\代码\QUANTAXIS\QUANTAXIS\cli directory, together with an empty
    __init__.py at the same level (create one if it does not exist yet).
    Create a PowerShell script, sub_l1.ps1:
D:
CD D:\代码\QUANTAXIS\
$n = 1
while($n -lt 6)
{
python -m QUANTAXIS.cli.sub
Start-Sleep -Seconds 3
}
    Create a Cmd/Batch script, sub_l1.cmd:
D:
CD D:\代码\QUANTAXIS\
:start
python -m QUANTAXIS.cli.sub
@ping 127.0.0.1 -n 3 >nul
goto start
pause
    I don't know how to write the Linux Bash script; Linux users can surely write their own.
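    A minimal Bash equivalent would be the following (an untested sketch added for
    completeness; adjust the project path to your own checkout), saved as sub_l1.sh:
    #!/bin/bash
    cd /path/to/QUANTAXIS
    while true
    do
        python -m QUANTAXIS.cli.sub
        sleep 3
    done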
"""
import sys
sys.path.append('/root/ipython/')
import CommonUtils as cu
try:
cu.sendDingMsg("Start realtime sub from sina_l1 progress start now.")
sub_l1_from_sina()
except:
traceback.print_exc()
cu.sendDingMsg("Realtime sub from sina_l1 progress has stopped. please check it soon.")
# sub_l1_from_sina()
# sub_1min_from_tencent_lru()
pass
| 37.145594
| 143
| 0.545848
| 667
| 0.030346
| 0
| 0
| 1,308
| 0.059509
| 0
| 0
| 8,776
| 0.399272
|
225834b3b08a2a19311dda4b3b5c026b4df674f0
| 3,125
|
py
|
Python
|
setup.py
|
voxel51/eta
|
17fb1148e853704872ed50e0e30c7800272b8398
|
[
"Apache-2.0"
] | 25
|
2018-07-21T02:37:34.000Z
|
2022-03-30T12:57:54.000Z
|
setup.py
|
voxel51/eta
|
17fb1148e853704872ed50e0e30c7800272b8398
|
[
"Apache-2.0"
] | 183
|
2018-06-13T18:57:00.000Z
|
2022-02-24T14:35:49.000Z
|
setup.py
|
voxel51/eta
|
17fb1148e853704872ed50e0e30c7800272b8398
|
[
"Apache-2.0"
] | 13
|
2018-09-10T18:46:58.000Z
|
2022-02-07T02:25:31.000Z
|
#!/usr/bin/env python
"""
Installs ETA.
Copyright 2017-2021, Voxel51, Inc.
voxel51.com
"""
import os
from setuptools import setup, find_packages
from wheel.bdist_wheel import bdist_wheel
VERSION = "0.6.1"
class BdistWheelCustom(bdist_wheel):
def finalize_options(self):
bdist_wheel.finalize_options(self)
# Pure Python, so build a wheel for any Python version
self.universal = True
with open("README.md", "r") as fh:
long_description = fh.read()
def get_version():
if "RELEASE_VERSION" in os.environ:
version = os.environ["RELEASE_VERSION"]
if not version.startswith(VERSION):
raise ValueError(
"Release version does not match version: %s and %s"
% (version, VERSION)
)
return version
return VERSION
setup(
name="voxel51-eta",
version=get_version(),
description="Extensible Toolkit for Analytics",
author="Voxel51, Inc.",
author_email="info@voxel51.com",
url="https://github.com/voxel51/eta",
license="Apache",
long_description=long_description,
long_description_content_type="text/markdown",
packages=find_packages(),
include_package_data=True,
install_requires=[
"argcomplete",
"dill",
"future",
"glob2",
"importlib-metadata; python_version<'3.8'",
"ndjson",
"numpy",
"opencv-python-headless<5,>=4.1",
"packaging",
"patool",
"Pillow>=6.2",
"python-dateutil",
"pytz",
"requests",
"retrying",
"six",
"scikit-image",
"sortedcontainers",
"tabulate",
"tzlocal",
],
extras_require={
"pipeline": ["blockdiag", "Sphinx", "sphinxcontrib-napoleon"],
"storage": [
"boto3>=1.15",
"google-api-python-client",
"google-cloud-storage>=1.36",
"httplib2<=0.15",
"pysftp",
],
},
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: Image Processing",
"Topic :: Scientific/Engineering :: Image Recognition",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Scientific/Engineering :: Visualization",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX :: Linux",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
entry_points={"console_scripts": ["eta=eta.core.cli:main"]},
python_requires=">=2.7",
cmdclass={"bdist_wheel": BdistWheelCustom},
)
| 28.935185
| 70
| 0.58944
| 204
| 0.06528
| 0
| 0
| 0
| 0
| 0
| 0
| 1,630
| 0.5216
|
2258db01c670eb29b690517709324afbc74e8b71
| 8,582
|
py
|
Python
|
Kernel/kernel.py
|
y11en/BranchMonitoringProject
|
5d3ca533338da919a1757562f3810d49296ebf48
|
[
"MIT"
] | 122
|
2017-03-08T00:45:03.000Z
|
2022-03-01T03:05:21.000Z
|
Kernel/kernel.py
|
y11en/BranchMonitoringProject
|
5d3ca533338da919a1757562f3810d49296ebf48
|
[
"MIT"
] | 3
|
2017-03-08T01:16:54.000Z
|
2017-03-22T22:59:26.000Z
|
Kernel/kernel.py
|
y11en/BranchMonitoringProject
|
5d3ca533338da919a1757562f3810d49296ebf48
|
[
"MIT"
] | 42
|
2017-03-08T21:28:48.000Z
|
2022-02-20T15:24:46.000Z
|
# Kernel introspection module to enrich branch collected data
# This code is part of BranchMonitoring framework
# Written by: Marcus Botacin - 2017
# Federal University of Parana (UFPR)
from xml.etree.ElementTree import ElementTree # Parse XML
import subprocess # Run dump tools
import win32file as w # Use windows API
import time # Wait for data
import signal # Interrupt endless loop
# Monitoring class - retrieves branch data
class Monitor():
# class instantiation
def __init__(self,save=None):
self.save=save
self.mods = None # No introspection data at this point
signal.signal(signal.SIGINT,self.signal_handler) # Installing signal handler
# debug print
if __debug__:
print("Starting Monitor")
# open driver handle
def __open_driver_handler(self):
self.hdevice=w.CreateFile("\\\\.\\BranchMonitor",0x80000000|0x40000000,0,None,3,0x00000080,None)
# close driver handle
def __close_driver_handler(self):
w.CloseHandle(self.hdevice)
# get branch data from driver handle
def __get_branch_data(self):
# read bytes and string itself
tam,string = w.ReadFile(self.hdevice,200,None)
# if no data, return
if len(string)==0:
return None
# case having data
else:
# interpret string as hex address
branch=int(string[8:15][::-1].encode('hex'),16)
return branch
# signal handler
def signal_handler(self,signal,frame):
self.run=False
# get offset from a given function address
# mod: module to look into
# offset: offset to look for
def offset_to_func(self,mod,offset):
# get pointer to given module
funcs = self.exports[mod]
# default: no offset found
last_offset = 0
last_fname = "Unknown"
# search whole exported symbols
for f in funcs:
name = f[0] # function name
addr = f[1] # function offset
rel_addr = f[2] # relative function offset
# if we are looking for such offset
if offset == addr or offset == rel_addr:
# immediately returns
return name
# in case of a jump inside a given function
# consider the closest exported symbol
if offset > addr and addr > last_offset:
last_offset = addr
last_fname = name
# return "unknown" or the closest symbol
return last_fname
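    # Worked example of the fallback above (illustrative offsets, not taken from a
    # real dump): if a module exports FuncA at offset 0x1000 and FuncB at 0x2000,
    # then an offset of 0x1400 matches neither exactly, but FuncA is the closest
    # exported symbol below it, so "FuncA" is returned.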
# identifies to which module a given address refers
def addr_to_module(self,branch):
# consider only the meaningful bytes
branch = branch & 0xFFFFFFFF
# look into all loaded modules
for m in self.mods:
            start_addr = self.mods[m][0] # lowest addr
            end_addr = self.mods[m][1] # highest addr
# if branch is inside
if branch >= start_addr and branch <= end_addr:
# if there are exported symbols, dig into it
if(self.exports is not None):
# return base_module+function_at_offset
return m+"+"+self.offset_to_func(m,branch-start_addr)
# otherwise, return just the name
return m
# nothing found
return "Unknown"
# polling loop
def loop(self,mods=None,exports=None,save=False):
if save:
log = open(self.save,"w")
# default definitions
last = ""
self.mods = mods
self.exports = exports
self.run = True
# debug print
if __debug__:
print("Starting looping")
# open handler
self.__open_driver_handler()
try:
# infinite loop
while(self.run):
# try to get a branch tuple
branch=self.__get_branch_data()
# check if got
if branch is not None:
# no introspection, just print
if self.mods is None:
print("%x" % branch)
else:
# if there's introspection data, dig into it
module_string = self.addr_to_module(branch)
# do not print repeated entries
if module_string != last:
s = "%x <%s>" % (branch,module_string)
print(s)
if save:
log.write(s+"\n")
last = module_string
else:
# no data, sleep
time.sleep(1)
# signal received
finally:
# cleanup
if save:
log.close()
self.__close_driver_handler()
# Dumper: the introspection class
class Dumper():
# instantiation
def __init__(self):
# set parser configs
self.parse()
# set parser configs
def parse(self):
# External tools are required
# DriverView used to enumerate modules
# DriverView binary path
self.drvview_path = "driverview-x64\DriverView.exe"
# DriverView Output file
self.drvview_output = "driverview-x64\drv.xml"
# DllView used to map function to offsets
# DllView binary path
self.dllview_path = "dllexp-x64\dllexp.exe"
# DllView output
self.dllview_output = "Downloads\dllexp-x64\dll.xml"
# enumerate loaded modules
def dump_modules(self):
if __debug__:
print("Dumping Modules")
# using DriverView
s = subprocess.Popen([self.drvview_path,"/sxml",self.drvview_output])
s.wait()
# get offsets
def dump_exports(self,bin):
# using DllView
s = subprocess.Popen([self.dllview_path,"/from_files",bin,"/sxml",self.dllview_output])
s.wait()
# parse exported symbols
def parse_exports(self):
exp = []
self.dtree = ElementTree()
self.dtree.parse(self.dllview_output)
for p in self.dtree.findall("item"):
# get function name
fname = p.find('function_name').text
# try to get address
try:
# address
addr = int(p.find('address').text,16)
except:
# error, no meaningful address
addr = 0xFFFFFFFF
# also get relative addr
rel_addr = int(p.find('relative_address').text,16)
# add tuple to list
exp.append((fname,addr,rel_addr))
# return list
return exp
# get offsets and parse
def get_exports(self,bin):
if __debug__:
print("Getting Exports for: %s" % bin)
self.dump_exports(bin)
return self.parse_exports()
# parse loaded modules/drivers
def parse_modules(self):
mods = dict()
exports = dict()
self.dtree = ElementTree()
self.dtree.parse(self.drvview_output)
for p in self.dtree.findall("item"):
# module name
mod_name = p.find('driver_name').text
# initial addr
mod_addr = int(p.find('address').text.replace("`",""),16)
# end addr
mod_end_addr = int(p.find('end_address').text.replace("`",""),16)
# add to dict - no repeated modules
mods[mod_name]=(mod_addr,mod_end_addr)
# try to get exports for the module
# returns a list
exp = self.get_exports(p.find('filename').text)
# map module to export list
exports[mod_name] = exp
# return module dict and exports dict
return mods, exports
# "main"
if __name__ == '__main__':
# introspect first
d = Dumper()
d.dump_modules()
mods, exports = d.parse_modules()
# then monitor
m=Monitor(save="save.log")
# infinite loop
# introspected data as parameter to the monitor
m.loop(mods,exports,True)
# no module import
else:
print("No module import support yet!")
| 34.465863
| 105
| 0.53519
| 7,599
| 0.885458
| 0
| 0
| 0
| 0
| 0
| 0
| 2,988
| 0.348171
|
2258e4decef3126cb93f24dd49680df54adc84dc
| 243
|
py
|
Python
|
config/environments/__init__.py
|
mihail-ivanov/flask-init
|
47f634f70bb8bd02db8f0a0a3a1955b08a249254
|
[
"MIT"
] | null | null | null |
config/environments/__init__.py
|
mihail-ivanov/flask-init
|
47f634f70bb8bd02db8f0a0a3a1955b08a249254
|
[
"MIT"
] | null | null | null |
config/environments/__init__.py
|
mihail-ivanov/flask-init
|
47f634f70bb8bd02db8f0a0a3a1955b08a249254
|
[
"MIT"
] | null | null | null |
from .development import DevelopmentConfig
from .testing import TestingConfig
from .production import ProductionConfig
app_config = {
'development': DevelopmentConfig,
'testing': TestingConfig,
'production': ProductionConfig,
}
| 20.25
| 42
| 0.773663
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 34
| 0.139918
|
225b7caf45db6cf9057062f56f08950fb1b441f2
| 5,026
|
py
|
Python
|
dialogs.py
|
rdbende/Sun-Valley-messageboxes
|
d6f2b0849caf63c609fc22ecd3909491e2f3ffcf
|
[
"MIT"
] | 5
|
2021-12-29T11:58:37.000Z
|
2022-03-06T15:13:08.000Z
|
dialogs.py
|
rdbende/Sun-Valley-messageboxes
|
d6f2b0849caf63c609fc22ecd3909491e2f3ffcf
|
[
"MIT"
] | 1
|
2022-02-05T10:30:08.000Z
|
2022-02-05T16:04:06.000Z
|
dialogs.py
|
rdbende/Sun-Valley-messageboxes
|
d6f2b0849caf63c609fc22ecd3909491e2f3ffcf
|
[
"MIT"
] | null | null | null |
import tkinter as tk
from tkinter import ttk
from functools import partial
def popup(parent, title, details, icon, *, buttons):
dialog = tk.Toplevel()
result = None
big_frame = ttk.Frame(dialog)
big_frame.pack(fill="both", expand=True)
big_frame.columnconfigure(0, weight=1)
big_frame.rowconfigure(0, weight=1)
info_frame = ttk.Frame(big_frame, padding=(10, 12), style="Dialog_info.TFrame")
info_frame.grid(row=0, column=0, sticky="nsew")
info_frame.columnconfigure(1, weight=1)
info_frame.rowconfigure(1, weight=1)
try:
color = big_frame.tk.call("set", "themeColors::dialogInfoBg")
except tk.TclError:
color = big_frame.tk.call("ttk::style", "lookup", "TFrame", "-background")
icon_label = ttk.Label(info_frame, image=icon, anchor="nw", background=color)
if icon is not None:
icon_label.grid(
row=0, column=0, sticky="nsew", padx=(12, 0), pady=10, rowspan=2
)
title_label = ttk.Label(
info_frame, text=title, anchor="nw", font=("", 14, "bold"), background=color
)
title_label.grid(row=0, column=1, sticky="nsew", padx=(12, 17), pady=(10, 8))
detail_label = ttk.Label(info_frame, text=details, anchor="nw", background=color)
detail_label.grid(row=1, column=1, sticky="nsew", padx=(12, 17), pady=(5, 10))
button_frame = ttk.Frame(
big_frame, padding=(22, 22, 12, 22), style="Dialog_buttons.TFrame"
)
button_frame.grid(row=2, column=0, sticky="nsew")
def on_button(value):
nonlocal result
result = value
dialog.destroy()
for index, button_value in enumerate(buttons):
style = None
state = None
default = False
sticky = "nes" if len(buttons) == 1 else "nsew"
if len(button_value) > 2:
if button_value[2] == "accent":
style = "Accent.TButton"
default = True
elif button_value[2] == "disabled":
state = "disabled"
elif button_value[2] == "default":
default = True
button = ttk.Button(
button_frame,
text=button_value[0],
width=18,
command=partial(on_button, button_value[1]),
style=style,
state=state,
)
if default:
button.bind("<Return>", button["command"])
button.focus()
button.grid(row=0, column=index, sticky=sticky, padx=(0, 10))
button_frame.columnconfigure(index, weight=1)
dialog.overrideredirect(True)
dialog.update()
dialog_width = dialog.winfo_width()
dialog_height = dialog.winfo_height()
if parent is None:
parent_width = dialog.winfo_screenwidth()
parent_height = dialog.winfo_screenheight()
parent_x = 0
parent_y = 0
else:
parent_width = parent.winfo_width()
parent_height = parent.winfo_height()
parent_x = parent.winfo_x()
parent_y = parent.winfo_y()
x_coord = int(parent_width / 2 + parent_x - dialog_width / 2)
y_coord = int(parent_height / 2 + parent_y - dialog_height / 2)
dialog.geometry("+{}+{}".format(x_coord, y_coord))
dialog.minsize(320, dialog_height)
dialog.transient(parent)
dialog.grab_set()
dialog.wait_window()
return result
def show_message(title="Title", details="Description", *, parent=None, icon=None):
return popup(
parent,
title,
details,
icon,
buttons=[("Ok", None, "default")],
)
def ask_ok_cancel(title="Title", details="Description", *, parent=None, icon=None):
return popup(
parent,
title,
details,
icon,
buttons=[("Ok", True, "accent"), ("Cancel", None)],
)
def ask_yes_no(title="Title", details="Description", *, parent=None, icon=None):
return popup(
parent,
title,
details,
icon,
buttons=[("Yes", True, "accent"), ("No", False)],
)
def ask_yes_no_cancel(title="Title", details="Description", *, parent=None, icon=None):
return popup(
parent,
title,
details,
icon,
buttons=[("Yes", True, "accent"), ("No", False), ("Cancel", None)],
)
def ask_retry_cancel(title="Title", details="Description", *, parent=None, icon=None):
return popup(
parent,
title,
details,
icon,
buttons=[("Retry", True, "accent"), ("Cancel", None)],
)
def ask_allow_block(title="Title", details="Description", *, parent=None, icon=None):
return popup(
parent,
title,
details,
icon,
buttons=[("Allow", True, "accent"), ("Block", False)],
)
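# The helpers above are thin wrappers around popup(); `buttons` is a list of
# (label, return_value[, style]) tuples, where the optional style is "accent",
# "default" or "disabled". A custom dialog is therefore just another wrapper;
# the sketch below follows the same pattern and is not part of the original set:
def ask_save_discard(title="Title", details="Description", *, parent=None, icon=None):
    return popup(
        parent,
        title,
        details,
        icon,
        buttons=[("Save", True, "accent"), ("Discard", False), ("Cancel", None)],
    )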
if __name__ == "__main__":
window = tk.Tk()
window.tk.call("source", "sun-valley.tcl")
window.tk.call("set_theme", "dark")
window.geometry("600x600")
show_message("No WiFi connection", "Check your connection and try again.")
window.mainloop()
| 27.615385
| 87
| 0.591922
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 621
| 0.123558
|
225c724f4896f9bddbbf401bf1a3929af43df247
| 94
|
py
|
Python
|
enthought/endo/docerror.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 3
|
2016-12-09T06:05:18.000Z
|
2018-03-01T13:00:29.000Z
|
enthought/endo/docerror.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | 1
|
2020-12-02T00:51:32.000Z
|
2020-12-02T08:48:55.000Z
|
enthought/endo/docerror.py
|
enthought/etsproxy
|
4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347
|
[
"BSD-3-Clause"
] | null | null | null |
# proxy module
from __future__ import absolute_import
from etsdevtools.endo.docerror import *
| 23.5
| 39
| 0.840426
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 14
| 0.148936
|
225d1d06e227d6a8a3242fe225e574042e91441e
| 12,591
|
py
|
Python
|
troposphere/kendra.py
|
marinpurgar/troposphere
|
ec35854000ddfd5e2eecd251d5ecaf31979bd2d1
|
[
"BSD-2-Clause"
] | null | null | null |
troposphere/kendra.py
|
marinpurgar/troposphere
|
ec35854000ddfd5e2eecd251d5ecaf31979bd2d1
|
[
"BSD-2-Clause"
] | null | null | null |
troposphere/kendra.py
|
marinpurgar/troposphere
|
ec35854000ddfd5e2eecd251d5ecaf31979bd2d1
|
[
"BSD-2-Clause"
] | null | null | null |
# Copyright (c) 2012-2020, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
#
# *** Do not modify - this file is autogenerated ***
# Resource specification version: 18.6.0
from . import AWSObject
from . import AWSProperty
from . import Tags
from .validators import boolean
from .validators import integer
class AclConfiguration(AWSProperty):
props = {
'AllowedGroupsColumnName': (basestring, True),
}
class ChangeDetectingColumns(AWSProperty):
props = {
'ChangeDetectingColumns': ([basestring], False),
}
class DataSourceToIndexFieldMapping(AWSProperty):
props = {
'DataSourceFieldName': (basestring, True),
'DateFieldFormat': (basestring, False),
'IndexFieldName': (basestring, True),
}
class DataSourceToIndexFieldMappingList(AWSProperty):
props = {
'DataSourceToIndexFieldMappingList':
([DataSourceToIndexFieldMapping], False),
}
class ColumnConfiguration(AWSProperty):
props = {
'ChangeDetectingColumns': (ChangeDetectingColumns, True),
'DocumentDataColumnName': (basestring, True),
'DocumentIdColumnName': (basestring, True),
'DocumentTitleColumnName': (basestring, False),
'FieldMappings': (DataSourceToIndexFieldMappingList, False),
}
class ConnectionConfiguration(AWSProperty):
props = {
'DatabaseHost': (basestring, True),
'DatabaseName': (basestring, True),
'DatabasePort': (integer, True),
'SecretArn': (basestring, True),
'TableName': (basestring, True),
}
class DataSourceVpcConfiguration(AWSProperty):
props = {
'SecurityGroupIds': ([basestring], True),
'SubnetIds': ([basestring], True),
}
class SqlConfiguration(AWSProperty):
props = {
'QueryIdentifiersEnclosingOption': (basestring, False),
}
class DatabaseConfiguration(AWSProperty):
props = {
'AclConfiguration': (AclConfiguration, False),
'ColumnConfiguration': (ColumnConfiguration, True),
'ConnectionConfiguration': (ConnectionConfiguration, True),
'DatabaseEngineType': (basestring, True),
'SqlConfiguration': (SqlConfiguration, False),
'VpcConfiguration': (DataSourceVpcConfiguration, False),
}
class DataSourceInclusionsExclusionsStrings(AWSProperty):
props = {
'DataSourceInclusionsExclusionsStrings': ([basestring], False),
}
class OneDriveUserList(AWSProperty):
props = {
'OneDriveUserList': ([basestring], False),
}
class S3Path(AWSProperty):
props = {
'Bucket': (basestring, True),
'Key': (basestring, True),
}
class OneDriveUsers(AWSProperty):
props = {
'OneDriveUserList': (OneDriveUserList, False),
'OneDriveUserS3Path': (S3Path, False),
}
class OneDriveConfiguration(AWSProperty):
props = {
'ExclusionPatterns': (DataSourceInclusionsExclusionsStrings, False),
'FieldMappings': (DataSourceToIndexFieldMappingList, False),
'InclusionPatterns': (DataSourceInclusionsExclusionsStrings, False),
'OneDriveUsers': (OneDriveUsers, True),
'SecretArn': (basestring, True),
'TenantDomain': (basestring, True),
}
class AccessControlListConfiguration(AWSProperty):
props = {
'KeyPath': (basestring, False),
}
class DocumentsMetadataConfiguration(AWSProperty):
props = {
'S3Prefix': (basestring, False),
}
class S3DataSourceConfiguration(AWSProperty):
props = {
'AccessControlListConfiguration':
(AccessControlListConfiguration, False),
'BucketName': (basestring, True),
'DocumentsMetadataConfiguration':
(DocumentsMetadataConfiguration, False),
'ExclusionPatterns': (DataSourceInclusionsExclusionsStrings, False),
'InclusionPrefixes': (DataSourceInclusionsExclusionsStrings, False),
}
class SalesforceChatterFeedIncludeFilterTypes(AWSProperty):
props = {
'SalesforceChatterFeedIncludeFilterTypes': ([basestring], False),
}
class SalesforceChatterFeedConfiguration(AWSProperty):
props = {
'DocumentDataFieldName': (basestring, True),
'DocumentTitleFieldName': (basestring, False),
'FieldMappings': (DataSourceToIndexFieldMappingList, False),
'IncludeFilterTypes':
(SalesforceChatterFeedIncludeFilterTypes, False),
}
class SalesforceCustomKnowledgeArticleTypeConfiguration(AWSProperty):
props = {
'DocumentDataFieldName': (basestring, True),
'DocumentTitleFieldName': (basestring, False),
'FieldMappings': (DataSourceToIndexFieldMappingList, False),
'Name': (basestring, True),
}
class SalesforceCustomKnowledgeArticleTypeConfigurationList(AWSProperty):
props = {
'SalesforceCustomKnowledgeArticleTypeConfigurationList':
([SalesforceCustomKnowledgeArticleTypeConfiguration], False),
}
class SalesforceKnowledgeArticleStateList(AWSProperty):
props = {
'SalesforceKnowledgeArticleStateList': ([basestring], False),
}
class SalesforceStandardKnowledgeArticleTypeConfiguration(AWSProperty):
props = {
'DocumentDataFieldName': (basestring, True),
'DocumentTitleFieldName': (basestring, False),
'FieldMappings': (DataSourceToIndexFieldMappingList, False),
}
class SalesforceKnowledgeArticleConfiguration(AWSProperty):
props = {
'CustomKnowledgeArticleTypeConfigurations':
(SalesforceCustomKnowledgeArticleTypeConfigurationList, False),
'IncludedStates': (SalesforceKnowledgeArticleStateList, True),
'StandardKnowledgeArticleTypeConfiguration':
(SalesforceStandardKnowledgeArticleTypeConfiguration, False),
}
class SalesforceStandardObjectAttachmentConfiguration(AWSProperty):
props = {
'DocumentTitleFieldName': (basestring, False),
'FieldMappings': (DataSourceToIndexFieldMappingList, False),
}
class SalesforceStandardObjectConfiguration(AWSProperty):
props = {
'DocumentDataFieldName': (basestring, True),
'DocumentTitleFieldName': (basestring, False),
'FieldMappings': (DataSourceToIndexFieldMappingList, False),
'Name': (basestring, True),
}
class SalesforceStandardObjectConfigurationList(AWSProperty):
props = {
'SalesforceStandardObjectConfigurationList':
([SalesforceStandardObjectConfiguration], False),
}
class SalesforceConfiguration(AWSProperty):
props = {
'ChatterFeedConfiguration':
(SalesforceChatterFeedConfiguration, False),
'CrawlAttachments': (boolean, False),
'ExcludeAttachmentFilePatterns':
(DataSourceInclusionsExclusionsStrings, False),
'IncludeAttachmentFilePatterns':
(DataSourceInclusionsExclusionsStrings, False),
'KnowledgeArticleConfiguration':
(SalesforceKnowledgeArticleConfiguration, False),
'SecretArn': (basestring, True),
'ServerUrl': (basestring, True),
'StandardObjectAttachmentConfiguration':
(SalesforceStandardObjectAttachmentConfiguration, False),
'StandardObjectConfigurations':
(SalesforceStandardObjectConfigurationList, False),
}
class ServiceNowKnowledgeArticleConfiguration(AWSProperty):
props = {
'CrawlAttachments': (boolean, False),
'DocumentDataFieldName': (basestring, True),
'DocumentTitleFieldName': (basestring, False),
'ExcludeAttachmentFilePatterns':
(DataSourceInclusionsExclusionsStrings, False),
'FieldMappings': (DataSourceToIndexFieldMappingList, False),
'IncludeAttachmentFilePatterns':
(DataSourceInclusionsExclusionsStrings, False),
}
class ServiceNowServiceCatalogConfiguration(AWSProperty):
props = {
'CrawlAttachments': (boolean, False),
'DocumentDataFieldName': (basestring, True),
'DocumentTitleFieldName': (basestring, False),
'ExcludeAttachmentFilePatterns':
(DataSourceInclusionsExclusionsStrings, False),
'FieldMappings': (DataSourceToIndexFieldMappingList, False),
'IncludeAttachmentFilePatterns':
(DataSourceInclusionsExclusionsStrings, False),
}
class ServiceNowConfiguration(AWSProperty):
props = {
'HostUrl': (basestring, True),
'KnowledgeArticleConfiguration':
(ServiceNowKnowledgeArticleConfiguration, False),
'SecretArn': (basestring, True),
'ServiceCatalogConfiguration':
(ServiceNowServiceCatalogConfiguration, False),
'ServiceNowBuildVersion': (basestring, True),
}
class SharePointConfiguration(AWSProperty):
props = {
'CrawlAttachments': (boolean, False),
'DocumentTitleFieldName': (basestring, False),
'ExclusionPatterns': (DataSourceInclusionsExclusionsStrings, False),
'FieldMappings': (DataSourceToIndexFieldMappingList, False),
'InclusionPatterns': (DataSourceInclusionsExclusionsStrings, False),
'SecretArn': (basestring, True),
'SharePointVersion': (basestring, True),
'Urls': ([basestring], True),
'UseChangeLog': (boolean, False),
'VpcConfiguration': (DataSourceVpcConfiguration, False),
}
class DataSourceConfiguration(AWSProperty):
props = {
'DatabaseConfiguration': (DatabaseConfiguration, False),
'OneDriveConfiguration': (OneDriveConfiguration, False),
'S3Configuration': (S3DataSourceConfiguration, False),
'SalesforceConfiguration': (SalesforceConfiguration, False),
'ServiceNowConfiguration': (ServiceNowConfiguration, False),
'SharePointConfiguration': (SharePointConfiguration, False),
}
class DataSource(AWSObject):
resource_type = "AWS::Kendra::DataSource"
props = {
'DataSourceConfiguration': (DataSourceConfiguration, True),
'Description': (basestring, False),
'IndexId': (basestring, True),
'Name': (basestring, True),
'RoleArn': (basestring, True),
'Schedule': (basestring, False),
'Tags': (Tags, False),
'Type': (basestring, True),
}
class Faq(AWSObject):
resource_type = "AWS::Kendra::Faq"
props = {
'Description': (basestring, False),
'FileFormat': (basestring, False),
'IndexId': (basestring, True),
'Name': (basestring, True),
'RoleArn': (basestring, True),
'S3Path': (S3Path, True),
'Tags': (Tags, False),
}
class CapacityUnitsConfiguration(AWSProperty):
props = {
'QueryCapacityUnits': (integer, True),
'StorageCapacityUnits': (integer, True),
}
class ValueImportanceItem(AWSProperty):
props = {
'Key': (basestring, False),
'Value': (integer, False),
}
class ValueImportanceItems(AWSProperty):
props = {
'ValueImportanceItems': ([ValueImportanceItem], False),
}
class Relevance(AWSProperty):
props = {
'Duration': (basestring, False),
'Freshness': (boolean, False),
'Importance': (integer, False),
'RankOrder': (basestring, False),
'ValueImportanceItems': (ValueImportanceItems, False),
}
class Search(AWSProperty):
props = {
'Displayable': (boolean, False),
'Facetable': (boolean, False),
'Searchable': (boolean, False),
'Sortable': (boolean, False),
}
class DocumentMetadataConfiguration(AWSProperty):
props = {
'Name': (basestring, True),
'Relevance': (Relevance, False),
'Search': (Search, False),
'Type': (basestring, True),
}
class DocumentMetadataConfigurationList(AWSProperty):
props = {
'DocumentMetadataConfigurationList':
([DocumentMetadataConfiguration], False),
}
class ServerSideEncryptionConfiguration(AWSProperty):
props = {
'KmsKeyId': (basestring, False),
}
class Index(AWSObject):
resource_type = "AWS::Kendra::Index"
props = {
'CapacityUnits': (CapacityUnitsConfiguration, False),
'Description': (basestring, False),
'DocumentMetadataConfigurations':
(DocumentMetadataConfigurationList, False),
'Edition': (basestring, True),
'Name': (basestring, True),
'RoleArn': (basestring, True),
'ServerSideEncryptionConfiguration':
(ServerSideEncryptionConfiguration, False),
'Tags': (Tags, False),
}
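# A minimal usage sketch (values are placeholders; per the props dicts above,
# Edition, Name and RoleArn are the required Index properties):
#
#   from troposphere import Template
#   from troposphere.kendra import Index
#
#   template = Template()
#   template.add_resource(Index(
#       "SearchIndex",
#       Name="my-index",
#       Edition="DEVELOPER_EDITION",
#       RoleArn="arn:aws:iam::123456789012:role/KendraIndexRole",
#   ))
#   print(template.to_json())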
| 30.194245
| 76
| 0.669685
| 12,113
| 0.962036
| 0
| 0
| 0
| 0
| 0
| 0
| 3,144
| 0.249702
|
225fee4b672c69f3b564170c5c438a29025400e1
| 3,046
|
py
|
Python
|
cv2/wxPython-CV-widget/main.py
|
whitmans-max/python-examples
|
881a8f23f0eebc76816a0078e19951893f0daaaa
|
[
"MIT"
] | 140
|
2017-02-21T22:49:04.000Z
|
2022-03-22T17:51:58.000Z
|
cv2/wxPython-CV-widget/main.py
|
whitmans-max/python-examples
|
881a8f23f0eebc76816a0078e19951893f0daaaa
|
[
"MIT"
] | 5
|
2017-12-02T19:55:00.000Z
|
2021-09-22T23:18:39.000Z
|
cv2/wxPython-CV-widget/main.py
|
whitmans-max/python-examples
|
881a8f23f0eebc76816a0078e19951893f0daaaa
|
[
"MIT"
] | 79
|
2017-01-25T10:53:33.000Z
|
2022-03-11T16:13:57.000Z
|
import wx
import cv2
#----------------------------------------------------------------------
# Panel to display image from camera
#----------------------------------------------------------------------
class WebcamPanel(wx.Window): # wx.Panel, wx.Control
def __init__(self, parent, camera, fps=15, flip=False):
wx.Window.__init__(self, parent)
# remember arguments
self.camera = camera
self.fps = fps
self.flip = flip
# get frame size
ret_value, frame = self.camera.read()
height, width = frame.shape[:2]
# resize panel with camera image
self.SetSize( (width, height) )
#self.SetMinSize( (width, height) )
# resize main window
        self.GetParent().GetParent().SetSize( (width, height+37) ) # needs a fix so the extra +37 (title-bar height) is not hard-coded
#self.GetGrandParent().SetSize( (width, height+25) )
#self.GetTopLevelParent().SetSize( (width, height+25) ) # wrong parent
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
if self.flip:
frame = cv2.flip(frame, 1)
# create bitmap with frame
self.bmp = wx.BitmapFromBuffer(width, height, frame)
# timer to refresh frames
self.timer = wx.Timer(self)
self.timer.Start(1000./fps)
# add functions to events
self.Bind(wx.EVT_PAINT, self.OnPaint) # run when it is needed
self.Bind(wx.EVT_TIMER, self.NextFrame) # run by timer
def OnPaint(self, event):
dc = wx.BufferedPaintDC(self)
dc.DrawBitmap(self.bmp, 0, 0)
def NextFrame(self, event):
ret_value, frame = self.camera.read()
if ret_value:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
if self.flip:
frame = cv2.flip(frame, 1)
self.bmp.CopyFromBuffer(frame)
self.Refresh()
#----------------------------------------------------------------------
# Main Window
#----------------------------------------------------------------------
class MainWindow(wx.Frame):
def __init__(self, camera, fps=10):
wx.Frame.__init__(self, None)
self.panel = wx.Panel(self, -1)
# add sizer
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.panel.SetSizer(self.sizer)
# add button
self.button = wx.Button(self.panel, label="CAPTURE")
self.button.Bind(wx.EVT_BUTTON, self.OnButton)
self.sizer.Add(self.button, 0, wx.EXPAND)
# add panel with webcam image
self.webcampanel = WebcamPanel(self.panel, camera)
self.sizer.Add(self.webcampanel, 1, wx.EXPAND)
#self.sizer.Layout()
#self.webcampanel.Layout()
#self.Fit()
self.Show()
def OnButton(self, event):
print("TODO: save image in file")
#----------------------------------------------------------------------
camera = cv2.VideoCapture(0)
app = wx.App()
MainWindow(camera)
app.MainLoop()
| 30.158416
| 114
| 0.520026
| 2,512
| 0.824688
| 0
| 0
| 0
| 0
| 0
| 0
| 980
| 0.321733
|
2260413d47cac288ecaeb49a5d64f3b2f805bd94
| 580
|
py
|
Python
|
src/inputbox.py
|
creikey/nuked-dashboard
|
250f8af29570bca69394fd1328343917fa067543
|
[
"MIT"
] | 1
|
2019-01-17T14:20:32.000Z
|
2019-01-17T14:20:32.000Z
|
src/inputbox.py
|
creikey/nuked-dashboard
|
250f8af29570bca69394fd1328343917fa067543
|
[
"MIT"
] | 3
|
2019-01-19T01:33:10.000Z
|
2019-01-19T01:35:35.000Z
|
src/inputbox.py
|
creikey/doomsdash
|
250f8af29570bca69394fd1328343917fa067543
|
[
"MIT"
] | null | null | null |
import pynk
from pynk.nkpygame import NkPygame
class InputBox:
def __init__(self, max_len):
# self.max_len = pynk.ffi.new("int *", max_len)
self.len = pynk.ffi.new("int *", 0)
self.max_len = max_len
# self.len = 0
self.cur_text = pynk.ffi.new("char[{}]".format(max_len))
def show(self, nkpy: NkPygame):
pynk.lib.nk_edit_string(nkpy.ctx, pynk.lib.NK_EDIT_SIMPLE,
self.cur_text, self.len, self.max_len, pynk.ffi.addressof(
pynk.lib, "nk_filter_default"))
| 34.117647
| 90
| 0.575862
| 530
| 0.913793
| 0
| 0
| 0
| 0
| 0
| 0
| 97
| 0.167241
|
226058992d51da3d32320a685665a445a8020b91
| 1,454
|
py
|
Python
|
01_demo/MLP_test.py
|
wwww666/Tensorflow2.0
|
4df3a3784482bb8db7943ffb402b5822d5111ab9
|
[
"Apache-2.0"
] | 2
|
2020-04-24T10:20:18.000Z
|
2021-02-25T03:53:07.000Z
|
01_demo/MLP_test.py
|
wwww666/Tensorflow2.0
|
4df3a3784482bb8db7943ffb402b5822d5111ab9
|
[
"Apache-2.0"
] | null | null | null |
01_demo/MLP_test.py
|
wwww666/Tensorflow2.0
|
4df3a3784482bb8db7943ffb402b5822d5111ab9
|
[
"Apache-2.0"
] | null | null | null |
'''
A hand-written two-layer (MLP) network with a custom ReLU activation.
'''
import tensorflow as tf
import numpy as np
import sys
sys.path.append("..")
from softmax_test import train_che3
from tensorflow.keras.datasets.fashion_mnist import load_data
# Load the dataset; cast the dtype and normalize to [0, 1]
(x_train,y_train),(x_test,y_test)=load_data()
batch_size=256
x_train=tf.cast(x_train,tf.float32)
x_test=tf.cast(x_test,tf.float32)
x_train=x_train/255.
x_test=x_test/255.
train_iter=tf.data.Dataset.from_tensor_slices((x_train,y_train)).batch(batch_size)
test_iter=tf.data.Dataset.from_tensor_slices((x_test,y_test)).batch(batch_size)
# Define input, output and hidden-layer sizes; initialize weights W and biases b
num_inputs,num_outputs,num_hiddens=784,10,256
W1=tf.Variable(tf.random.normal(shape=(num_inputs,num_hiddens),mean=0.0,stddev=0.01,dtype=tf.float32))
b1=tf.Variable(tf.zeros(num_hiddens,dtype=tf.float32))
W2=tf.Variable(tf.random.normal(shape=(num_hiddens,num_outputs),mean=0.0,stddev=0.01,dtype=tf.float32))
b2=tf.Variable(tf.random.normal([num_outputs],stddev=0.1))
# Define the ReLU activation
def relu(X):
return tf.math.maximum(X,0)
# Define the network structure; returns softmax class probabilities
def net(X):
    X=tf.reshape(X,shape=[-1,num_inputs])
    h=relu(tf.matmul(X,W1)+b1)  # hidden layer
    return tf.math.softmax(tf.matmul(h,W2)+b2)
# Define the loss function
def loss(y_hat,y_true):
return tf.losses.sparse_categorical_crossentropy(y_true,y_hat)
# Train the model; define hyperparameters
num_epochs,lr=5,0.1
params=[W1,b1,W2,b2]
# Train directly with the helper from the previous file
train_che3(net,train_iter,test_iter,loss,num_epochs,batch_size,params,lr)
| 29.08
| 104
| 0.751719
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 373
| 0.223621
|
22613f6d8ef797b79ce3c0bf426040fa5c8d5f9b
| 1,704
|
py
|
Python
|
ticketnum/ticket_numbering.py
|
phizzl3/PrintShopScripts
|
26cf12d189836907370fd8671ef0d8eba7cd3411
|
[
"MIT"
] | 1
|
2021-01-19T20:36:35.000Z
|
2021-01-19T20:36:35.000Z
|
ticketnum/ticket_numbering.py
|
phizzl3/counter-calculator
|
26cf12d189836907370fd8671ef0d8eba7cd3411
|
[
"MIT"
] | null | null | null |
ticketnum/ticket_numbering.py
|
phizzl3/counter-calculator
|
26cf12d189836907370fd8671ef0d8eba7cd3411
|
[
"MIT"
] | null | null | null |
"""
A simple script for numbering nUp tickets for the print shop.
"""
def numbering_main() -> None:
"""
Gets numbering sequences for nUp ticket numbering.
    Gets the total number of tickets requested along with how many will fit on a
sheet (n_up) as well as the starting ticket number and prints the ticket
number groupings to the console.
"""
print('[ Ticket Numbering Assist ]'.center(40))
# Get ticket, sheet and numbering info
total_requested = int(input('\n How many tickets do you need in total?: '))
n_up = int(input(' How many tickets will fit on a sheet?: '))
starting_number = int(input(' What number should we start with?: '))
# Do math & round up if needed
total_sheets = total_requested // n_up
final_tickets = total_requested
if total_requested % n_up > 0:
total_sheets += 1
final_tickets = total_sheets * n_up
# Print totals to the console
print('\n Final totals...')
print(f' Total tickets Printed: {final_tickets}')
print(f' Tickets per sheet: {n_up}')
print(f' Total Sheets needed: {total_sheets}\n')
print(' Here are your numbers...\n')
# Get ending ticket number and set initial display number
ending_number = starting_number + total_sheets - 1
display_number = 1
# Display to console
for i in range(n_up):
print(
f' #{display_number:2}: Starting Number - {starting_number:4} | Ending Number - {ending_number:4}')
starting_number = ending_number + 1
ending_number = starting_number + total_sheets - 1
display_number += 1
input('\n Press ENTER to return...')
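# Worked example of the grouping (arithmetic only, not captured script output):
# 1000 tickets, 4-up, starting at 1 gives 250 sheets; position 1 is numbered 1-250,
# position 2 is 251-500, position 3 is 501-750 and position 4 is 751-1000, so each
# position's stack carries one consecutive block of numbers.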
if __name__ == '__main__':
numbering_main()
| 33.411765
| 111
| 0.662559
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 958
| 0.562207
|
2261d6d71d2909cadfc80285de61e3b9d29b7970
| 2,539
|
py
|
Python
|
emergent ferromagnetism near three-quarters filling in twisted bilayer graphene/scripts/myTerrain.py
|
aaronsharpe/publication_archives
|
aabf1a7899b81c43fc27bdd05094f5a84e509e90
|
[
"MIT"
] | null | null | null |
emergent ferromagnetism near three-quarters filling in twisted bilayer graphene/scripts/myTerrain.py
|
aaronsharpe/publication_archives
|
aabf1a7899b81c43fc27bdd05094f5a84e509e90
|
[
"MIT"
] | null | null | null |
emergent ferromagnetism near three-quarters filling in twisted bilayer graphene/scripts/myTerrain.py
|
aaronsharpe/publication_archives
|
aabf1a7899b81c43fc27bdd05094f5a84e509e90
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon May 15 21:32:17 2017
@author: Aaron Sharpe
"""
import numpy as np
import os
from matplotlib.colors import LinearSegmentedColormap
def igorTerrain(n):
    n1 = int(np.around(n*25/256))
    n2 = int(np.around(n*37/256))
    n3 = int(np.around(n*100/256))
    n4 = int(np.around(n*150/256))
r = np.zeros((n,3))
g = np.zeros((n,3))
b = np.zeros((n,3))
r[0:int(n1),1] = np.linspace(0.2,0,n1)
r[int(n1)-1:int(n2),1] = np.linspace(0,0,(n2-n1)+1)
r[int(n2)-1:int(n3),1] = np.linspace(0,1,(n3-n2)+1)
r[int(n3)-1:int(n4),1] = np.linspace(1,.535,(n4-n3)+1)
r[int(n4)-1:int(n),1] = np.linspace(.535,1,(n-n4)+1)
r[:,2] = r[:,1]
g[0:int(n1),1] = np.linspace(.2,.6,n1)
g[int(n1)-1:int(n2),1] = np.linspace(.6,.8,(n2-n1)+1)
g[int(n2)-1:int(n3),1] = np.linspace(.8,1,(n3-n2)+1)
g[int(n3)-1:int(n4),1] = np.linspace(1,.356,(n4-n3)+1)
g[int(n4)-1:int(n),1] = np.linspace(.356,1,(n-n4)+1)
g[:,2] = g[:,1]
b[0:int(n1),1] = np.linspace(.6,1,n1)
b[int(n1)-1:int(n2),1] = np.linspace(1,.375,(n2-n1)+1)
b[int(n2)-1:int(n3),1] = np.linspace(.375,.6,(n3-n2)+1)
b[int(n3)-1:int(n4),1] = np.linspace(.6,.3,(n4-n3)+1)
b[int(n4)-1:int(n),1] = np.linspace(.3,1,(n-n4)+1)
b[:,2] = b[:,1]
x = np.linspace(0,1,n)
r[:,0] = x
g[:,0] = x
b[:,0] = x
r = tuple(map(tuple,r))
g = tuple(map(tuple,g))
b = tuple(map(tuple,b))
cdict = {'red':r,'green':g,'blue':b}
myTerrain = LinearSegmentedColormap('my_colormap', cdict)
#print(myTerrain)
return myTerrain
def coldHot(trunclo,trunchi):
CURRENT_DIR = os.path.dirname(__file__)
file_path = os.path.join(CURRENT_DIR,'coldwarm.txt')
mat = np.loadtxt(file_path)
n = np.shape(mat[int(trunclo):-int(trunchi)])[0]
rdat = mat[int(trunclo):-int(trunchi),0]/np.max(mat[int(trunclo):-int(trunchi),0])
gdat = mat[int(trunclo):-int(trunchi),1]/np.max(mat[int(trunclo):-int(trunchi),1])
bdat = mat[int(trunclo):-int(trunchi),2]/np.max(mat[int(trunclo):-int(trunchi),2])
r = np.zeros((n,3))
g = np.zeros((n,3))
b = np.zeros((n,3))
r[:,1] = rdat;
r[:,2] = rdat
g[:,1] = gdat;
g[:,2] = gdat
b[:,1] = bdat;
b[:,2] = bdat
x = np.linspace(0,1,n)
r[:,0] = x
g[:,0] = x
b[:,0] = x
r = tuple(map(tuple,r))
g = tuple(map(tuple,g))
b = tuple(map(tuple,b))
cdict = {'red':r,'green':g,'blue':b}
myColdhot = LinearSegmentedColormap('my_colormap', cdict)
return myColdhot
| 30.590361
| 86
| 0.54549
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 182
| 0.071682
|
226279b24caaedae28347564e963c2b64e533cf3
| 217
|
py
|
Python
|
datapackage_pipelines/generators/utilities.py
|
gperonato/datapackage-pipelines
|
72b98918db1c19590586a3a85c5b087227cbbc3b
|
[
"MIT"
] | 109
|
2016-09-01T08:41:55.000Z
|
2021-11-10T10:08:35.000Z
|
datapackage_pipelines/generators/utilities.py
|
gperonato/datapackage-pipelines
|
72b98918db1c19590586a3a85c5b087227cbbc3b
|
[
"MIT"
] | 144
|
2016-08-30T16:26:50.000Z
|
2021-04-18T09:06:12.000Z
|
datapackage_pipelines/generators/utilities.py
|
gperonato/datapackage-pipelines
|
72b98918db1c19590586a3a85c5b087227cbbc3b
|
[
"MIT"
] | 34
|
2016-09-05T12:46:53.000Z
|
2022-03-05T01:53:49.000Z
|
def arg_to_step(arg):
if isinstance(arg, str):
return {'run': arg}
else:
return dict(zip(['run', 'parameters', 'cache'], arg))
def steps(*args):
return [arg_to_step(arg) for arg in args]
| 21.7
| 61
| 0.59447
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 29
| 0.133641
|
2262f6ba6d8c2278a63ea0e571aa7725d2647bf8
| 11,843
|
py
|
Python
|
plugin/taskmage2/project/projects.py
|
willjp/vim-taskmage
|
adcf809ccf1768753eca4dadaf6279b34e8d5699
|
[
"BSD-2-Clause"
] | 1
|
2017-11-28T14:12:03.000Z
|
2017-11-28T14:12:03.000Z
|
plugin/taskmage2/project/projects.py
|
willjp/vim-taskmage
|
adcf809ccf1768753eca4dadaf6279b34e8d5699
|
[
"BSD-2-Clause"
] | 16
|
2017-08-13T18:01:26.000Z
|
2020-11-17T04:55:43.000Z
|
plugin/taskmage2/project/projects.py
|
willjp/vim-taskmage
|
adcf809ccf1768753eca4dadaf6279b34e8d5699
|
[
"BSD-2-Clause"
] | null | null | null |
import os
import shutil
import tempfile
from taskmage2.utils import filesystem, functional
from taskmage2.asttree import asttree, renderers
from taskmage2.parser import iostream, parsers
from taskmage2.project import taskfiles
class Project(object):
def __init__(self, root='.'):
""" Constructor.
Args:
            root (str, optional): ``(ex: None, '/src/project/subdir/file.mtask', '/src/project', '/src/project/.taskmage' )``
                Path to your project root, or a file/directory within
your taskmage project root.
.. code-block:: python
'/src/project'
'/src/project/subdir/file.mtask'
'/src/project/.taskmage'
"""
self._root = None
if root:
self.load(root)
def __repr__(self):
"""
Returns:
str: ``<Project(path/to/project) at 0x7ff6b33106a0>``
"""
if self.root:
relpath = os.path.relpath(self.root)
else:
relpath = 'None'
repr_ = '<Project({}) at {}>'.format(relpath, hex(id(self)))
return repr_
def __hash__(self):
hashstr = '<taskmage2.project.projects.Project({})>'.format(str(self.root))
return hash(hashstr)
@classmethod
def from_path(cls, filepath):
""" Instantiates a new Project, loaded using `filepath`.
Args:
            filepath (str): ``(ex: '/src/project/subdir/file.mtask', '/src/project', '/src/project/.taskmage' )``
Path to your projectroot, or a file/directory within
your taskmage project root.
.. code-block:: python
'/src/project'
'/src/project/subdir/file.mtask'
'/src/project/.taskmage'
"""
project = Project(root=None)
project.load(filepath)
return project
@property
def root(self):
""" The root directory of a project. Contains ``.taskmage`` directory.
Returns:
.. code-block:: python
'/src/project'
"""
return self._root
@classmethod
def create(cls, root):
""" Create a new taksmage project in directory `rootdir` .
Args:
rootdir (str):
Path to the root of your taskmage project.
Returns:
str: project root directory
"""
root = format_rootpath(root)
if os.path.exists(root):
if not os.path.isdir(root):
raise OSError(
'unable to create taskmage project, provided '
'path exists and is not a directory. "{}"'.format(root)
)
taskmage_dir = '{}/.taskmage'.format(root)
filesystem.make_directories(taskmage_dir)
return root
@staticmethod
def find(path):
"""
Returns:
str: absolute path to taskmage project root
"""
path = filesystem.format_path(os.path.abspath(path))
# is path root
if os.path.isdir('{}/.taskmage'.format(path)):
return path
# /src/project/.taskmage
if os.path.basename(path) == '.taskmage':
return os.path.dirname(path)
# /src/project
# /src/project/sub-path
for parent_dir in filesystem.walk_parents(path):
if os.path.isdir('{}/.taskmage'.format(parent_dir)):
return parent_dir
raise RuntimeError('unable to find taskmage project from path: {}'.format(path))
def load(self, path):
""" Loads a taskmage project from a path.
Args:
path (str): ``(ex: '/src/project/subdir/file.mtask', '/src/project', '/src/project/.taskmage' )``
Path to your projectroot, or a file/directory within
your taskmage project root.
.. code-block:: python
'/src/project'
'/src/project/subdir/file.mtask'
'/src/project/.taskmage'
"""
path = os.path.abspath(path)
projectroot = self.find(path)
self._root = projectroot
def archive_completed(self, filepath=None):
""" Archives all completed task-branches.
Example:
.. code-block:: ReStructuredText
## a,b, and c will be archived
## (entire task-branch completed)
x a
x b
x c
## nothing will be archived
## (task-branch is not entirely completed)
x a
x b
* c
Args:
filepath (str, optional): ``(ex: '/src/project/file.mtask' )``
Optionally, archive completed tasks in a single target file.
"""
if filepath is not None:
self._archive_completed(filepath)
else:
# for every mtask file in the entire project...
raise NotImplementedError('todo - archive completed tasks from all mtask files')
def is_project_path(self, filepath):
""" Test if a file is within this project.
"""
if filepath.startswith('{}/'.format(self.root)):
return True
return False
def is_archived_path(self, filepath):
""" Test if file is an archived mtask file.
"""
if filepath.startswith('{}/.taskmage/'.format(self.root)):
return True
return False
def is_active_path(self, filepath):
""" Test if file is an active (non-archived) mtask file.
"""
if self.is_project_path(filepath) and not self.is_archived_path(filepath):
return True
return False
def get_archived_path(self, filepath):
""" Returns filepath to corresponding archived mtask file's (from un-archived mtask file).
"""
if not self.is_project_path(filepath):
msg = ('filepath not within current taskmage project. \n'
'project "{}"\n'
                   'filepath "{}"\n').format(self.root, filepath)
raise RuntimeError(msg)
if self.is_archived_path(filepath):
return filepath
filepath = filesystem.format_path(filepath)
relpath = filepath[len(self.root) + 1:]
archived_path = '{}/.taskmage/{}'.format(self.root, relpath)
return archived_path
def get_active_path(self, filepath):
""" Returns filepath to corresponding un-archived mtask file (from archived mtask file).
"""
if not self.is_project_path(filepath):
raise RuntimeError(
('filepath not within current taskmage project. \n'
'project "{}"\n'
                 'filepath "{}"\n').format(self.root, filepath)
)
if not self.is_archived_path(filepath):
return filepath
filepath = filesystem.format_path(filepath)
taskdir = '{}/.taskmage'.format(self.root)
relpath = filepath[len(taskdir) + 1:]
active_path = '{}/{}'.format(self.root, relpath)
return active_path
def get_counterpart(self, filepath):
""" Returns active-path if archived-path, or inverse.
"""
if not self.is_project_path(filepath):
raise RuntimeError(
('filepath not within current taskmage project. \n'
'project "{}"\n'
                 'filepath "{}"\n').format(self.root, filepath)
)
if self.is_archived_path(filepath):
return self.get_active_path(filepath)
else:
return self.get_archived_path(filepath)
def filter_taskfiles(self, filters):
""" Returns a list of all taskfiles in project, filtered by provided `filters` .
Args:
filters (list):
List of functions that accepts a :py:obj:`taskmage2.project.taskfiles.TaskFile`
as an argument, and returns True (keep) or False (remove)
Returns:
Iterable:
iterable of project taskfiles (after all filters applied to them).
.. code-block:: python
[
TaskFile('/path/to/todos/file1.mtask'),
TaskFile('/path/to/todos/file2.mtask'),
TaskFile('/path/to/todos/file3.mtask'),
...
]
"""
return functional.multifilter(filters, self.iter_taskfiles())
def iter_taskfiles(self):
""" Iterates over all `*.mtask` files in project (both completed and uncompleted).
Returns:
Iterable:
iterable of all project taskfiles
.. code-block:: python
[
TaskFile('/path/to/todos/file1.mtask'),
TaskFile('/path/to/todos/file2.mtask'),
TaskFile('/path/to/todos/file3.mtask'),
...
]
"""
for (root, dirnames, filenames) in os.walk(self.root):
for filename in filenames:
if not filename.endswith('.mtask'):
continue
filepath = '{}/{}'.format(root, filename)
yield taskfiles.TaskFile(filepath)
def _archive_completed(self, filepath):
"""
Args:
filepath (str):
absolute path to a .mtask file.
"""
(active_ast, archive_ast) = self._archive_completed_as_ast(filepath)
archive_path = self.get_archived_path(filepath)
tempdir = tempfile.mkdtemp()
try:
# create tempfile objects
active_taskfile = taskfiles.TaskFile('{}/active.mtask'.format(tempdir))
archive_taskfile = taskfiles.TaskFile('{}/archive.mtask'.format(tempdir))
# write tempfiles
active_taskfile.write(active_ast)
archive_taskfile.write(archive_ast)
# (if successful) overwrite real files
active_taskfile.copyfile(filepath)
archive_taskfile.copyfile(archive_path)
finally:
# delete tempdir
if os.path.isdir(tempdir):
shutil.rmtree(tempdir)
def _archive_completed_as_ast(self, filepath):
"""
Returns:
.. code-block:: python
(
asttree.AbstractSyntaxTree(), # new active AST
asttree.AbstractSyntaxTree(), # new archive AST
)
"""
# get active AST
active_ast = self._get_mtaskfile_ast(filepath)
# get archive AST
archive_path = self.get_archived_path(filepath)
archive_ast = self._get_mtaskfile_ast(archive_path)
# perform archive
archive_ast = active_ast.archive_completed(archive_ast)
return (active_ast, archive_ast)
def _get_mtaskfile_ast(self, filepath):
if not os.path.isfile(filepath):
return asttree.AbstractSyntaxTree()
with open(filepath, 'r') as fd_src:
fd = iostream.FileDescriptor(fd_src)
AST = parsers.parse(fd, 'mtask')
return AST
def format_rootpath(path):
""" Formats a project-directory path.
Ensures path ends with `.taskmage` dir, and uses forward slashes exclusively.
Returns:
str:
a new formatted path
"""
return functional.pipeline(
path,
[
_ensure_path_ends_with_dot_taskmage,
filesystem.format_path,
]
)
def _ensure_path_ends_with_dot_taskmage(path):
if os.path.basename(path):
return path
return '{}/.taskmage'.format(path)
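# Hedged usage sketch (not part of the original module): the scratch path below
# is an assumption. It creates a project root, re-loads it from that path, and
# shows where archiving a single .mtask file would be triggered.
if __name__ == '__main__':
    root = Project.create('/tmp/example_taskmage_project')   # assumed scratch dir
    project = Project.from_path(root)
    print(project)                                            # <Project(...) at 0x...>
    # project.archive_completed('{}/todo.mtask'.format(root))  # archives one file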
| 31.248021
| 125
| 0.548679
| 11,094
| 0.936756
| 816
| 0.068901
| 2,283
| 0.192772
| 0
| 0
| 5,769
| 0.487123
|
2263a0daf4d65f69a2ef1044b98efa275d27150f
| 1,611
|
py
|
Python
|
discord/ext/vbu/cogs/utils/converters/filtered_user.py
|
6days9weeks/Novus
|
a21157f15d7a07669cb75b3f023bd9eedf40e40e
|
[
"MIT"
] | 2
|
2022-01-22T16:05:42.000Z
|
2022-01-22T16:06:07.000Z
|
discord/ext/vbu/cogs/utils/converters/filtered_user.py
|
6days9weeks/Novus
|
a21157f15d7a07669cb75b3f023bd9eedf40e40e
|
[
"MIT"
] | null | null | null |
discord/ext/vbu/cogs/utils/converters/filtered_user.py
|
6days9weeks/Novus
|
a21157f15d7a07669cb75b3f023bd9eedf40e40e
|
[
"MIT"
] | null | null | null |
from discord.ext import commands
class FilteredUser(commands.UserConverter):
"""
A simple :class:`discord.ext.commands.UserConverter` that doesn't allow bots
or the author to be passed into the function.
"""
def __init__(self, *, allow_author: bool = False, allow_bots: bool = False):
super().__init__()
self.allow_author = allow_author
self.allow_bots = allow_bots
async def convert(self, ctx: commands.Context, argument: str):
m = await super().convert(ctx, argument)
if self.allow_author is False and ctx.author.id == m.id:
raise commands.BadArgument("You can't run this command on yourself.")
if self.allow_bots is False and m.bot:
raise commands.BadArgument("You can't run this command on bots.")
return m
class FilteredMember(commands.MemberConverter):
"""
A simple :class:`discord.ext.commands.MemberConverter` that doesn't allow bots
or the author to be passed into the function.
"""
def __init__(self, *, allow_author: bool = False, allow_bots: bool = False):
super().__init__()
self.allow_author = allow_author
self.allow_bots = allow_bots
async def convert(self, ctx: commands.Context, argument: str):
m = await super().convert(ctx, argument)
if self.allow_author is False and ctx.author.id == m.id:
raise commands.BadArgument("You can't run this command on yourself.")
if self.allow_bots is False and m.bot:
raise commands.BadArgument("You can't run this command on bots.")
return m
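# Hedged usage sketch (not part of the original module): shows the converters
# used as typing annotations on a command. The command name and reply text are
# assumptions for illustration only.
@commands.command()
async def hug(ctx: commands.Context, user: FilteredUser):
    """Hug another user; the converter rejects bots and the invoking author."""
    await ctx.send("{} hugs {}!".format(ctx.author.mention, user.mention))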
| 38.357143
| 82
| 0.664184
| 1,572
| 0.975791
| 0
| 0
| 0
| 0
| 800
| 0.496586
| 442
| 0.274364
|
226437962414de4509b79b7a803dd031ebb02932
| 361
|
py
|
Python
|
py/2017/3B.py
|
pedrotari7/advent_of_code
|
98d5bc8d903435624a019a5702f5421d7b4ef8c8
|
[
"MIT"
] | null | null | null |
py/2017/3B.py
|
pedrotari7/advent_of_code
|
98d5bc8d903435624a019a5702f5421d7b4ef8c8
|
[
"MIT"
] | null | null | null |
py/2017/3B.py
|
pedrotari7/advent_of_code
|
98d5bc8d903435624a019a5702f5421d7b4ef8c8
|
[
"MIT"
] | null | null | null |
a = 289326
coords = [(1, 0), (1, -1), (0, -1), (-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1)]
x,y = (0,0)
dx,dy = (1,0)
M = {(x,y):1}
while M[(x, y)] < a:
x, y = x+dx, y+dy
M[(x, y)] = sum([M[(x+ox, y+oy)] for ox,oy in coords if (x+ox,y+oy) in M])
if (x == y) or (x > 0 and x == 1-y) or (x < 0 and x == -y):
dx, dy = -dy, dx
print(M[(x, y)])
| 24.066667
| 79
| 0.382271
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
226749a06c765ec39cc633d7c553b9c567992420
| 811
|
py
|
Python
|
q.py
|
Akatsuki1910/tokuron
|
2f5b05dc1c1395f30e738a0d5749ac32d46e5379
|
[
"MIT"
] | null | null | null |
q.py
|
Akatsuki1910/tokuron
|
2f5b05dc1c1395f30e738a0d5749ac32d46e5379
|
[
"MIT"
] | null | null | null |
q.py
|
Akatsuki1910/tokuron
|
2f5b05dc1c1395f30e738a0d5749ac32d46e5379
|
[
"MIT"
] | null | null | null |
""" Q learning """
import numpy as np
import plot
Q = np.array(np.zeros([11, 3]))
GAMMA = 0.9
ALPHA = 0.1
def action_select(s_s):
""" action select """
return np.random.choice([i for i in range(1, 4) if i + s_s < 11])
for i in range(10000):
S_STATE = 0
while S_STATE != 10:
a_state = action_select(S_STATE)
R = 0.001
s_state_dash = S_STATE + a_state
if s_state_dash == 10:
R = -10
else:
s_state_dash = action_select(s_state_dash)+s_state_dash
if s_state_dash == 10:
R = 10
Q[S_STATE, a_state-1] = Q[S_STATE, a_state-1]+ALPHA * \
(R+GAMMA * Q[s_state_dash,
np.argmax(Q[s_state_dash, ])] - Q[S_STATE, a_state-1])
S_STATE = s_state_dash
plot.plot_func(Q)
| 21.918919
| 69
| 0.557337
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 39
| 0.048089
|
226990cee4efe4dbfe653dc0472db81ab56d2396
| 390
|
py
|
Python
|
videogame_project/videogame_app/models.py
|
cs-fullstack-fall-2018/django-form-post1-R3coTh3Cod3r
|
3e44b737425fe347757a50f30aa5df021057bfde
|
[
"Apache-2.0"
] | null | null | null |
videogame_project/videogame_app/models.py
|
cs-fullstack-fall-2018/django-form-post1-R3coTh3Cod3r
|
3e44b737425fe347757a50f30aa5df021057bfde
|
[
"Apache-2.0"
] | null | null | null |
videogame_project/videogame_app/models.py
|
cs-fullstack-fall-2018/django-form-post1-R3coTh3Cod3r
|
3e44b737425fe347757a50f30aa5df021057bfde
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
from django.utils import timezone
class GameIdea(models.Model):
name=models.CharField(max_length=100)
genre= models.CharField(max_length=100)
currentdate= models.DateTimeField(blank=True, null=True)
    def __str__(self):
        return '{} ({})'.format(self.name, self.genre)
def displayed(self):
self.currentdate = timezone.now()
self.save()
| 26
| 60
| 0.707692
| 326
| 0.835897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
226a5ca3cf4445179f1951c272dd77866530bcb2
| 4,296
|
py
|
Python
|
tests/test_estimate_r.py
|
lo-hfk/epyestim
|
ca2ca928b744f324dade248c24a40872b69a5222
|
[
"MIT"
] | 11
|
2021-01-10T22:37:26.000Z
|
2022-03-14T10:46:21.000Z
|
tests/test_estimate_r.py
|
lo-hfk/epyestim
|
ca2ca928b744f324dade248c24a40872b69a5222
|
[
"MIT"
] | null | null | null |
tests/test_estimate_r.py
|
lo-hfk/epyestim
|
ca2ca928b744f324dade248c24a40872b69a5222
|
[
"MIT"
] | 4
|
2021-03-26T23:43:03.000Z
|
2021-11-21T15:16:05.000Z
|
import unittest
from datetime import date
import numpy as np
import pandas as pd
from numpy.testing import assert_array_almost_equal
from scipy.stats import gamma
from epyestim.estimate_r import overall_infectivity, sum_by_split_dates, estimate_r, gamma_quantiles
class EstimateRTest(unittest.TestCase):
def test_overall_infectivity(self):
infections_ts = pd.Series(
[1, 3, 4, 7, 10, 3],
index=pd.date_range(start='2020-01-01', end='2020-01-06')
)
gt_distribution = np.array([0.0, 0.3, 0.4, 0.2, 0.0])
infectivity = overall_infectivity(infections_ts, gt_distribution)
self.assertTrue(infectivity.index.equals(infections_ts.index))
assert_array_almost_equal(
np.array([0.0, 0.3, 1.3, 2.6, 4.3, 6.6]),
infectivity.values
)
def test_split_dates(self):
"""
day 1 2 3 4 5 6 7 8 9
a 1 2 3 4 5 6 7 8 9
b 9 8 7 6 5 4 3 2 1
splits ^-------^---------^
sum(a) 10 35
sum(b) 30 15
"""
df = pd.DataFrame({
'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
'b': [9, 8, 7, 6, 5, 4, 3, 2, 1]
}, index=pd.date_range(start='2020-01-01', end='2020-01-09'))
        split_dates = [date(2020, 1, 1), date(2020, 1, 5), date(2020, 1, 10)]
sums = sum_by_split_dates(df, split_dates)
self.assertTrue(df.index.equals(sums.index))
assert_array_almost_equal(np.array([10, 10, 10, 10, 35, 35, 35, 35, 35]), sums['a'])
assert_array_almost_equal(np.array([30, 30, 30, 30, 15, 15, 15, 15, 15]), sums['b'])
def test_estimate_r_rolling(self):
infections_ts = pd.Series(
[1, 3, 4, 7, 10, 3],
index=pd.date_range(start='2020-01-01', end='2020-01-06')
)
gt_distribution = np.array([0.0, 0.3, 0.4, 0.2, 0.1])
r_df = estimate_r(
infections_ts=infections_ts,
gt_distribution=gt_distribution,
a_prior=1,
b_prior=5,
window_size=3,
)
self.assertTrue(r_df.index.equals(pd.date_range(start='2020-01-03', end='2020-01-06')))
assert_array_almost_equal(np.array([9, 15, 22, 21]), r_df['a_posterior'])
assert_array_almost_equal(np.array([0.555556, 0.227273, 0.117647, 0.070922]), r_df['b_posterior'])
def test_estimate_r_boundary(self):
infections_ts = pd.Series(
[1, 3, 4, 7, 10, 3],
index=pd.date_range(start='2020-01-01', end='2020-01-06')
)
gt_distribution = np.array([0.0, 0.3, 0.4, 0.2, 0.1])
r_df = estimate_r(
infections_ts=infections_ts,
gt_distribution=gt_distribution,
a_prior=1,
b_prior=5,
boundary_dates=[date(2020, 1, 1), date(2020, 1, 3), date(2020, 1, 6)]
)
self.assertTrue(r_df.index.equals(pd.date_range(start='2020-01-01', end='2020-01-05')))
assert_array_almost_equal(np.array([5, 5, 22, 22, 22]), r_df['a_posterior'])
assert_array_almost_equal(np.array([2, 2, 0.117647, 0.117647, 0.117647]), r_df['b_posterior'])
def test_estimate_r_none_fail(self):
infections_ts = pd.Series(
[1, 3, 4, 7, 10, 3],
index=pd.date_range(start='2020-01-01', end='2020-01-06')
)
gt_distribution = np.array([0.0, 0.3, 0.4, 0.2, 0.1])
self.assertRaises(ValueError, lambda: estimate_r(
infections_ts=infections_ts,
gt_distribution=gt_distribution,
a_prior=1,
b_prior=5,
))
def test_gamma_quantiles_equivalent(self):
a = np.array([1.0, 2.0, 1.5, 2.5, 17.0, 13.0])
b = np.array([2.0, 3.2, 5.1, 0.2, 34.6, 23.0])
q = 0.3
df = pd.DataFrame({'a_posterior': a, 'b_posterior': b})
def get_r_quantile(q):
def getter(row):
return gamma(a=row['a_posterior'], scale=row['b_posterior']).ppf(q)
return getter
quantiles_slow = df.apply(get_r_quantile(q), axis=1)
quantiles_fast = gamma_quantiles(q, df.a_posterior, df.b_posterior)
assert_array_almost_equal(quantiles_slow, quantiles_fast)
if __name__ == '__main__':
unittest.main()
| 34.926829
| 106
| 0.571927
| 3,978
| 0.925978
| 0
| 0
| 0
| 0
| 0
| 0
| 514
| 0.119646
|
226bbbb2f75ccc059e2118af7b3e40bfe68eb6e9
| 3,355
|
py
|
Python
|
tests/imagenet_classification_test.py
|
SanggunLee/edgetpu
|
d3cf166783265f475c1ddba5883e150ee84f7bfe
|
[
"Apache-2.0"
] | 2
|
2020-05-07T22:34:16.000Z
|
2020-09-03T20:30:37.000Z
|
tests/imagenet_classification_test.py
|
SanggunLee/edgetpu
|
d3cf166783265f475c1ddba5883e150ee84f7bfe
|
[
"Apache-2.0"
] | null | null | null |
tests/imagenet_classification_test.py
|
SanggunLee/edgetpu
|
d3cf166783265f475c1ddba5883e150ee84f7bfe
|
[
"Apache-2.0"
] | 1
|
2020-01-08T05:55:58.000Z
|
2020-01-08T05:55:58.000Z
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests image classification accuracy with ImageNet validation data set.
Please download the validation image data to edgetpu/test_data/imagenet/
"""
import unittest
from edgetpu.classification.engine import ClassificationEngine
from PIL import Image
from . import test_utils
class TestImagenetClassification(unittest.TestCase):
def _crop_image(self, image, cropping_fraction=0.75):
"""Crops an image in the center.
Args:
image: PIL image object.
cropping_fraction: Fraction of cropped image.
Returns:
Cropped image as PIL image object.
"""
init_width, init_height = image.size
new_width = round(init_width * cropping_fraction)
new_height = round(init_height * cropping_fraction)
width_offset = round((init_width - init_width * cropping_fraction) / 2.0)
height_offset = round((init_height - init_height * cropping_fraction) / 2.0)
return image.crop((width_offset, height_offset,
width_offset + new_width, height_offset + new_height))
def _test_model(self, model_name, expected_top_1=None, expected_top_5=None):
engine = ClassificationEngine(test_utils.test_data_path(model_name))
with open(test_utils.test_data_path('imagenet/val.txt'), 'r') as gt_file:
            gt = [line.strip().split(' ') for line in gt_file.readlines()]
top_1_count = 0
top_5_count = 0
print('Running inference for model %s...' % model_name)
for i in range(50000):
label = int(gt[i][1]) + 1
image_name = 'imagenet/ILSVRC2012_val_%s.JPEG' % str(i + 1).zfill(8)
with test_utils.test_image(image_name) as image:
image = self._crop_image(image.convert('RGB'))
prediction = engine.classify_with_image(image, threshold=0.0, top_k=5)
if prediction[0][0] == label:
top_1_count += 1
top_5_count += 1
else:
for j in range(1, len(prediction)):
if prediction[j][0] == label:
top_5_count += 1
top_1_accuracy = top_1_count / 50000.0
top_5_accuracy = top_5_count / 50000.0
print('Top 1 accuracy: %.2f%%' % (top_1_accuracy * 100))
print('Top 5 accuracy: %.2f%%' % (top_5_accuracy * 100))
if expected_top_1 is not None:
self.assertAlmostEqual(top_1_accuracy, expected_top_1, places=4)
if expected_top_5 is not None:
self.assertAlmostEqual(top_5_accuracy, expected_top_5, places=4)
def test_mobilenet_v1(self):
self._test_model('mobilenet_v1_1.0_224_quant_edgetpu.tflite',
expected_top_1=0.6854, expected_top_5=0.8772)
def test_mobilenet_v2(self):
self._test_model('mobilenet_v2_1.0_224_quant_edgetpu.tflite',
expected_top_1=0.6912, expected_top_5=0.8829)
if __name__ == '__main__':
unittest.main()
| 37.696629
| 80
| 0.701937
| 2,443
| 0.728167
| 0
| 0
| 0
| 0
| 0
| 0
| 1,147
| 0.341878
|
226d9e6adcc58d1700424bf4cff15de32eb71005
| 1,781
|
py
|
Python
|
11_testing_best_practices/generate_maze_faster.py
|
krother/maze_run
|
2860198a2af7d05609d043de1b582cc0070aac09
|
[
"MIT"
] | 7
|
2017-05-02T12:23:03.000Z
|
2020-04-07T07:01:52.000Z
|
11_testing_best_practices/generate_maze_faster.py
|
fengshao007P/maze_run
|
2860198a2af7d05609d043de1b582cc0070aac09
|
[
"MIT"
] | null | null | null |
11_testing_best_practices/generate_maze_faster.py
|
fengshao007P/maze_run
|
2860198a2af7d05609d043de1b582cc0070aac09
|
[
"MIT"
] | 21
|
2016-02-26T10:26:16.000Z
|
2021-12-04T23:38:00.000Z
|
# Improved version of the code from chapter 03
# created in chapter 11 to accelerate execution
import random
XMAX, YMAX = 19, 16
def create_grid_string(dots, xsize, ysize):
"""
    Creates a grid of size (xsize, ysize)
with the given positions of dots.
"""
grid = ""
for y in range(ysize):
for x in range(xsize):
grid += "." if (x, y) in dots else "#"
grid += "\n"
return grid
def get_all_dot_positions(xsize, ysize):
"""Returns a list of (x, y) tuples covering all positions in a grid"""
return [(x,y) for x in range(1, xsize-1) for y in range(1, ysize-1)]
def get_neighbors(x, y):
"""Returns a list with the 8 neighbor positions of (x, y)"""
return [
(x, y-1), (x, y+1), (x-1, y), (x+1, y),
(x-1, y-1), (x+1, y-1), (x-1, y+1), (x+1, y+1)
]
def generate_dot_positions(xsize, ysize):
"""Creates positions of dots for a random maze"""
positions = get_all_dot_positions(xsize, ysize)
random.shuffle(positions)
dots = set()
for x, y in positions:
neighbors = get_neighbors(x, y)
free = [nb in dots for nb in neighbors]
if free.count(True) < 5:
dots.add((x, y))
return dots
def create_maze(xsize, ysize):
"""Returns a xsize*ysize maze as a string"""
dots = generate_dot_positions(xsize, ysize)
maze = create_grid_string(dots, xsize, ysize)
return maze
if __name__ == '__main__':
dots = set(((1,1), (1,2), (1,3), (2,2), (3,1), (3,2), (3,3)))
print(create_grid_string(dots, 5, 5))
positions = get_all_dot_positions(5, 5)
print(create_grid_string(positions, 5, 5))
neighbors = get_neighbors(3, 2)
print(create_grid_string(neighbors, 5, 5))
maze = create_maze(12, 7)
print(maze)
| 25.811594
| 74
| 0.601909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 424
| 0.238069
|
226ee0a94d2c674c5419d2b1671a6c420a52ce80
| 98
|
py
|
Python
|
flask-backend/create_database.py
|
amlannandy/OpenMF
|
da5f474bb3002084f3e5bc9ceb18b32efdf34107
|
[
"Apache-2.0"
] | null | null | null |
flask-backend/create_database.py
|
amlannandy/OpenMF
|
da5f474bb3002084f3e5bc9ceb18b32efdf34107
|
[
"Apache-2.0"
] | null | null | null |
flask-backend/create_database.py
|
amlannandy/OpenMF
|
da5f474bb3002084f3e5bc9ceb18b32efdf34107
|
[
"Apache-2.0"
] | null | null | null |
from api.models.models import User
from api import db, create_app
db.create_all(app=create_app())
| 24.5
| 34
| 0.806122
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
226f3f6717063fd8afff828ee410784d07c44bf7
| 1,820
|
py
|
Python
|
src/UQpy/Distributions/baseclass/DistributionContinuous1D.py
|
marrov/UQpy
|
b04a267b3080e3d4d38e876547ba0d3b979734f3
|
[
"MIT"
] | 132
|
2018-03-13T13:56:33.000Z
|
2022-03-21T13:59:17.000Z
|
src/UQpy/Distributions/baseclass/DistributionContinuous1D.py
|
marrov/UQpy
|
b04a267b3080e3d4d38e876547ba0d3b979734f3
|
[
"MIT"
] | 140
|
2018-05-21T13:40:01.000Z
|
2022-03-29T14:18:01.000Z
|
src/UQpy/Distributions/baseclass/DistributionContinuous1D.py
|
marrov/UQpy
|
b04a267b3080e3d4d38e876547ba0d3b979734f3
|
[
"MIT"
] | 61
|
2018-05-02T13:40:05.000Z
|
2022-03-06T11:31:21.000Z
|
import numpy as np
import scipy.stats as stats
from UQpy.Distributions.baseclass.Distribution import Distribution
class DistributionContinuous1D(Distribution):
"""
Parent class for univariate continuous probability distributions.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
@staticmethod
def _check_x_dimension(x):
"""
Check the dimension of input x - must be an ndarray of shape (npoints,) or (npoints, 1)
"""
x = np.atleast_1d(x)
if len(x.shape) > 2 or (len(x.shape) == 2 and x.shape[1] != 1):
raise ValueError('Wrong dimension in x.')
return x.reshape((-1,))
def _construct_from_scipy(self, scipy_name=stats.rv_continuous):
self.cdf = lambda x: scipy_name.cdf(x=self._check_x_dimension(x), **self.params)
self.pdf = lambda x: scipy_name.pdf(x=self._check_x_dimension(x), **self.params)
self.log_pdf = lambda x: scipy_name.logpdf(x=self._check_x_dimension(x), **self.params)
self.icdf = lambda x: scipy_name.ppf(q=self._check_x_dimension(x), **self.params)
self.moments = lambda moments2return='mvsk': scipy_name.stats(moments=moments2return, **self.params)
self.rvs = lambda nsamples=1, random_state=None: scipy_name.rvs(
size=nsamples, random_state=random_state, **self.params).reshape((nsamples, 1))
def tmp_fit(dist, data):
data = self._check_x_dimension(data)
fixed_params = {}
for key, value in dist.params.items():
if value is not None:
fixed_params['f' + key] = value
params_fitted = scipy_name.fit(data=data, **fixed_params)
return dict(zip(dist.order_params, params_fitted))
self.fit = lambda data: tmp_fit(self, data)
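# Hedged usage sketch (not part of the original module): a minimal scipy-backed
# subclass, assuming the Distribution base class accepts its parameters as
# keyword arguments plus an order_params tuple (as its scipy-backed subclasses
# do). The class and parameter names here are illustrative only.
class ExampleNormal(DistributionContinuous1D):
    def __init__(self, loc=0., scale=1.):
        super().__init__(loc=loc, scale=scale, order_params=('loc', 'scale'))
        self._construct_from_scipy(scipy_name=stats.norm)
# d = ExampleNormal(loc=2., scale=0.5)
# samples = d.rvs(nsamples=5)        # ndarray of shape (5, 1)
# densities = d.pdf(samples)         # ndarray of shape (5,)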
| 45.5
| 108
| 0.647253
| 1,704
| 0.936264
| 0
| 0
| 351
| 0.192857
| 0
| 0
| 224
| 0.123077
|
226fb3b836b4a323bba46bf26d01dbf892dfb882
| 1,666
|
py
|
Python
|
bridge/models/basic/layers.py
|
JTT94/schrodinger_bridge
|
71841f2789c180a23d4b4641f160da5c0288a337
|
[
"MIT"
] | null | null | null |
bridge/models/basic/layers.py
|
JTT94/schrodinger_bridge
|
71841f2789c180a23d4b4641f160da5c0288a337
|
[
"MIT"
] | null | null | null |
bridge/models/basic/layers.py
|
JTT94/schrodinger_bridge
|
71841f2789c180a23d4b4641f160da5c0288a337
|
[
"MIT"
] | null | null | null |
import torch
from torch import nn
import torch.nn.functional as F
import math
from functools import partial
class MLP(torch.nn.Module):
def __init__(self, input_dim, layer_widths, activate_final = False, activation_fn=F.relu):
super(MLP, self).__init__()
layers = []
prev_width = input_dim
for layer_width in layer_widths:
layers.append(torch.nn.Linear(prev_width, layer_width))
# # same init for everyone
# torch.nn.init.constant_(layers[-1].weight, 0)
prev_width = layer_width
self.input_dim = input_dim
self.layer_widths = layer_widths
self.layers = torch.nn.ModuleList(layers)
self.activate_final = activate_final
self.activation_fn = activation_fn
def forward(self, x):
for i, layer in enumerate(self.layers[:-1]):
x = self.activation_fn(layer(x))
x = self.layers[-1](x)
if self.activate_final:
x = self.activation_fn(x)
return x
class ConvEncoder(torch.nn.Module):
def __init__(self, hidden_size=16, num_pixels=24, kernel_size=3, in_channels=3, out_channels=3, padding=0, stride=1):
super().__init__()
self.out_dim = ((num_pixels+2*padding-(kernel_size-1) - 1)//stride+1)
self.conv1 = torch.nn.Conv2d(in_channels, out_channels, kernel_size, padding=padding, stride=stride)
self.linear1= torch.nn.Linear(self.out_dim**2*out_channels, hidden_size)
def forward(self, x):
x = self.conv1(x)
x = x.flatten(start_dim=1)
x = self.linear1(x)
out = F.relu(x)
return out
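# Hedged usage sketch (not part of the original module): the shapes below are
# assumptions chosen to match the constructor defaults above.
if __name__ == '__main__':
    mlp = MLP(input_dim=2, layer_widths=[64, 64, 2])
    x = torch.randn(8, 2)
    print(mlp(x).shape)              # torch.Size([8, 2])
    encoder = ConvEncoder(hidden_size=16, num_pixels=24, in_channels=3, out_channels=3)
    images = torch.randn(8, 3, 24, 24)
    print(encoder(images).shape)     # torch.Size([8, 16])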
| 34.708333
| 121
| 0.630852
| 1,552
| 0.931573
| 0
| 0
| 0
| 0
| 0
| 0
| 73
| 0.043818
|
2270789e36e09bf77f3225fa068413436f325de3
| 10,920
|
py
|
Python
|
chessbot.py
|
UbiLabsChessbot/tensorflow_chessbot
|
5112d9213d0224dc7acc373a7048167b7e6da6ce
|
[
"MIT"
] | null | null | null |
chessbot.py
|
UbiLabsChessbot/tensorflow_chessbot
|
5112d9213d0224dc7acc373a7048167b7e6da6ce
|
[
"MIT"
] | null | null | null |
chessbot.py
|
UbiLabsChessbot/tensorflow_chessbot
|
5112d9213d0224dc7acc373a7048167b7e6da6ce
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Finds submissions with chessboard images in them,
# uses a tensorflow convolutional neural network to predict pieces, and returns
# a lichess analysis link and FEN diagram of the chessboard
import praw
import collections
import os
import time
from datetime import datetime
from praw.helpers import submission_stream
import requests
import socket
import re
from helper_functions_chessbot import *
import auth_config # for PRAW
import tensorflow_chessbot # For neural network model
#########################################################
# Setup
# Set up praw
chess_fen_bot = "ChessFenBot"
# Login
r = praw.Reddit(auth_config.USER_AGENT)
# Login old-style due to Reddit politics
r.login(auth_config.USERNAME, auth_config.PASSWORD, disable_warning=True)
# Get accessor to subreddit
subreddit = r.get_subreddit('chess+chessbeginners+AnarchyChess+betterchess')
# How many submissions to read from initially
submission_read_limit = 100
# How long to wait after replying to a post before continuing
reply_wait_time = 10 # minimum seconds to wait between replies, will also rate-limit safely
# Filename containing list of submission ids that
# have already been processed, updated at end of program
processed_filename = "submissions_already_processed.txt"
# Submissions computer vision or prediction failed on
failures_filename = "submission_failures.txt"
# All responses id, fen + certainty
responses_filename = "submission_responses.txt"
# Response message template
message_template = """[◕ _ ◕]^*
I attempted to generate a [chessboard layout]({unaligned_fen_img_link}) from the posted image,
with a certainty of **{certainty:.3f}%**. *{pithy_message}*
-
◇ White to play : [Analysis]({lichess_analysis_w}) | [Editor]({lichess_editor_w})
`{fen_w}`
-
◆ Black to play : [Analysis]({lichess_analysis_b}) | [Editor]({lichess_editor_b})
`{fen_b}`
-
> ▾ Links for when pieces are inverted on the board:
>
> White to play : [Analysis]({inverted_lichess_analysis_w}) | [Editor]({inverted_lichess_editor_w})
> `{inverted_fen_w}`
>
> Black to play : [Analysis]({inverted_lichess_analysis_b}) | [Editor]({inverted_lichess_editor_b})
> `{inverted_fen_b}`
-
---
^(Yes I am a machine learning bot | )
[^(`How I work`)](http://github.com/Elucidation/tensorflow_chessbot 'Must go deeper')
^( | Reply with a corrected FEN to add to my next training dataset)
"""
#########################################################
# ChessBot Message Generation Functions
def isPotentialChessboardTopic(sub):
"""if url is imgur link, or url ends in .png/.jpg/.gif"""
    if sub.url is None:
return False
return ('imgur' in sub.url
or any([sub.url.lower().endswith(ending) for ending in ['.png', '.jpg', '.gif']]))
def invert(fen):
return ''.join(reversed(fen))
def generateMessage(fen, certainty, side):
"""Generate response message using FEN, certainty and side for flipping link order"""
vals = {} # Holds template responses
# Things that don't rely on black/white to play
# FEN image link is aligned with screenshot, not side to play
vals['unaligned_fen_img_link'] = 'http://www.fen-to-image.com/image/30/%s.png' % fen
vals['certainty'] = certainty*100.0 # to percentage
vals['pithy_message'] = getPithyMessage(certainty)
if side == 'b':
# Flip FEN if black to play, assumes image is flipped
fen = invert(fen)
inverted_fen = invert(fen)
# Get castling status based on pieces being in initial positions or not
castle_status = getCastlingStatus(fen)
inverted_castle_status = getCastlingStatus(inverted_fen)
# Fill out template and return
vals['fen_w'] = "%s w %s -" % (fen, castle_status)
vals['fen_b'] = "%s b %s -" % (fen, castle_status)
vals['inverted_fen_w'] = "%s w %s -" % (inverted_fen, inverted_castle_status)
vals['inverted_fen_b'] = "%s b %s -" % (inverted_fen, inverted_castle_status)
vals['lichess_analysis_w'] = 'http://www.lichess.org/analysis/%s_w_%s' % (fen, castle_status)
vals['lichess_analysis_b'] = 'http://www.lichess.org/analysis/%s_b_%s' % (fen, castle_status)
vals['lichess_editor_w'] = 'http://www.lichess.org/editor/%s_w_%s' % (fen, castle_status)
vals['lichess_editor_b'] = 'http://www.lichess.org/editor/%s_b_%s' % (fen, castle_status)
vals['inverted_lichess_analysis_w'] = 'http://www.lichess.org/analysis/%s_w_%s' % (inverted_fen, inverted_castle_status)
vals['inverted_lichess_analysis_b'] = 'http://www.lichess.org/analysis/%s_b_%s' % (inverted_fen, inverted_castle_status)
vals['inverted_lichess_editor_w'] = 'http://www.lichess.org/editor/%s_w_%s' % (inverted_fen, inverted_castle_status)
vals['inverted_lichess_editor_b'] = 'http://www.lichess.org/editor/%s_b_%s' % (inverted_fen, inverted_castle_status)
return message_template.format(**vals)
#########################################################
# PRAW Helper Functions
def waitWithComments(sleep_time, segment=60):
"""Sleep for sleep_time seconds, printing to stdout every segment of time"""
print("\t%s - %s seconds to go..." % (datetime.now(), sleep_time))
while sleep_time > segment:
time.sleep(segment) # sleep in increments of 1 minute
sleep_time -= segment
print("\t%s - %s seconds to go..." % (datetime.now(), sleep_time))
time.sleep(sleep_time)
def logInfoPerSubmission(submission, count, count_actual, is_processed=False):
if ((time.time() - logInfoPerSubmission.last) > 120):
print("\n\t---\n\t%s - %d processed submissions, %d read\n" % (datetime.now(), count_actual, count))
logInfoPerSubmission.last = time.time()
is_proc = ''
if is_processed:
is_proc = ' P'
try:
print("#%d Submission(%s%s): %s" % (count, submission.id, is_proc, submission))
except UnicodeDecodeError as e:
print("#%d Submission(%s%s): <ignoring unicode>" % (count, submission.id, is_proc))
logInfoPerSubmission.last = time.time() # 'static' variable
def loadProcessed(processed_filename=processed_filename):
if not os.path.isfile(processed_filename):
print("%s - Starting new processed file" % datetime.now())
return set()
else:
print("Loading existing processed file...")
with open(processed_filename,'r') as f:
return set([x.strip() for x in f.readlines()])
def saveProcessed(already_processed, processed_filename=processed_filename):
with open(processed_filename,'w') as f:
for submission_id in already_processed:
f.write("%s\n" % submission_id)
print("%s - Saved processed ids to file" % datetime.now())
def addSubmissionToFailures(submission, failures_filename=failures_filename):
with open(failures_filename,'a') as f:
f.write("%s : %s | %s\n" % (submission.id, submission.title, submission.url))
print("%s - Saved failure to file" % datetime.now())
def addSubmissionToResponses(submission, fen, certainty, side, responses_filename=responses_filename):
# Reverse fen if it's black to play, assumes board is flipped
if side == 'b':
fen = ''.join(reversed(fen))
with open(responses_filename,'a') as f:
f.write("%s : %s | %s | %s %s %g\n" % (submission.id, submission.title, submission.url, fen, side, certainty))
print("%s - Saved response to file" % datetime.now())
#########################################################
# Main Script
# Track submission ids that have already been processed successfully
# Load list of already processed submission ids
already_processed = loadProcessed()
print("%s - Starting with %d already processed\n==========\n\n" % (datetime.now(), len(already_processed)))
count = 0
count_actual = 0
running = True
# Start up Tensorflow CNN with trained model
predictor = tensorflow_chessbot.ChessboardPredictor()
while running:
# get submission stream
try:
submissions = submission_stream(r, subreddit, limit=submission_read_limit)
# for each submission
for submission in submissions:
count += 1
# print out some debug info
is_processed = submission.id in already_processed
logInfoPerSubmission(submission, count, count_actual, is_processed)
# Skip if already processed
if is_processed:
continue
# check if submission title is a question
if isPotentialChessboardTopic(submission):
# Use CNN to make a prediction
print("\n---\nImage URL: %s" % submission.url)
fen, certainty = predictor.makePrediction(submission.url)
if fen is None:
print("> %s - Couldn't generate FEN, skipping..." % datetime.now())
# update & save list
already_processed.add(submission.id)
saveProcessed(already_processed)
addSubmissionToFailures(submission)
print("\n---\n")
continue
fen = shortenFEN(fen) # ex. '111pq11r' -> '3pq2r'
print("Predicted FEN: %s" % fen)
print("Certainty: %.4f%%" % (certainty*100))
# Get side from title or fen
side = getSideToPlay(submission.title, fen)
# Generate response message
msg = generateMessage(fen, certainty, side)
print("fen: %s\nside: %s\n" % (fen, side))
# respond, keep trying till success
while True:
try:
print("> %s - Responding to %s: %s" % (datetime.now(), submission.id, submission))
# Reply with comment
submission.add_comment(msg)
# update & save list
already_processed.add(submission.id)
saveProcessed(already_processed)
addSubmissionToResponses(submission, fen, certainty, side)
count_actual += 1
print("\n---\n")
# Wait after submitting to not overload
waitWithComments(reply_wait_time)
break
except praw.errors.AlreadySubmitted as e:
print("> %s - Already submitted skipping..." % datetime.now())
break
except praw.errors.RateLimitExceeded as e:
print("> {} - Rate Limit Error for commenting on {}, sleeping for {} before retrying...".format(datetime.now(), submission.id, e.sleep_time))
waitWithComments(e.sleep_time)
# Handle errors
except (socket.error, requests.exceptions.ReadTimeout, requests.packages.urllib3.exceptions.ReadTimeoutError, requests.exceptions.ConnectionError) as e:
print("> %s - Connection error, resetting accessor, waiting 30 and trying again: %s" % (datetime.now(), e))
# saveProcessed(already_processed)
time.sleep(30)
continue
except Exception as e:
print("Unknown Error, continuing after 30:",e)
time.sleep(30)
continue
except KeyboardInterrupt:
print("Exiting...")
running = False
finally:
saveProcessed(already_processed)
print("%s - %d Processed total." % (datetime.now(),len(already_processed)))
print("%s - Program Ended. %d replied / %d read in this session" % (datetime.now(), count_actual, count))
| 36.891892
| 154
| 0.677747
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,995
| 0.456999
|
2271553668c1d9c135110d311fde305c56e23bd6
| 1,557
|
py
|
Python
|
Tools/english_word/src/spider.py
|
pynickle/awesome-python-tools
|
e405fb8d9a1127ae7cd5bcbd6481da78f6f1fb07
|
[
"BSD-2-Clause"
] | 21
|
2019-06-02T01:55:14.000Z
|
2022-01-08T22:35:31.000Z
|
Tools/english_word/src/spider.py
|
code-nick-python/awesome-daily-tools
|
e405fb8d9a1127ae7cd5bcbd6481da78f6f1fb07
|
[
"BSD-2-Clause"
] | 3
|
2019-06-02T01:55:17.000Z
|
2019-06-14T12:32:06.000Z
|
Tools/english_word/src/spider.py
|
code-nick-python/awesome-daily-tools
|
e405fb8d9a1127ae7cd5bcbd6481da78f6f1fb07
|
[
"BSD-2-Clause"
] | 16
|
2019-06-23T13:00:04.000Z
|
2021-09-18T06:09:58.000Z
|
import requests
import re
import time
import random
import pprint
import os
headers = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3858.0 Safari/537.36"}
def youdict(threadName, q):
res = []
index = 0
url = q.get(timeout = 2)
index += 1
r = requests.get(url, headers = headers, timeout = 5)
html = str(r.content, encoding="utf-8").replace("\n", "").replace(" ", "").replace('<span class="yd-kw-suffix">[英语单词大全]</span>', "")
words = re.findall('<div class="caption"><h3 style="margin-top: 10px;"><a style="color:#333;" target="_blank" href="/w/.*?">(.*?)</a>[ ]?</h3><p>(.*?)</p></div>', html)
for word in words:
res.append(word)
if index%5 == 0:
time.sleep(3 + random.random())
else:
time.sleep(1 + random.random())
return res
def hujiang(threadName, q):
res = []
index = 0
url = q.get(timeout = 2)
index += 1
r = requests.get(url, headers=headers, timeout=5)
html = str(r.content, encoding="utf-8").replace("\n", "").replace(" ", "").replace('<span class="yd-kw-suffix">[英语单词大全]</span>', "")
words = re.findall('<li class="clearfix"><a href="/ciku/(.*?)/" target="_blank">.*?</a><span>(.*?)</span></li>', html)
for word in words:
res.append(word)
if index%5 == 0:
time.sleep(3 + random.random())
else:
time.sleep(1 + random.random())
return res
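# Hedged usage sketch (not part of the original module): both spiders pull one
# URL from a queue.Queue. The URL below is an assumption for illustration, and
# calling this helper performs a real HTTP request.
def demo_youdict():
    from queue import Queue
    q = Queue()
    q.put("https://www.youdict.com/ciku/id_1")  # assumed word-list page URL
    pprint.pprint(youdict("demo-thread", q))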
if __name__ == "__main__":
main()
| 32.4375
| 173
| 0.552987
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 529
| 0.334598
|
22727c318ff129b6243d715f6523dbfa7a528208
| 769
|
py
|
Python
|
NFCow/malls/migrations/0001_initial.py
|
jojoriveraa/titulacion-NFCOW
|
643f7f2cbe9c68d9343f38d12629720b12e9ce1e
|
[
"Apache-2.0"
] | null | null | null |
NFCow/malls/migrations/0001_initial.py
|
jojoriveraa/titulacion-NFCOW
|
643f7f2cbe9c68d9343f38d12629720b12e9ce1e
|
[
"Apache-2.0"
] | 11
|
2016-01-09T06:27:02.000Z
|
2016-01-10T05:21:05.000Z
|
NFCow/malls/migrations/0001_initial.py
|
jojoriveraa/titulacion-NFCOW
|
643f7f2cbe9c68d9343f38d12629720b12e9ce1e
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-23 08:44
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Mall',
fields=[
('id', models.PositiveIntegerField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=255)),
('address', models.CharField(max_length=255)),
('group', models.CharField(max_length=255)),
('image', models.ImageField(upload_to='malls')),
('postcode', models.PositiveIntegerField()),
],
),
]
| 27.464286
| 87
| 0.572172
| 614
| 0.79844
| 0
| 0
| 0
| 0
| 0
| 0
| 124
| 0.161248
|
22749bf06e02c8354fb9677be5d2215d3d9afe0c
| 16,406
|
py
|
Python
|
mmdnn/conversion/examples/tensorflow/extractor.py
|
ferriswym/MMdnn
|
dc204cdba58a6cba079816715ac766d94bd87732
|
[
"MIT"
] | null | null | null |
mmdnn/conversion/examples/tensorflow/extractor.py
|
ferriswym/MMdnn
|
dc204cdba58a6cba079816715ac766d94bd87732
|
[
"MIT"
] | null | null | null |
mmdnn/conversion/examples/tensorflow/extractor.py
|
ferriswym/MMdnn
|
dc204cdba58a6cba079816715ac766d94bd87732
|
[
"MIT"
] | null | null | null |
#----------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#----------------------------------------------------------------------------------------------
from __future__ import absolute_import
import os
import tensorflow as tf
from tensorflow.contrib.slim.nets import vgg
from tensorflow.contrib.slim.nets import inception
from tensorflow.contrib.slim.nets import resnet_v1
from tensorflow.contrib.slim.nets import resnet_v2
from mmdnn.conversion.examples.tensorflow.models import inception_resnet_v2
from mmdnn.conversion.examples.tensorflow.models import mobilenet_v1
from mmdnn.conversion.examples.tensorflow.models import nasnet
from mmdnn.conversion.examples.tensorflow.models.mobilenet import mobilenet_v2
from mmdnn.conversion.examples.tensorflow.models import inception_resnet_v1
from mmdnn.conversion.examples.tensorflow.models import test_rnn
slim = tf.contrib.slim
from mmdnn.conversion.examples.imagenet_test import TestKit
from mmdnn.conversion.examples.extractor import base_extractor
from mmdnn.conversion.common.utils import download_file
# https://github.com/tensorflow/tensorflow/issues/24496
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
class tensorflow_extractor(base_extractor):
MMDNN_BASE_URL = 'http://mmdnn.eastasia.cloudapp.azure.com:89/models/'
architecture_map = {
'vgg16' : {
'url' : 'http://download.tensorflow.org/models/vgg_16_2016_08_28.tar.gz',
'filename' : 'vgg_16.ckpt',
'builder' : lambda : vgg.vgg_16,
'arg_scope' : vgg.vgg_arg_scope,
'input' : lambda : tf.placeholder(name='input', dtype=tf.float32, shape=[None, 224, 224, 3]),
'num_classes' : 1000,
},
'vgg19' : {
'url' : 'http://download.tensorflow.org/models/vgg_19_2016_08_28.tar.gz',
'filename' : 'vgg_19.ckpt',
'builder' : lambda : vgg.vgg_19,
'arg_scope' : vgg.vgg_arg_scope,
'input' : lambda : tf.placeholder(name='input', dtype=tf.float32, shape=[None, 224, 224, 3]),
'num_classes' : 1000,
},
'inception_v1' : {
'url' : 'http://download.tensorflow.org/models/inception_v1_2016_08_28.tar.gz',
'filename' : 'inception_v1.ckpt',
'builder' : lambda : inception.inception_v1,
'arg_scope' : inception.inception_v3_arg_scope,
'input' : lambda : tf.placeholder(name='input', dtype=tf.float32, shape=[None, 224, 224, 3]),
'num_classes' : 1001,
},
'inception_v1_frozen' : {
'url' : 'https://storage.googleapis.com/download.tensorflow.org/models/inception_v1_2016_08_28_frozen.pb.tar.gz',
'filename' : 'inception_v1_2016_08_28_frozen.pb',
'tensor_out' : ['InceptionV1/Logits/Predictions/Reshape_1:0'],
'tensor_in' : ['input:0'],
'input_shape' : [[224, 224, 3]], # input_shape of the elem in tensor_in
'feed_dict' :lambda img: {'input:0':img},
'num_classes' : 1001,
},
'inception_v3' : {
'url' : 'http://download.tensorflow.org/models/inception_v3_2016_08_28.tar.gz',
'filename' : 'inception_v3.ckpt',
'builder' : lambda : inception.inception_v3,
'arg_scope' : inception.inception_v3_arg_scope,
'input' : lambda : tf.placeholder(name='input', dtype=tf.float32, shape=[None, 299, 299, 3]),
'num_classes' : 1001,
},
'inception_v3_frozen' : {
'url' : 'https://storage.googleapis.com/download.tensorflow.org/models/inception_v3_2016_08_28_frozen.pb.tar.gz',
'filename' : 'inception_v3_2016_08_28_frozen.pb',
'tensor_out' : ['InceptionV3/Predictions/Softmax:0'],
'tensor_in' : ['input:0'],
'input_shape' : [[299, 299, 3]], # input_shape of the elem in tensor_in
'feed_dict' :lambda img: {'input:0':img},
'num_classes' : 1001,
},
'resnet_v1_50' : {
'url' : 'http://download.tensorflow.org/models/resnet_v1_50_2016_08_28.tar.gz',
'filename' : 'resnet_v1_50.ckpt',
'builder' : lambda : resnet_v1.resnet_v1_50,
'arg_scope' : resnet_v2.resnet_arg_scope,
'input' : lambda : tf.placeholder(name='input', dtype=tf.float32, shape=[None, 224, 224, 3]),
'num_classes' : 1000,
},
'resnet_v1_152' : {
'url' : 'http://download.tensorflow.org/models/resnet_v1_152_2016_08_28.tar.gz',
'filename' : 'resnet_v1_152.ckpt',
'builder' : lambda : resnet_v1.resnet_v1_152,
'arg_scope' : resnet_v2.resnet_arg_scope,
'input' : lambda : tf.placeholder(name='input', dtype=tf.float32, shape=[None, 224, 224, 3]),
'num_classes' : 1000,
},
'resnet_v2_50' : {
'url' : 'http://download.tensorflow.org/models/resnet_v2_50_2017_04_14.tar.gz',
'filename' : 'resnet_v2_50.ckpt',
'builder' : lambda : resnet_v2.resnet_v2_50,
'arg_scope' : resnet_v2.resnet_arg_scope,
'input' : lambda : tf.placeholder(name='input', dtype=tf.float32, shape=[None, 299, 299, 3]),
'num_classes' : 1001,
},
'resnet_v2_101' : {
'url' : 'http://download.tensorflow.org/models/resnet_v2_101_2017_04_14.tar.gz',
'filename' : 'resnet_v2_101.ckpt',
'builder' : lambda : resnet_v2.resnet_v2_101,
'arg_scope' : resnet_v2.resnet_arg_scope,
'input' : lambda : tf.placeholder(name='input', dtype=tf.float32, shape=[None, 299, 299, 3]),
'num_classes' : 1001,
},
'resnet_v2_152' : {
'url' : 'http://download.tensorflow.org/models/resnet_v2_152_2017_04_14.tar.gz',
'filename' : 'resnet_v2_152.ckpt',
'builder' : lambda : resnet_v2.resnet_v2_152,
'arg_scope' : resnet_v2.resnet_arg_scope,
'input' : lambda : tf.placeholder(name='input', dtype=tf.float32, shape=[None, 299, 299, 3]),
'num_classes' : 1001,
},
'resnet_v2_200' : {
'url' : 'http://download.tensorflow.org/models/resnet_v2_200_2017_04_14.tar.gz',
'filename' : 'resnet_v2_200.ckpt',
'builder' : lambda : resnet_v2.resnet_v2_200,
'arg_scope' : resnet_v2.resnet_arg_scope,
'input' : lambda : tf.placeholder(name='input', dtype=tf.float32, shape=[None, 299, 299, 3]),
'num_classes' : 1001,
},
'mobilenet_v1_1.0' : {
'url' : 'http://download.tensorflow.org/models/mobilenet_v1_1.0_224_2017_06_14.tar.gz',
'filename' : 'mobilenet_v1_1.0_224.ckpt',
'builder' : lambda : mobilenet_v1.mobilenet_v1,
'arg_scope' : mobilenet_v1.mobilenet_v1_arg_scope,
'input' : lambda : tf.placeholder(name='input', dtype=tf.float32, shape=[None, 224, 224, 3]),
'num_classes' : 1001,
},
'mobilenet_v1_1.0_frozen' : {
'url' : 'https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_1.0_224_frozen.tgz',
'filename' : 'mobilenet_v1_1.0_224/frozen_graph.pb',
'tensor_out' : ['MobilenetV1/Predictions/Softmax:0'],
'tensor_in' : ['input:0'],
'input_shape' : [[224, 224, 3]], # input_shape of the elem in tensor_in
'feed_dict' :lambda img: {'input:0':img},
'num_classes' : 1001,
},
'mobilenet_v2_1.0_224':{
'url' : 'https://storage.googleapis.com/mobilenet_v2/checkpoints/mobilenet_v2_1.0_224.tgz',
'filename' : 'mobilenet_v2_1.0_224.ckpt',
'builder' : lambda : mobilenet_v2.mobilenet,
'arg_scope' : mobilenet_v2.training_scope,
'input' : lambda : tf.placeholder(name='input', dtype=tf.float32, shape=[None, 224, 224, 3]),
'num_classes' : 1001,
},
'inception_resnet_v2' : {
'url' : 'http://download.tensorflow.org/models/inception_resnet_v2_2016_08_30.tar.gz',
'filename' : 'inception_resnet_v2_2016_08_30.ckpt',
'builder' : lambda : inception_resnet_v2.inception_resnet_v2,
'arg_scope' : inception_resnet_v2.inception_resnet_v2_arg_scope,
'input' : lambda : tf.placeholder(name='input', dtype=tf.float32, shape=[None, 299, 299, 3]),
'num_classes' : 1001,
},
'nasnet-a_large' : {
'url' : 'https://storage.googleapis.com/download.tensorflow.org/models/nasnet-a_large_04_10_2017.tar.gz',
'filename' : 'model.ckpt',
'builder' : lambda : nasnet.build_nasnet_large,
'arg_scope' : nasnet.nasnet_large_arg_scope,
'input' : lambda : tf.placeholder(name='input', dtype=tf.float32, shape=[None, 331, 331, 3]),
'num_classes' : 1001,
},
'facenet' : {
'url' : MMDNN_BASE_URL + 'tensorflow/facenet/20180408-102900.zip',
'filename' : '20180408-102900/model-20180408-102900.ckpt-90',
'builder' : lambda : inception_resnet_v1.inception_resnet_v1,
'arg_scope' : inception_resnet_v1.inception_resnet_v1_arg_scope,
'input' : lambda : tf.placeholder(name='input', dtype=tf.float32, shape=[None, 160, 160, 3]),
'feed_dict' : lambda img: {'input:0':img,'phase_train:0':False},
'num_classes' : 0,
},
'facenet_frozen' : {
'url' : MMDNN_BASE_URL + 'tensorflow/facenet/20180408-102900.zip',
'filename' : '20180408-102900/20180408-102900.pb',
'tensor_out' : ['InceptionResnetV1/Logits/AvgPool_1a_8x8/AvgPool:0'],
'tensor_in' : ['input:0','phase_train:0'],
'input_shape' : [[160, 160, 3],1], # input_shape of the elem in tensor_in
'feed_dict' : lambda img: {'input:0':img,'phase_train:0':False},
'num_classes' : 0,
},
'rnn_lstm_gru_stacked': {
'url' : MMDNN_BASE_URL + 'tensorflow/tf_rnn/tf_rnn.zip', # Note this is just a model used for test, not a standard rnn model.
'filename' :'tf_rnn/tf_lstm_gru_stacked.ckpt',
'builder' :lambda: test_rnn.create_symbol,
'arg_scope' :test_rnn.dummy_arg_scope,
'input' :lambda: tf.placeholder(name='input', dtype=tf.int32, shape=[None, 150]),
'feed_dict' :lambda x:{'input:0': x},
'num_classes' : 0
}
}
@classmethod
def handle_checkpoint(cls, architecture, path):
with slim.arg_scope(cls.architecture_map[architecture]['arg_scope']()):
data_input = cls.architecture_map[architecture]['input']()
logits, endpoints = cls.architecture_map[architecture]['builder']()(
data_input,
num_classes=cls.architecture_map[architecture]['num_classes'],
is_training=False)
if logits.op.type == 'Squeeze':
labels = tf.identity(logits, name='MMdnn_Output')
else:
labels = tf.squeeze(logits, name='MMdnn_Output')
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
saver = tf.train.Saver()
saver.restore(sess, path + cls.architecture_map[architecture]['filename'])
save_path = saver.save(sess, path + "imagenet_{}.ckpt".format(architecture))
print("Model saved in file: %s" % save_path)
import tensorflow.contrib.keras as keras
keras.backend.clear_session()
@classmethod
def handle_frozen_graph(cls, architecture, path):
return
# raise NotImplementedError()
@classmethod
def get_frozen_para(cls, architecture):
frozenname = architecture + '_frozen'
tensor_in = list(map(lambda x:x.split(':')[0], cls.architecture_map[frozenname]['tensor_in']))
tensor_out = list(map(lambda x:x.split(':')[0], cls.architecture_map[frozenname]['tensor_out']))
return cls.architecture_map[frozenname]['filename'], cls.architecture_map[frozenname]['input_shape'], tensor_in, tensor_out
@classmethod
def download(cls, architecture, path="./"):
if cls.sanity_check(architecture):
architecture_file = download_file(cls.architecture_map[architecture]['url'], directory=path, auto_unzip=True)
if not architecture_file:
return None
tf.reset_default_graph()
if 'ckpt' in cls.architecture_map[architecture]['filename']:
cls.handle_checkpoint(architecture, path)
elif cls.architecture_map[architecture]['filename'].endswith('pb'):
cls.handle_frozen_graph(architecture, path)
else:
raise ValueError("Unknown file name [{}].".format(cls.architecture_map[architecture]['filename']))
return architecture_file
else:
return None
@classmethod
def inference(cls, architecture, files, path, test_input_path, is_frozen=False):
if is_frozen:
architecture_ = architecture + "_frozen"
else:
architecture_ = architecture
if cls.download(architecture_, path):
import numpy as np
if 'rnn' not in architecture_:
func = TestKit.preprocess_func['tensorflow'][architecture]
img = func(test_input_path)
img = np.expand_dims(img, axis=0)
input_data = img
else:
input_data = np.load(test_input_path)
if is_frozen:
tf_model_path = cls.architecture_map[architecture_]['filename']
with open(path + tf_model_path, 'rb') as f:
serialized = f.read()
tf.reset_default_graph()
original_gdef = tf.GraphDef()
original_gdef.ParseFromString(serialized)
tf_output_name = cls.architecture_map[architecture_]['tensor_out']
tf_input_name = cls.architecture_map[architecture_]['tensor_in']
feed_dict = cls.architecture_map[architecture_]['feed_dict']
with tf.Graph().as_default() as g:
tf.import_graph_def(original_gdef, name='')
with tf.Session(graph = g, config=config) as sess:
tf_out = sess.run(tf_output_name[0], feed_dict=feed_dict(input_data)) # temporarily think the num of out nodes is one
predict = np.squeeze(tf_out)
return predict
else:
with slim.arg_scope(cls.architecture_map[architecture]['arg_scope']()):
data_input = cls.architecture_map[architecture]['input']()
logits, endpoints = cls.architecture_map[architecture]['builder']()(
data_input,
num_classes=cls.architecture_map[architecture]['num_classes'],
is_training=False)
labels = tf.squeeze(logits)
init = tf.global_variables_initializer()
with tf.Session(config=config) as sess:
sess.run(init)
saver = tf.train.Saver()
saver.restore(sess, path + cls.architecture_map[architecture]['filename'])
predict = sess.run(logits, feed_dict = {data_input : input_data})
import tensorflow.contrib.keras as keras
keras.backend.clear_session()
predict = np.squeeze(predict)
return predict
else:
return None
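# Hedged usage sketch (not part of the original class): downloads a checkpoint
# and runs one forward pass. The model name must be a key of architecture_map;
# the download directory and test image path are assumptions for illustration.
if __name__ == '__main__':
    import numpy as np
    if tensorflow_extractor.download('inception_v3', path='./models/'):
        probs = tensorflow_extractor.inference(
            'inception_v3', None, './models/', './elephant.jpg', is_frozen=False)
        print(np.argmax(probs))   # index of the top-scoring ImageNet class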
| 50.018293
| 146
| 0.581007
| 15,028
| 0.916006
| 0
| 0
| 5,199
| 0.316896
| 0
| 0
| 5,022
| 0.306108
|
22752bc484df5df4799f2011d63b6f5871537908
| 2,546
|
py
|
Python
|
nostrint/command_line.py
|
zevtyardt/no-strint
|
47583d55e3c4cd12f00f46902d2fd7d5138c3275
|
[
"MIT"
] | 13
|
2019-03-13T04:14:45.000Z
|
2020-04-05T09:13:21.000Z
|
nostrint/command_line.py
|
zevtyardt/no-strint
|
47583d55e3c4cd12f00f46902d2fd7d5138c3275
|
[
"MIT"
] | null | null | null |
nostrint/command_line.py
|
zevtyardt/no-strint
|
47583d55e3c4cd12f00f46902d2fd7d5138c3275
|
[
"MIT"
] | 6
|
2019-03-22T04:48:59.000Z
|
2020-08-07T17:09:20.000Z
|
from redat import __version__
import argparse as _argparse
import sys as _sys
def CLI():
    _parser = _argparse.ArgumentParser(
        description='simple str & int obfuscator (c) zvtyrdt.id',
        formatter_class=_argparse.RawTextHelpFormatter)
    _parser.add_argument('--version', action='version', version=__version__)
_parser.add_argument('txt', metavar='STR | INT', nargs='*', help='strings or integers')
_parser.add_argument('-i', '--infile', metavar='FILE', help='specify the file name to process')
_parser.add_argument('-o', '--outfile', metavar='FILE', help='save the results as a file')
_parser.add_argument('-w', '--with-space', action='store_true', help='generate output strings with spaces')
_parser.add_argument('--obf', action='store_true', help='same as --rand-if, --remove-blanks, --indent\n--ignore-comments, --only-variable and --only-strint\n* default indentation is 1...')
_parser.add_argument('-V', '--only-variable', action='store_true', help='obfuscate all variables in the source code (beta)')
_parser.add_argument('-O', '--only-strint', action='store_true', help='just obfuscate strings and integers')
_parser.add_argument('-e', '--encode', action='store_true', help='convert string to integer before obfuscate')
_parser.add_argument('-s', '--stdout', action='store_true', help='add print function to output (string only)')
_parser.add_argument('-x', '--exec', action='store_true', dest='_exec', help='make the output an executable script')
_obfuscate = _parser.add_argument_group('additional', description='if the --only-string option is called')
_obfuscate.add_argument('-r', '--rand-if', action='store_true', help='add a random if statement to the source code')
_obfuscate.add_argument('-n', '--indent', help='Indentation to use', type=int)
_obfuscate.add_argument('-b', '--remove-blanks', action='store_true', help='remove blank lines, instead of obfuscate')
_obfuscate.add_argument('-c', '--ignore-comments', action='store_true', help='remove first block of comments as well')
_verbosity = _parser.add_argument_group('verbosity / simulation')
_verbosity.add_argument('-S', '--serialization', action='store_true', help='serialization of object data after being obscured')
_verbosity.add_argument('-E', '--eval', action='store_true', dest='_eval', help='try running output (experimental)')
_verbosity.add_argument('--verbose', action='store_true', help='verbose (debug)')
_verbosity.add_argument('--debug', action='store_true', help='enable debug mode')
return _parser
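# Illustrative usage sketch (not part of the original module): CLI() returns a
# standard argparse parser, so it can be exercised directly. The sample
# arguments below are assumptions chosen only to demonstrate the options above.
if __name__ == '__main__':
    _demo_args = CLI().parse_args(['hello world', '--encode', '--stdout'])
    print(_demo_args.txt, _demo_args.encode, _demo_args.stdout)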
| 72.742857
| 193
| 0.710919
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,328
| 0.521603
|
22754cccad56e7c435e32fdb50e3fc9c09afbc92
| 15,435
|
py
|
Python
|
simple_lib.py
|
rcmorehead/simplanets
|
3d9b3d1273a4f1a32ce656bdf5e9d6c6c38e3f7b
|
[
"MIT"
] | null | null | null |
simple_lib.py
|
rcmorehead/simplanets
|
3d9b3d1273a4f1a32ce656bdf5e9d6c6c38e3f7b
|
[
"MIT"
] | null | null | null |
simple_lib.py
|
rcmorehead/simplanets
|
3d9b3d1273a4f1a32ce656bdf5e9d6c6c38e3f7b
|
[
"MIT"
] | null | null | null |
"""
Useful classes and functions for SIMPLE.
"""
import numpy as np
import warnings
import math
from scipy import integrate
r_sun_au = 0.004649
r_earth_r_sun = 0.009155
day_hrs = 24.0
#@profile
def impact_parameter(a, e, i, w, r_star):
"""
    Compute the impact parameter for a transiting planet.
Parameters
----------
a : int, float or numpy array
Semimajor axis of planet's orbit in AU
e : int, float or numpy array
Eccentricity of planet. WARNING! This function breaks down at
high eccentricity (>> 0.9), so be careful!
i : int, float or numpy array
Inclination of planet in degrees. 90 degrees is edge-on.
w : int, float or numpy array
Longitude of ascending node defined with respect to sky-plane.
r_star : int, float or numpy array
Radius of star in solar radii.
Returns
-------
b : float or numpy array
The impact parameter, ie transit latitude in units of stellar radius.
Examples
--------
>>> impact_parameter(1, 0, 90, 0, 1)
1.3171077641937547e-14
>>> a = np.linspace(.1, 1.5, 3)
>>> e = np.linspace(0, .9, 3)
>>> i = np.linspace(89, 91, 3)
>>> w = np.linspace(0, 360, 3)
>>> r_star = np.linspace(0.1, 10, 3)
>>> impact_parameter(a, e, i, w, r_star)
array([ 3.75401300e+00, 1.66398961e-15, 1.06989371e-01])
Notes
-----
Using Eqn. (7), Chap. 4, Page 56 of Exoplanets, edited by S. Seager.
Tucson, AZ: University of Arizona Press, 2011, 526 pp.
ISBN 978-0-8165-2945-2.
"""
return abs(a/(r_star * r_sun_au) * np.cos(np.radians(i)) *
(1 - e**2) / (1 + e * np.sin(np.radians(w))))
#@profile
def inclination(fund_plane, mutual_inc, node):
"""
Compute the inclination of a planet.
    Uses the law of spherical cosines to compute the sky-plane inclination of
    an orbit, given a reference plane inclination, the angle from the reference
    plane (i.e. the mutual inclination) and a nodal angle.
Parameters
----------
fund_plane: int, float or numpy array
        Inclination of the fundamental plane of the system in degrees with
        respect to the sky plane; 90 degrees is edge-on.
mutual_inc : int, float or numpy array
Angle in degrees of the orbital plane of the planet with respect to the
fundamental plane of the system.
node : int, float or numpy array
Rotation in degrees of the planet's orbit about the perpendicular of
the reference plane. I.e. the longitude of the node with respect to the
reference plane.
Returns
-------
i : float or numpy array
The inclination of the planet's orbit with respect to the sky plane.
Examples
--------
>>> inclination(90, 3, 0)
87.0
>>> fun_i = np.linspace(80, 110, 3)
>>> mi = np.linspace(0, 10, 3)
>>> node = np.linspace(30,100,3)
>>> inclination(fun_i, mi, node)
array([ 80. , 92.87347869, 111.41738591])
Notes
-----
See eqn. () in
"""
fund_plane = np.radians(fund_plane)
mutual_inc = np.radians(mutual_inc)
node = np.radians(node)
return np.degrees(np.arccos(np.cos(fund_plane) * np.cos(mutual_inc) +
np.sin(fund_plane) * np.sin(mutual_inc) * np.cos(node)))
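# For reference, the relation implemented above is the spherical law of
# cosines (sketched here because the docstring's citation is incomplete):
#
#   cos(i) = cos(i_fund) * cos(i_mut) + sin(i_fund) * sin(i_mut) * cos(node)
#
# where i is the sky-plane inclination returned by inclination().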
#@profile
def semimajor_axis(period, mass):
"""
Compute the semimajor axis of an object.
This is a simple implementation of the general form Kepler's Third law.
Parameters
----------
period : int, float or numpy array
The orbital period of the orbiting body in units of days.
mass : int, float or array-like
The mass of the central body (or mass sum) in units of solar mass.
Returns
-------
a : float or numpy array
The semimajor axis in AU.
Examples
--------
>>> semimajor_axis(365.256363,1.00)
0.999985270598628
>>> semimajor_axis(np.linspace(1, 1000, 5),np.linspace(0.08, 4, 5))
array([ 0.00843254, 0.7934587 , 1.56461631, 2.33561574, 3.10657426])
"""
return (((2.959E-4*mass)/(4*np.pi**2))*period**2.0) ** (1.0/3.0)
#@profile
def transit_depth(r_star, r_planet):
"""
    Compute the transit depth in parts per million (ppm).
    The depth is the squared ratio of the planet radius (in Earth radii) to
    the stellar radius (in solar radii).
Parameters
----------
Returns
-------
Examples
--------
"""
return ((r_planet * r_earth_r_sun)/r_star)**2 * 1e6
#@profile
def transit_duration(p, a, e, i, w, b, r_star, r_planet):
"""
Compute the full (Q1-Q4) transit duration.
Full description
Parameters
----------
p : int, float or numpy array
Period of planet orbit in days
a : int, float or numpy array
Semimajor axis of planet's orbit in AU
e : int, float or numpy array
Eccentricity of planet. WARNING! This function breaks down at
high eccentricity (>> 0.9), so be careful!
i : int, float or numpy array
Inclination of planet in degrees. 90 degrees is edge-on.
w : int, float or numpy array
Longitude of ascending node defined with respect to sky-plane.
b : int, float or numpy array
Impact parameter of planet.
r_star : int, float or numpy array
Radius of star in solar radii.
r_planet : int, float or numpy array
Radius of planet in Earth radii
Returns
-------
T : float or numpy array
The Q1-Q4 (full) transit duration of the planet in hours.
Examples
--------
Notes
-----
Using Eqns. (15) and (16), Chap. 4, Page 58 of Exoplanets, edited by S.
Seager. Tucson, AZ: University of Arizona Press, 2011, 526 pp.
ISBN 978-0-8165-2945-2.
"""
#TODO Make this robust against b > 1
#warnings.simplefilter("always")
#print "pars", p, a, e, i, w, b, r_star, r_planet
#print ""
#print (1 - (r_planet * r_earth_r_sun) / r_star)**2 - b**2
#print (1 - e**2)
#print ""
duration = np.where(e < 1.0, (p / np.pi *
np.arcsin((r_star * r_sun_au) / a * 1 / np.sin(np.radians(i)) *
np.sqrt((1 - (r_planet * r_earth_r_sun) / r_star)**2
- b**2)) *
1 / (1 + e*np.sin(np.radians(w))) * np.sqrt(1 - e**2)) * day_hrs, 0)
return duration
#@profile
def snr(catalog):
"""
Calculate Signal to Noise ratio for a planet transit
Full description
Parameters
----------
Returns
-------
Examples
--------
"""
return catalog['depth']/catalog['cdpp6'] * np.sqrt((catalog['days_obs'] /
catalog['period']) *
catalog['T']/6.0)
#@profile
def xi(catalog):
"""
One-line description
Full description
Parameters
----------
Returns
-------
Examples
--------
"""
catalog.sort(order=['ktc_kepler_id', 'period'])
p_in = np.roll(catalog['period'], 1)
t_in = np.roll(catalog['T'], 1)
kic_id = np.roll(catalog['ktc_kepler_id'], 1)
idx = np.where(catalog['ktc_kepler_id'] == kic_id)
P_ratio = catalog['period'][idx]/p_in[idx]
D_ratio = t_in[idx]/catalog['T'][idx]
#idx = np.where(P_ratio >= 1.0)
#print P_ratio
logxi = np.log10(D_ratio * P_ratio**(1./3.))
if logxi.size < 1:
xi_fraction = 0.0
else:
xi_fraction = logxi[logxi >= 0.0].size/float(logxi.size)
return logxi, xi_fraction
#@profile
def multi_count(catalog, stars):
"""
One-line description
Full description
Parameters
----------
Returns
-------
Examples
--------
"""
count = np.zeros(stars['ktc_kepler_id'].size)
bincount = np.bincount(catalog['ktc_kepler_id'])
bincount = bincount[bincount > 0]
count[:bincount.size] = bincount
return count
#@profile
def multies_only(catalog):
unq, unq_idx, unq_cnt = np.unique(catalog['ktc_kepler_id'],
return_inverse=True,
return_counts=True)
cnt_mask = unq_cnt > 1
cnt_idx, = np.nonzero(cnt_mask)
idx_mask = np.in1d(unq_idx, cnt_idx)
return catalog[idx_mask]
def duration_anomaly(catalog):
"""
Returns T/T_nu where T is the transit duration and T_nu is the
duration for a e = 0, b = 0 transit.
Full description
Parameters
----------
Returns
-------
Examples
--------
"""
catalog['T_nu'] = (catalog['T'] /
((catalog['radius'] * r_sun_au * catalog['period'])
/(np.pi * catalog['a']) * day_hrs))
return catalog
#@profile
def normed_duration(catalog):
"""
One-line description
Full description
Parameters
----------
Returns
-------
Examples
--------
"""
return (catalog['T']/day_hrs)/(catalog['period'])**(1/3.0)
def _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N):
"""
Compute A2akN equation 7 of Scholz and Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
A2aKN : float
The A2aKN statistics of Scholz and Stephens 1987.
"""
A2akN = 0.
Z_ssorted_left = Z.searchsorted(Zstar, 'left')
if N == Zstar.size:
lj = 1.
else:
lj = Z.searchsorted(Zstar, 'right') - Z_ssorted_left
Bj = Z_ssorted_left + lj / 2.
for i in np.arange(0, k):
s = np.sort(samples[i])
s_ssorted_right = s.searchsorted(Zstar, side='right')
        Mij = s_ssorted_right.astype(float)
fij = s_ssorted_right - s.searchsorted(Zstar, 'left')
Mij -= fij / 2.
inner = lj / float(N) * (N * Mij - Bj * n[i])**2 / \
(Bj * (N - Bj) - N * lj / 4.)
A2akN += inner.sum() / n[i]
A2akN *= (N - 1.) / N
return A2akN
def _anderson_ksamp_right(samples, Z, Zstar, k, n, N):
"""
Compute A2akN equation 6 of Scholz & Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
A2KN : float
The A2KN statistics of Scholz and Stephens 1987.
"""
A2kN = 0.
lj = Z.searchsorted(Zstar[:-1], 'right') - Z.searchsorted(Zstar[:-1],
'left')
Bj = lj.cumsum()
for i in np.arange(0, k):
s = np.sort(samples[i])
Mij = s.searchsorted(Zstar[:-1], side='right')
inner = lj / float(N) * (N * Mij - Bj * n[i])**2 / (Bj * (N - Bj))
A2kN += inner.sum() / n[i]
return A2kN
def anderson_ksamp(samples, midrank=True):
"""The Anderson-Darling test for k-samples.
The k-sample Anderson-Darling test is a modification of the
one-sample Anderson-Darling test. It tests the null hypothesis
that k-samples are drawn from the same population without having
to specify the distribution function of that population. The
critical values depend on the number of samples.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample data in arrays.
midrank : bool, optional
Type of Anderson-Darling test which is computed. Default
(True) is the midrank test applicable to continuous and
discrete populations. If False, the right side empirical
distribution is used.
Returns
-------
A2 : float
Normalized k-sample Anderson-Darling test statistic.
critical : array
The critical values for significance levels 25%, 10%, 5%, 2.5%, 1%.
logp : float
The log (ln) of an approximate significance level at which the null hypothesis for the
provided samples can be rejected.
Raises
------
ValueError
If less than 2 samples are provided, a sample is empty, or no
distinct observations are in the samples.
See Also
--------
ks_2samp : 2 sample Kolmogorov-Smirnov test
anderson : 1 sample Anderson-Darling test
Notes
-----
[1]_ Defines three versions of the k-sample Anderson-Darling test:
one for continuous distributions and two for discrete
distributions, in which ties between samples may occur. The
default of this routine is to compute the version based on the
midrank empirical distribution function. This test is applicable
to continuous and discrete data. If midrank is set to False, the
right side empirical distribution is used for a test for discrete
data. According to [1]_, the two discrete test statistics differ
only slightly if a few collisions due to round-off errors occur in
the test not adjusted for ties between samples.
.. versionadded:: 0.14.0
References
----------
.. [1] Scholz, F. W and Stephens, M. A. (1987), K-Sample
Anderson-Darling Tests, Journal of the American Statistical
Association, Vol. 82, pp. 918-924.
"""
k = len(samples)
if (k < 2):
raise ValueError("anderson_ksamp needs at least two samples")
samples = list(map(np.asarray, samples))
Z = np.sort(np.hstack(samples))
N = Z.size
Zstar = np.unique(Z)
if Zstar.size < 2:
raise ValueError("anderson_ksamp needs more than one distinct "
"observation")
n = np.array([sample.size for sample in samples])
if any(n == 0):
raise ValueError("anderson_ksamp encountered sample without "
"observations")
if midrank:
A2kN = _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N)
else:
A2kN = _anderson_ksamp_right(samples, Z, Zstar, k, n, N)
h = (1. / np.arange(1, N)).sum()
H = (1. / n).sum()
g = 0
for l in np.arange(1, N-1):
inner = np.array([1. / ((N - l) * m) for m in np.arange(l+1, N)])
g += inner.sum()
a = (4*g - 6) * (k - 1) + (10 - 6*g)*H
b = (2*g - 4)*k**2 + 8*h*k + (2*g - 14*h - 4)*H - 8*h + 4*g - 6
c = (6*h + 2*g - 2)*k**2 + (4*h - 4*g + 6)*k + (2*h - 6)*H + 4*h
d = (2*h + 6)*k**2 - 4*h*k
sigmasq = (a*N**3 + b*N**2 + c*N + d) / ((N - 1.) * (N - 2.) * (N - 3.))
m = k - 1
A2 = (A2kN - m) / math.sqrt(sigmasq)
return A2
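# Illustrative usage sketch (assumed data, not part of the original module):
#
#   >>> rng_ad = np.random.RandomState(0)
#   >>> sample_a = rng_ad.normal(size=50)
#   >>> sample_b = rng_ad.normal(size=60)
#   >>> A2 = anderson_ksamp([sample_a, sample_b])
#
# Small values of A2 are consistent with both samples being drawn from the
# same population; large values argue against that null hypothesis.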
def hellinger_funct(x,P,Q):
"""
    P, Q should be scipy.stats gaussian_kde objects
"""
return np.sqrt(P(x) * Q(x))
def hellinger_cont(P,Q):
"""
    P, Q should be scipy.stats gaussian_kde objects; their overlap is
    integrated over the real line using hellinger_funct.
"""
return 1 - integrate.quad(hellinger_funct, -np.inf, np.inf, args=(P,Q))[0]
def hellinger_disc(P,Q):
"""
    P, Q should be the tuples returned by np.histogram(..., density=True)
"""
if P[0].size == Q[0].size:
pass
else:
if P[0].size > Q[0].size:
Q[0].resize(P[0].size)
else:
P[0].resize(Q[0].size)
return 1 - np.sum(np.sqrt(P[0]*Q[0]))
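# Illustrative usage of the Hellinger helpers (assumed data): hellinger_disc
# expects the tuples returned by np.histogram(..., density=True), while
# hellinger_cont expects scipy.stats.gaussian_kde objects.
#
#   >>> x = np.random.normal(size=1000)
#   >>> y = np.random.normal(loc=0.5, size=1000)
#   >>> P = np.histogram(x, bins=20, density=True)
#   >>> Q = np.histogram(y, bins=20, density=True)
#   >>> hd = hellinger_disc(P, Q)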
| 27.464413
| 94
| 0.579981
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 9,866
| 0.639197
|
2275883293755489ec49ada67afba2a65cceb970
| 179
|
py
|
Python
|
y10m/join.lines.py
|
goodagood/story
|
99dd959f4be44070144fe87313cf51595d928a11
|
[
"Apache-2.0"
] | 3
|
2019-12-03T02:08:55.000Z
|
2021-05-30T14:02:21.000Z
|
y10m/join.lines.py
|
goodagood/story
|
99dd959f4be44070144fe87313cf51595d928a11
|
[
"Apache-2.0"
] | null | null | null |
y10m/join.lines.py
|
goodagood/story
|
99dd959f4be44070144fe87313cf51595d928a11
|
[
"Apache-2.0"
] | 1
|
2020-08-07T23:09:45.000Z
|
2020-08-07T23:09:45.000Z
|
#inputFile = 'sand.407'
inputFile = 'sand.407'
outputFile= 'sand.out'
def joinLine():
pass
with open(inputFile) as OF:
lines = OF.readlines()
print(lines[0:3])
| 11.1875
| 27
| 0.631285
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 43
| 0.240223
|
227598bb20ab74c029eb76f22348999ce40f32c0
| 718
|
py
|
Python
|
web_programming/fetch_github_info.py
|
JB1959/Python
|
b6ca263983933c3ecc06ed0083dd11b6faf870c8
|
[
"MIT"
] | 14
|
2020-10-03T05:43:48.000Z
|
2021-11-01T21:02:26.000Z
|
web_programming/fetch_github_info.py
|
JB1959/Python
|
b6ca263983933c3ecc06ed0083dd11b6faf870c8
|
[
"MIT"
] | 3
|
2020-06-08T07:03:15.000Z
|
2020-06-08T08:41:22.000Z
|
web_programming/fetch_github_info.py
|
JB1959/Python
|
b6ca263983933c3ecc06ed0083dd11b6faf870c8
|
[
"MIT"
] | 12
|
2020-10-03T05:44:19.000Z
|
2022-01-16T05:37:54.000Z
|
#!/usr/bin/env python3
"""
Created by sarathkaul on 14/11/19
Basic authentication using an API password is deprecated and will soon no longer work.
Visit https://developer.github.com/changes/2020-02-14-deprecating-password-auth
for more information around suggested workarounds and removal dates.
"""
import requests
_GITHUB_API = "https://api.github.com/user"
def fetch_github_info(auth_user: str, auth_pass: str) -> dict:
"""
Fetch GitHub info of a user using the requests module
"""
return requests.get(_GITHUB_API, auth=(auth_user, auth_pass)).json()
if __name__ == "__main__":
for key, value in fetch_github_info("<USER NAME>", "<PASSWORD>").items():
print(f"{key}: {value}")
| 26.592593
| 86
| 0.71727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 450
| 0.626741
|
2275d1ae20d552ba2f46265e141e463daa5307b3
| 1,362
|
py
|
Python
|
ACM ICPC/Sorting/Merge Sort/Python/Merge_Sort.py
|
shreejitverma/GeeksforGeeks
|
d7bcb166369fffa9a031a258e925b6aff8d44e6c
|
[
"MIT"
] | 2
|
2022-02-18T05:14:28.000Z
|
2022-03-08T07:00:08.000Z
|
ACM ICPC/Sorting/Merge Sort/Python/Merge_Sort.py
|
shivaniverma1/Competitive-Programming-1
|
d7bcb166369fffa9a031a258e925b6aff8d44e6c
|
[
"MIT"
] | 6
|
2022-01-13T04:31:04.000Z
|
2022-03-12T01:06:16.000Z
|
ACM ICPC/Sorting/Merge Sort/Python/Merge_Sort.py
|
shivaniverma1/Competitive-Programming-1
|
d7bcb166369fffa9a031a258e925b6aff8d44e6c
|
[
"MIT"
] | 2
|
2022-02-14T19:53:53.000Z
|
2022-02-18T05:14:30.000Z
|
class MergeSort:
def __init__(self, lst):
self.lst = lst
def mergeSort(self, a):
midPoint = len(a) // 2
        # Recurse whenever more than one element remains; comparing only the
        # last and first elements would leave many inputs unsorted.
        if len(a) > 1:
left = self.mergeSort(a[:midPoint])
right = self.mergeSort(a[midPoint:])
return self.merge(left, right)
else:
return a
def merge(self, left, right):
output = list()
leftCount, rightCount = 0, 0
while leftCount < len(left) or rightCount < len(right):
if leftCount < len(left) and rightCount < len(right):
if left[leftCount] < right[rightCount]:
output.append(left[leftCount])
leftCount += 1
else:
output.append(right[rightCount])
rightCount += 1
            if leftCount == len(left) and rightCount < len(right):
output.append(right[rightCount])
rightCount += 1
elif leftCount < len(left) and rightCount == len(right):
output.append(left[leftCount])
leftCount += 1
return output
def sort(self):
temp = self.mergeSort(self.lst)
self.lst = temp
def show(self):
return self.lst
if __name__ == "__main__":
i = MergeSort([5, 4, 3, 2, 1])
i.sort()
print(i.show())
| 30.266667
| 68
| 0.509545
| 1,264
| 0.928047
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 0.007342
|
2277a209f052632755ba80cff0004cf66a4c0551
| 3,952
|
py
|
Python
|
nightly.py
|
insolar/insolar-jepsen
|
f95e05fdf0b3d28756f60de9aef1b8c44ef0d030
|
[
"Apache-2.0"
] | 6
|
2019-03-26T10:02:54.000Z
|
2019-09-13T15:31:39.000Z
|
nightly.py
|
insolar/insolar-jepsen
|
f95e05fdf0b3d28756f60de9aef1b8c44ef0d030
|
[
"Apache-2.0"
] | 17
|
2019-06-04T10:55:42.000Z
|
2020-03-10T09:22:52.000Z
|
nightly.py
|
insolar/insolar-jepsen
|
f95e05fdf0b3d28756f60de9aef1b8c44ef0d030
|
[
"Apache-2.0"
] | 3
|
2019-11-22T10:41:00.000Z
|
2021-02-18T12:03:38.000Z
|
#!/usr/bin/env python3
# vim: set ai et ts=4 sw=4:
import os
import subprocess
import argparse
import time
import calendar
import re
def run(cmd):
code = subprocess.call(['/bin/bash', '-o', 'pipefail', '-c', cmd])
if code != 0:
raise RuntimeError("Command `%s` returned non-zero status: %d" %
(cmd, code))
def get_output(cmd):
data = subprocess.check_output(cmd, shell=True)
data = data.decode('utf-8').strip()
return data
parser = argparse.ArgumentParser(
description='Run nightly Insolar Jepsen-like tests')
parser.add_argument(
'-b', '--branch', metavar='B', type=str, default='master',
help='git branch name (default: master)')
parser.add_argument(
'-r', '--repeat', metavar='N', type=int, default=100,
help='number of times to repeat tests (default: 100)')
parser.add_argument(
'-c', '--channel', metavar='C', type=str, default='#dev-backend',
help='slack channel (default: #dev-backend)')
parser.add_argument(
'-e', '--emoji', metavar='E', type=str, default='aphyr',
help='message emoji (default: aphyr)')
parser.add_argument(
'-s', '--slack', metavar='H', type=str, required=True,
help='slack hook string (it looks like base64 string)')
parser.add_argument(
'-l', '--logdir', metavar='DIR', type=str, required=True,
help='path to the directory where logfiles will be saved')
parser.add_argument(
'-u', '--url', metavar='URL', type=str, required=True,
help='URL where saved logfiles will be accessible')
args = parser.parse_args()
tests_passed = False
date = "FAILED_TO_GET_DATE"
try:
date = get_output('date +%Y%m%d%H%M00')
except Exception as e:
print("ERROR:")
print(str(e))
logfile_name = 'jepsen-' + date + '.txt'
logfile_fullname = args.logdir + '/' + logfile_name
try:
run('echo "=== BUILDING BRANCH '+args.branch +
' ===" | tee -a '+logfile_fullname)
run('./build-docker.py '+args.branch+' 2>&1 | tee -a '+logfile_fullname)
run('echo "==== RUNNING TESTS '+str(args.repeat) +
' TIMES ===" | tee -a '+logfile_fullname)
run('./run-test.py -i insolar-jepsen:latest -r ' +
str(args.repeat)+' 2>&1 | tee -a '+logfile_fullname)
tests_passed = True
except Exception as e:
print("ERROR:")
print(str(e))
podlogs_name = 'jepsen-' + date + '.tgz'
podlogs_fullname = args.logdir + '/' + podlogs_name
try:
run('echo "=== AGGREGATING LOGS TO ' +
podlogs_fullname+' ===" | tee -a '+logfile_fullname)
run('./aggregate-logs.py /tmp/jepsen-'+date)
run('gunzip /tmp/jepsen-'+date+'/*/*.log.gz || true')
run('tar -cvzf '+podlogs_fullname+' /tmp/jepsen-'+date)
run('rm -r /tmp/jepsen-'+date)
run('echo "=== CLEANING UP '+args.logdir+' ===" | tee -a '+logfile_fullname)
now = int(time.time())
os.chdir(args.logdir)
for fname in os.listdir("."):
        m = re.search(r"jepsen-(\d{4}\d{2}\d{2})", fname)
if m is None:
run(' echo "File: ' + fname + ' - skipped" | tee -a '+logfile_fullname)
continue
ftime = calendar.timegm(time.strptime(m.group(1), "%Y%m%d"))
ndays = int((now - ftime) / (60 * 60 * 24))
delete = ndays > 15
run(' echo "File: ' + fname + ', ndays: ' + str(ndays) +
', delete: ' + str(delete) + '" | tee -a '+logfile_fullname)
if delete:
os.unlink(fname)
except Exception as e:
print("ERROR:")
print(str(e))
print("Test passed: "+str(tests_passed))
message = 'PASSED' if tests_passed else 'FAILED'
message = 'Nightly Jepsen-like tests '+message +\
'. Log: '+args.url+'/'+logfile_name +\
' Pod logs: '+args.url+'/'+podlogs_name
cmd = 'curl -X POST --data-urlencode \'payload={"channel": "'+args.channel +\
'", "username": "aphyr", "text": "'+message +\
'", "icon_emoji": ":'+args.emoji +\
':"}\' https://hooks.slack.com/services/'+args.slack
print("EXECUTING: "+cmd)
run(cmd)
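# Example invocation (sketch only; the hook string, log directory and URL are
# placeholders rather than real values):
#
#   ./nightly.py --branch master --repeat 100 \
#       --slack T000/B000/XXXX \
#       --logdir /var/www/jepsen-logs \
#       --url https://example.com/jepsen-logs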
| 34.365217
| 83
| 0.605263
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,499
| 0.379302
|
2277e005db07cac1472613b25b5759c8831551c6
| 7,195
|
py
|
Python
|
ecart/ecart.py
|
micael-grilo/E-Cart
|
76e86b4c7ea5bd2becda23ef8c69470c86630c5e
|
[
"MIT"
] | null | null | null |
ecart/ecart.py
|
micael-grilo/E-Cart
|
76e86b4c7ea5bd2becda23ef8c69470c86630c5e
|
[
"MIT"
] | null | null | null |
ecart/ecart.py
|
micael-grilo/E-Cart
|
76e86b4c7ea5bd2becda23ef8c69470c86630c5e
|
[
"MIT"
] | null | null | null |
import redis
import copy
# reduce is used below and is no longer a builtin on Python 3
from functools import wraps, reduce
from .exception import ErrorMessage
from .decorators import raise_exception
from .serializer import Serializer
TTL = 604800
class Cart(object):
"""
Main Class for Cart, contains all functionality
"""
@raise_exception("E-Cart can't be initialized due to Error: ")
def __init__(self, user_id, redis_connection, ttl=TTL):
"""
Constructor for the class, initializes user_id and checks whether
the users' cart exists or not.
"""
self.__redis_user_hash_token = "E-CART"
self.user_id = user_id
self.user_redis_key = self.__get_user_redis_key(user_id)
self.redis_connection = redis_connection
self.ttl = ttl
self.user_cart_exists = self.cart_exists(user_id)
self.destroy = self.__del__
@raise_exception("ttl can't be set due to Error: ")
def set_ttl(self):
"""
Update the ttl of the cart
"""
return self.redis_connection.expire(self.user_redis_key, self.ttl)
@raise_exception("ttl can't be obtained due to Error: ")
def get_ttl(self):
ttl = self.redis_connection.ttl(self.user_redis_key)
if ttl:
return ttl
else:
raise ErrorMessage("User Cart does not exists")
def __product_dict(self, unit_cost, quantity, extra_data_dict={}):
"""
Returns the dictionary for a product, with the argument values.
"""
product_dict = {
"unit_cost": unit_cost,
"quantity": quantity
}
product_dict.update(extra_data_dict)
return product_dict
@raise_exception("Cart exists can't return a value due to Error: ")
def cart_exists(self, user_id):
"""
Confirm user's cart hash in Redis
"""
return self.redis_connection.exists(self.user_redis_key)
def __get_user_redis_key_prefix(self):
"""
Generate the prefix for the user's redis key.
"""
return ":".join([self.__redis_user_hash_token, "USER_ID"])
def __get_user_redis_key(self, user_id):
"""
Generates the name of the Hash used for storing User cart in Redis
"""
if user_id:
return self.__get_user_redis_key_prefix() + ":"+str(user_id)
else:
raise ErrorMessage("user_id can't be null")
@raise_exception("Redis user key can't be obtained due to Error: ")
def get_user_redis_key(self):
"""
Returns the name of the Hash used for storing User cart in Redis
"""
return self.user_redis_key
@raise_exception("Product can't be added to the User cart due to Error: ")
def add(self, product_id, unit_cost, quantity=1, **extra_data_dict):
"""
        Returns True if the addition of the product with the given product_id,
        unit_cost and quantity is successful, else False.
Can also add extra details in the form of dictionary.
"""
product_dict = self.__product_dict(
unit_cost, quantity, extra_data_dict)
self.redis_connection.hset(
self.user_redis_key, product_id, Serializer.dumps(product_dict))
self.user_cart_exists = self.cart_exists(self.user_id)
self.set_ttl()
@raise_exception("Product can't be obtained due to Error: ")
def get_product(self, product_id):
"""
Returns the cart details as a Dictionary for the given product_id
"""
if self.user_cart_exists:
product_string = self.redis_connection.hget(
self.user_redis_key, product_id)
if product_string:
return Serializer.loads(product_string)
else:
return {}
else:
raise ErrorMessage("The user cart is Empty")
@raise_exception("contains can't function due to Error: ")
def contains(self, product_id):
"""
Checks whether the given product exists in the cart
"""
return self.redis_connection.hexists(self.user_redis_key, product_id)
def __get_raw_cart(self):
return self.redis_connection.hgetall(
self.user_redis_key)
@raise_exception("Cart can't be obtained due to Error: ")
def get(self):
"""
Returns all the products and their details present in the cart as a dictionary
"""
return {key: Serializer.loads(value) for key, value in self.__get_raw_cart().items()}
@raise_exception("count can't be obtained due to Error: ")
def count(self):
"""
Returns the number of types of products in the carts
"""
return self.redis_connection.hlen(self.user_redis_key)
@raise_exception("remove can't function due to Error: ")
def remove(self, product_id):
"""
Removes the product from the cart
"""
if self.user_cart_exists:
if self.redis_connection.hdel(self.user_redis_key, product_id):
self.set_ttl()
return True
else:
return False
else:
raise ErrorMessage("The user cart is Empty")
    @raise_exception("Product dictionaries can't be obtained due to Error: ")
def get_product_dicts(self):
"""
Returns the list of all product details
"""
return [Serializer.loads(product_string) for product_string in self.redis_connection.hvals(self.user_redis_key)]
def __quantities(self):
return map(lambda product_dict: product_dict.get('quantity'), self.get_product_dicts())
@raise_exception("quantity can't be obtained due to Error: ")
def quantity(self):
"""
Returns the total number of units of all products in the cart
"""
return reduce(lambda result, quantity: quantity + result, self.__quantities())
    @raise_exception("total_cost can't be obtained due to Error: ")
def total_cost(self):
"""
Returns the net total of all product cost from the cart
"""
return sum(self.__price_list())
@raise_exception("copy can't be made due to Error: ")
def copy(self, target_user_id):
"""
Copies the cart of the user to the target_user_id
"""
is_copied = self.redis_connection.hmset(
self.__get_user_redis_key(target_user_id), self.__get_raw_cart())
        # Cart() requires a redis connection; reuse this cart's connection.
        target_cart = Cart(target_user_id, self.redis_connection, self.ttl)
target_cart.set_ttl()
return target_cart if is_copied else None
def __product_price(self, product_dict):
"""
Returns the product of product_quantity and its unit_cost
"""
return product_dict['quantity'] * product_dict['unit_cost']
def __price_list(self):
"""
Returns the list of product's total_cost
"""
return map(lambda product_dict: self.__product_price(product_dict), self.get_product_dicts())
def __del__(self):
"""
Deletes the user's cart
"""
self.redis_connection.delete(self.user_redis_key)
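# Illustrative usage sketch (the connection parameters and product ids are
# assumptions, not part of the original module):
#
#   import redis
#   conn = redis.StrictRedis(host='localhost', port=6379, db=0)
#   cart = Cart(user_id=42, redis_connection=conn)
#   cart.add('sku-1', 9.99, quantity=2, title='Example product')
#   cart.get_product('sku-1')   # {'unit_cost': 9.99, 'quantity': 2, ...}
#   cart.total_cost()           # 19.98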
| 34.927184
| 120
| 0.624739
| 7,014
| 0.974844
| 0
| 0
| 5,184
| 0.7205
| 0
| 0
| 2,660
| 0.369701
|
2278e9e4e492d486947a4dea8110d0c980581f65
| 1,172
|
py
|
Python
|
app/tests/test_transaction.py
|
geometry-labs/icon-filter-registration
|
5ac93268465a529be453a51447805a65f2e23415
|
[
"Apache-2.0"
] | null | null | null |
app/tests/test_transaction.py
|
geometry-labs/icon-filter-registration
|
5ac93268465a529be453a51447805a65f2e23415
|
[
"Apache-2.0"
] | 1
|
2021-03-02T22:41:58.000Z
|
2021-03-11T16:44:26.000Z
|
app/tests/test_transaction.py
|
geometry-labs/icon-filter-registration
|
5ac93268465a529be453a51447805a65f2e23415
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from app.main import app
from app.models import TransactionRegistration
from app.settings import settings
from httpx import AsyncClient
from tests.conftest import RequestCache
registration = TransactionRegistration(
to_address="cx0000000000000000000000000000000000000001",
from_address="cx0000000000000000000000000000000000000002",
)
@pytest.mark.asyncio
async def test_logevent_register():
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post(
settings.API_ENDPOINT_PREFIX + "/transaction/register",
data=registration.json(),
)
RequestCache.tx_id = response.json()["reg_id"]
assert response.status_code == 200
@pytest.mark.asyncio
async def test_broadcaster_unregister():
tx_id = RequestCache.tx_id
unregistration_object = registration
unregistration_object.reg_id = tx_id
async with AsyncClient(app=app, base_url="http://test") as ac:
response = await ac.post(
settings.API_ENDPOINT_PREFIX + "/transaction/unregister",
data=unregistration_object.json(),
)
assert response.status_code == 200
| 28.585366
| 69
| 0.729522
| 0
| 0
| 0
| 0
| 809
| 0.690273
| 767
| 0.654437
| 170
| 0.145051
|
22793242be75fa797dece7e56ce733139032b7be
| 33,565
|
py
|
Python
|
oslo_messaging/tests/rpc/test_server.py
|
ox12345/oslo.messaging
|
bdb21c0bcddfb2dac1e0f4d926e7df53d975bf0c
|
[
"Apache-1.1"
] | null | null | null |
oslo_messaging/tests/rpc/test_server.py
|
ox12345/oslo.messaging
|
bdb21c0bcddfb2dac1e0f4d926e7df53d975bf0c
|
[
"Apache-1.1"
] | null | null | null |
oslo_messaging/tests/rpc/test_server.py
|
ox12345/oslo.messaging
|
bdb21c0bcddfb2dac1e0f4d926e7df53d975bf0c
|
[
"Apache-1.1"
] | null | null | null |
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
import warnings
import eventlet
import fixtures
from oslo_config import cfg
from six.moves import mock
import testscenarios
import oslo_messaging
from oslo_messaging import rpc
from oslo_messaging.rpc import dispatcher
from oslo_messaging.rpc import server as rpc_server_module
from oslo_messaging import server as server_module
from oslo_messaging.tests import utils as test_utils
load_tests = testscenarios.load_tests_apply_scenarios
class ServerSetupMixin(object):
class Server(object):
def __init__(self, transport, topic, server, endpoint, serializer):
self.controller = ServerSetupMixin.ServerController()
target = oslo_messaging.Target(topic=topic, server=server)
self.server = oslo_messaging.get_rpc_server(transport,
target,
[endpoint,
self.controller],
serializer=serializer)
def wait(self):
# Wait for the executor to process the stop message, indicating all
# test messages have been processed
self.controller.stopped.wait()
# Check start() does nothing with a running server
self.server.start()
self.server.stop()
self.server.wait()
def start(self):
self.server.start()
class ServerController(object):
def __init__(self):
self.stopped = threading.Event()
def stop(self, ctxt):
self.stopped.set()
class TestSerializer(object):
def serialize_entity(self, ctxt, entity):
return ('s' + entity) if entity else entity
def deserialize_entity(self, ctxt, entity):
return ('d' + entity) if entity else entity
def serialize_context(self, ctxt):
return dict([(k, 's' + v) for k, v in ctxt.items()])
def deserialize_context(self, ctxt):
return dict([(k, 'd' + v) for k, v in ctxt.items()])
def __init__(self):
self.serializer = self.TestSerializer()
def _setup_server(self, transport, endpoint, topic=None, server=None):
server = self.Server(transport,
topic=topic or 'testtopic',
server=server or 'testserver',
endpoint=endpoint,
serializer=self.serializer)
server.start()
return server
def _stop_server(self, client, server, topic=None):
if topic is not None:
client = client.prepare(topic=topic)
client.cast({}, 'stop')
server.wait()
def _setup_client(self, transport, topic='testtopic'):
return oslo_messaging.RPCClient(transport,
oslo_messaging.Target(topic=topic),
serializer=self.serializer)
class TestRPCServer(test_utils.BaseTestCase, ServerSetupMixin):
def __init__(self, *args):
super(TestRPCServer, self).__init__(*args)
ServerSetupMixin.__init__(self)
def setUp(self):
super(TestRPCServer, self).setUp(conf=cfg.ConfigOpts())
@mock.patch('warnings.warn')
def test_constructor(self, warn):
transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:')
target = oslo_messaging.Target(topic='foo', server='bar')
endpoints = [object()]
serializer = object()
access_policy = dispatcher.DefaultRPCAccessPolicy
warnings.simplefilter("always", FutureWarning)
server = oslo_messaging.get_rpc_server(transport,
target,
endpoints,
serializer=serializer,
access_policy=access_policy)
self.assertIs(server.conf, self.conf)
self.assertIs(server.transport, transport)
self.assertIsInstance(server.dispatcher, oslo_messaging.RPCDispatcher)
self.assertIs(server.dispatcher.endpoints, endpoints)
self.assertIs(server.dispatcher.serializer, serializer)
self.assertEqual('blocking', server.executor_type)
self.assertEqual([
mock.call("blocking executor is deprecated. Executor default will "
"be removed. Use explicitly threading or eventlet "
"instead in version 'pike' and will be removed in "
"version 'rocky'",
category=FutureWarning, stacklevel=3)
], warn.mock_calls)
@mock.patch('warnings.warn')
def test_constructor_without_explicit_RPCAccessPolicy(self, warn):
transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:')
target = oslo_messaging.Target(topic='foo', server='bar')
endpoints = [object()]
serializer = object()
warnings.simplefilter("always", FutureWarning)
oslo_messaging.get_rpc_server(transport, target,
endpoints, serializer=serializer)
self.assertEqual([
mock.call(mock.ANY, category=FutureWarning, stacklevel=3),
mock.call("blocking executor is deprecated. Executor default will "
"be removed. Use explicitly threading or eventlet "
"instead in version 'pike' and will be removed in "
"version 'rocky'",
category=FutureWarning, stacklevel=3)
], warn.mock_calls)
def test_server_wait_method(self):
transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:')
target = oslo_messaging.Target(topic='foo', server='bar')
endpoints = [object()]
serializer = object()
class MagicMockIgnoreArgs(mock.MagicMock):
"""MagicMock ignores arguments.
A MagicMock which can never misinterpret the arguments passed to
it during construction.
"""
def __init__(self, *args, **kwargs):
super(MagicMockIgnoreArgs, self).__init__()
server = oslo_messaging.get_rpc_server(transport, target, endpoints,
serializer=serializer)
# Mocking executor
server._executor_cls = MagicMockIgnoreArgs
server._create_listener = MagicMockIgnoreArgs()
server.dispatcher = MagicMockIgnoreArgs()
# Here assigning executor's listener object to listener variable
# before calling wait method, because in wait method we are
# setting executor to None.
server.start()
listener = server.listener
server.stop()
# call server wait method
server.wait()
self.assertEqual(1, listener.cleanup.call_count)
def test_no_target_server(self):
transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:')
server = oslo_messaging.get_rpc_server(
transport,
oslo_messaging.Target(topic='testtopic'),
[])
try:
server.start()
except Exception as ex:
self.assertIsInstance(ex, oslo_messaging.InvalidTarget, ex)
self.assertEqual('testtopic', ex.target.topic)
else:
self.assertTrue(False)
def test_no_server_topic(self):
transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:')
target = oslo_messaging.Target(server='testserver')
server = oslo_messaging.get_rpc_server(transport, target, [])
try:
server.start()
except Exception as ex:
self.assertIsInstance(ex, oslo_messaging.InvalidTarget, ex)
self.assertEqual('testserver', ex.target.server)
else:
self.assertTrue(False)
def _test_no_client_topic(self, call=True):
transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:')
client = self._setup_client(transport, topic=None)
method = client.call if call else client.cast
try:
method({}, 'ping', arg='foo')
except Exception as ex:
self.assertIsInstance(ex, oslo_messaging.InvalidTarget, ex)
self.assertIsNotNone(ex.target)
else:
self.assertTrue(False)
def test_no_client_topic_call(self):
self._test_no_client_topic(call=True)
def test_no_client_topic_cast(self):
self._test_no_client_topic(call=False)
def test_client_call_timeout(self):
transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:')
finished = False
wait = threading.Condition()
class TestEndpoint(object):
def ping(self, ctxt, arg):
with wait:
if not finished:
wait.wait()
server_thread = self._setup_server(transport, TestEndpoint())
client = self._setup_client(transport)
try:
client.prepare(timeout=0).call({}, 'ping', arg='foo')
except Exception as ex:
self.assertIsInstance(ex, oslo_messaging.MessagingTimeout, ex)
else:
self.assertTrue(False)
with wait:
finished = True
wait.notify()
self._stop_server(client, server_thread)
def test_unknown_executor(self):
transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:')
try:
oslo_messaging.get_rpc_server(transport, None, [], executor='foo')
except Exception as ex:
self.assertIsInstance(ex, oslo_messaging.ExecutorLoadFailure)
self.assertEqual('foo', ex.executor)
else:
self.assertTrue(False)
def test_cast(self):
transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:')
class TestEndpoint(object):
def __init__(self):
self.pings = []
def ping(self, ctxt, arg):
self.pings.append(arg)
endpoint = TestEndpoint()
server_thread = self._setup_server(transport, endpoint)
client = self._setup_client(transport)
client.cast({}, 'ping', arg='foo')
client.cast({}, 'ping', arg='bar')
self._stop_server(client, server_thread)
self.assertEqual(['dsfoo', 'dsbar'], endpoint.pings)
def test_call(self):
transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:')
class TestEndpoint(object):
def ping(self, ctxt, arg):
return arg
server_thread = self._setup_server(transport, TestEndpoint())
client = self._setup_client(transport)
self.assertIsNone(client.call({}, 'ping', arg=None))
self.assertEqual(0, client.call({}, 'ping', arg=0))
self.assertFalse(client.call({}, 'ping', arg=False))
self.assertEqual([], client.call({}, 'ping', arg=[]))
self.assertEqual({}, client.call({}, 'ping', arg={}))
self.assertEqual('dsdsfoo', client.call({}, 'ping', arg='foo'))
self._stop_server(client, server_thread)
def test_direct_call(self):
transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:')
class TestEndpoint(object):
def ping(self, ctxt, arg):
return arg
server_thread = self._setup_server(transport, TestEndpoint())
client = self._setup_client(transport)
direct = client.prepare(server='testserver')
self.assertIsNone(direct.call({}, 'ping', arg=None))
self.assertEqual(0, client.call({}, 'ping', arg=0))
self.assertFalse(client.call({}, 'ping', arg=False))
self.assertEqual([], client.call({}, 'ping', arg=[]))
self.assertEqual({}, client.call({}, 'ping', arg={}))
self.assertEqual('dsdsfoo', direct.call({}, 'ping', arg='foo'))
self._stop_server(client, server_thread)
def test_context(self):
transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:')
class TestEndpoint(object):
def ctxt_check(self, ctxt, key):
return ctxt[key]
server_thread = self._setup_server(transport, TestEndpoint())
client = self._setup_client(transport)
self.assertEqual('dsdsb',
client.call({'dsa': 'b'},
'ctxt_check',
key='a'))
self._stop_server(client, server_thread)
def test_failure(self):
transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:')
class TestEndpoint(object):
def ping(self, ctxt, arg):
raise ValueError(arg)
debugs = []
errors = []
def stub_debug(msg, *a, **kw):
if (a and len(a) == 1 and isinstance(a[0], dict) and a[0]):
a = a[0]
debugs.append(str(msg) % a)
def stub_error(msg, *a, **kw):
if (a and len(a) == 1 and isinstance(a[0], dict) and a[0]):
a = a[0]
errors.append(str(msg) % a)
self.useFixture(fixtures.MockPatchObject(
rpc_server_module.LOG, 'debug', stub_debug))
self.useFixture(fixtures.MockPatchObject(
rpc_server_module.LOG, 'error', stub_error))
server_thread = self._setup_server(transport, TestEndpoint())
client = self._setup_client(transport)
try:
client.call({}, 'ping', arg='foo')
except Exception as ex:
self.assertIsInstance(ex, ValueError)
self.assertEqual('dsfoo', str(ex))
self.assertTrue(len(debugs) == 0)
self.assertGreater(len(errors), 0)
else:
self.assertTrue(False)
self._stop_server(client, server_thread)
def test_expected_failure(self):
transport = oslo_messaging.get_rpc_transport(self.conf, url='fake:')
debugs = []
errors = []
def stub_debug(msg, *a, **kw):
if (a and len(a) == 1 and isinstance(a[0], dict) and a[0]):
a = a[0]
debugs.append(str(msg) % a)
def stub_error(msg, *a, **kw):
if (a and len(a) == 1 and isinstance(a[0], dict) and a[0]):
a = a[0]
errors.append(str(msg) % a)
self.useFixture(fixtures.MockPatchObject(
rpc_server_module.LOG, 'debug', stub_debug))
self.useFixture(fixtures.MockPatchObject(
rpc_server_module.LOG, 'error', stub_error))
class TestEndpoint(object):
@oslo_messaging.expected_exceptions(ValueError)
def ping(self, ctxt, arg):
raise ValueError(arg)
server_thread = self._setup_server(transport, TestEndpoint())
client = self._setup_client(transport)
try:
client.call({}, 'ping', arg='foo')
except Exception as ex:
self.assertIsInstance(ex, ValueError)
self.assertEqual('dsfoo', str(ex))
self.assertGreater(len(debugs), 0)
self.assertTrue(len(errors) == 0)
else:
self.assertTrue(False)
self._stop_server(client, server_thread)
class TestMultipleServers(test_utils.BaseTestCase, ServerSetupMixin):
_exchanges = [
('same_exchange', dict(exchange1=None, exchange2=None)),
('diff_exchange', dict(exchange1='x1', exchange2='x2')),
]
_topics = [
('same_topic', dict(topic1='t', topic2='t')),
('diff_topic', dict(topic1='t1', topic2='t2')),
]
_server = [
('same_server', dict(server1=None, server2=None)),
('diff_server', dict(server1='s1', server2='s2')),
]
_fanout = [
('not_fanout', dict(fanout1=None, fanout2=None)),
('fanout', dict(fanout1=True, fanout2=True)),
]
_method = [
('call', dict(call1=True, call2=True)),
('cast', dict(call1=False, call2=False)),
]
_endpoints = [
('one_endpoint',
dict(multi_endpoints=False,
expect1=['ds1', 'ds2'],
expect2=['ds1', 'ds2'])),
('two_endpoints',
dict(multi_endpoints=True,
expect1=['ds1'],
expect2=['ds2'])),
]
@classmethod
def generate_scenarios(cls):
cls.scenarios = testscenarios.multiply_scenarios(cls._exchanges,
cls._topics,
cls._server,
cls._fanout,
cls._method,
cls._endpoints)
# fanout call not supported
def filter_fanout_call(scenario):
params = scenario[1]
fanout = params['fanout1'] or params['fanout2']
call = params['call1'] or params['call2']
return not (call and fanout)
# listening multiple times on same topic/server pair not supported
def filter_same_topic_and_server(scenario):
params = scenario[1]
single_topic = params['topic1'] == params['topic2']
single_server = params['server1'] == params['server2']
return not (single_topic and single_server)
# fanout to multiple servers on same topic and exchange
# each endpoint will receive both messages
def fanout_to_servers(scenario):
params = scenario[1]
fanout = params['fanout1'] or params['fanout2']
single_exchange = params['exchange1'] == params['exchange2']
single_topic = params['topic1'] == params['topic2']
multi_servers = params['server1'] != params['server2']
if fanout and single_exchange and single_topic and multi_servers:
params['expect1'] = params['expect1'][:] + params['expect1']
params['expect2'] = params['expect2'][:] + params['expect2']
return scenario
# multiple endpoints on same topic and exchange
# either endpoint can get either message
def single_topic_multi_endpoints(scenario):
params = scenario[1]
single_exchange = params['exchange1'] == params['exchange2']
single_topic = params['topic1'] == params['topic2']
if single_topic and single_exchange and params['multi_endpoints']:
params['expect_either'] = (params['expect1'] +
params['expect2'])
params['expect1'] = params['expect2'] = []
else:
params['expect_either'] = []
return scenario
for f in [filter_fanout_call, filter_same_topic_and_server]:
cls.scenarios = [i for i in cls.scenarios if f(i)]
for m in [fanout_to_servers, single_topic_multi_endpoints]:
cls.scenarios = [m(i) for i in cls.scenarios]
def __init__(self, *args):
super(TestMultipleServers, self).__init__(*args)
ServerSetupMixin.__init__(self)
def setUp(self):
super(TestMultipleServers, self).setUp(conf=cfg.ConfigOpts())
def test_multiple_servers(self):
url1 = 'fake:///' + (self.exchange1 or '')
url2 = 'fake:///' + (self.exchange2 or '')
transport1 = oslo_messaging.get_rpc_transport(self.conf, url=url1)
        if url1 != url2:
            transport2 = oslo_messaging.get_rpc_transport(self.conf, url=url2)
else:
transport2 = transport1
class TestEndpoint(object):
def __init__(self):
self.pings = []
def ping(self, ctxt, arg):
self.pings.append(arg)
def alive(self, ctxt):
return 'alive'
if self.multi_endpoints:
endpoint1, endpoint2 = TestEndpoint(), TestEndpoint()
else:
endpoint1 = endpoint2 = TestEndpoint()
server1 = self._setup_server(transport1, endpoint1,
topic=self.topic1, server=self.server1)
server2 = self._setup_server(transport2, endpoint2,
topic=self.topic2, server=self.server2)
client1 = self._setup_client(transport1, topic=self.topic1)
client2 = self._setup_client(transport2, topic=self.topic2)
client1 = client1.prepare(server=self.server1)
client2 = client2.prepare(server=self.server2)
if self.fanout1:
client1.call({}, 'alive')
client1 = client1.prepare(fanout=True)
if self.fanout2:
client2.call({}, 'alive')
client2 = client2.prepare(fanout=True)
(client1.call if self.call1 else client1.cast)({}, 'ping', arg='1')
(client2.call if self.call2 else client2.cast)({}, 'ping', arg='2')
self._stop_server(client1.prepare(fanout=None),
server1, topic=self.topic1)
self._stop_server(client2.prepare(fanout=None),
server2, topic=self.topic2)
def check(pings, expect):
self.assertEqual(len(expect), len(pings))
for a in expect:
self.assertIn(a, pings)
if self.expect_either:
check(endpoint1.pings + endpoint2.pings, self.expect_either)
else:
check(endpoint1.pings, self.expect1)
check(endpoint2.pings, self.expect2)
TestMultipleServers.generate_scenarios()
class TestServerLocking(test_utils.BaseTestCase):
def setUp(self):
super(TestServerLocking, self).setUp(conf=cfg.ConfigOpts())
def _logmethod(name):
def method(self, *args, **kwargs):
with self._lock:
self._calls.append(name)
return method
executors = []
class FakeExecutor(object):
def __init__(self, *args, **kwargs):
self._lock = threading.Lock()
self._calls = []
executors.append(self)
submit = _logmethod('submit')
shutdown = _logmethod('shutdown')
self.executors = executors
class MessageHandlingServerImpl(oslo_messaging.MessageHandlingServer):
def _create_listener(self):
return mock.Mock()
def _process_incoming(self, incoming):
pass
self.server = MessageHandlingServerImpl(mock.Mock(), mock.Mock())
self.server._executor_cls = FakeExecutor
def test_start_stop_wait(self):
# Test a simple execution of start, stop, wait in order
eventlet.spawn(self.server.start)
self.server.stop()
self.server.wait()
self.assertEqual(1, len(self.executors))
self.assertEqual(['shutdown'], self.executors[0]._calls)
self.assertTrue(self.server.listener.cleanup.called)
def test_reversed_order(self):
# Test that if we call wait, stop, start, these will be correctly
# reordered
eventlet.spawn(self.server.wait)
# This is non-deterministic, but there's not a great deal we can do
# about that
eventlet.sleep(0)
eventlet.spawn(self.server.stop)
eventlet.sleep(0)
eventlet.spawn(self.server.start)
self.server.wait()
self.assertEqual(1, len(self.executors))
self.assertEqual(['shutdown'], self.executors[0]._calls)
def test_wait_for_running_task(self):
# Test that if 2 threads call a method simultaneously, both will wait,
# but only 1 will call the underlying executor method.
start_event = threading.Event()
finish_event = threading.Event()
running_event = threading.Event()
done_event = threading.Event()
_runner = [None]
class SteppingFakeExecutor(self.server._executor_cls):
def __init__(self, *args, **kwargs):
# Tell the test which thread won the race
_runner[0] = eventlet.getcurrent()
running_event.set()
start_event.wait()
super(SteppingFakeExecutor, self).__init__(*args, **kwargs)
done_event.set()
finish_event.wait()
self.server._executor_cls = SteppingFakeExecutor
start1 = eventlet.spawn(self.server.start)
start2 = eventlet.spawn(self.server.start)
# Wait until one of the threads starts running
running_event.wait()
runner = _runner[0]
        waiter = start2 if runner == start1 else start1
waiter_finished = threading.Event()
waiter.link(lambda _: waiter_finished.set())
# At this point, runner is running start(), and waiter() is waiting for
# it to complete. runner has not yet logged anything.
self.assertEqual(0, len(self.executors))
self.assertFalse(waiter_finished.is_set())
# Let the runner log the call
start_event.set()
done_event.wait()
# We haven't signalled completion yet, so submit shouldn't have run
self.assertEqual(1, len(self.executors))
self.assertEqual([], self.executors[0]._calls)
self.assertFalse(waiter_finished.is_set())
# Let the runner complete
finish_event.set()
waiter.wait()
runner.wait()
# Check that both threads have finished, start was only called once,
# and execute ran
self.assertTrue(waiter_finished.is_set())
self.assertEqual(1, len(self.executors))
self.assertEqual([], self.executors[0]._calls)
def test_start_stop_wait_stop_wait(self):
# Test that we behave correctly when calling stop/wait more than once.
# Subsequent calls should be noops.
self.server.start()
self.server.stop()
self.server.wait()
self.server.stop()
self.server.wait()
self.assertEqual(len(self.executors), 1)
self.assertEqual(['shutdown'], self.executors[0]._calls)
self.assertTrue(self.server.listener.cleanup.called)
def test_state_wrapping(self):
# Test that we behave correctly if a thread waits, and the server state
        # has wrapped when it is next scheduled
# Ensure that if 2 threads wait for the completion of 'start', the
# first will wait until complete_event is signalled, but the second
# will continue
complete_event = threading.Event()
complete_waiting_callback = threading.Event()
start_state = self.server._states['start']
old_wait_for_completion = start_state.wait_for_completion
waited = [False]
def new_wait_for_completion(*args, **kwargs):
if not waited[0]:
waited[0] = True
complete_waiting_callback.set()
complete_event.wait()
old_wait_for_completion(*args, **kwargs)
start_state.wait_for_completion = new_wait_for_completion
# thread1 will wait for start to complete until we signal it
thread1 = eventlet.spawn(self.server.stop)
thread1_finished = threading.Event()
thread1.link(lambda _: thread1_finished.set())
self.server.start()
complete_waiting_callback.wait()
# The server should have started, but stop should not have been called
self.assertEqual(1, len(self.executors))
self.assertEqual([], self.executors[0]._calls)
self.assertFalse(thread1_finished.is_set())
self.server.stop()
self.server.wait()
# We should have gone through all the states, and thread1 should still
# be waiting
self.assertEqual(1, len(self.executors))
self.assertEqual(['shutdown'], self.executors[0]._calls)
self.assertFalse(thread1_finished.is_set())
# Start again
self.server.start()
        # Starting again should instantiate a second executor
self.assertEqual(2, len(self.executors))
self.assertEqual(['shutdown'], self.executors[0]._calls)
self.assertEqual([], self.executors[1]._calls)
self.assertFalse(thread1_finished.is_set())
# Allow thread1 to complete
complete_event.set()
thread1_finished.wait()
# thread1 should now have finished, and stop should not have been
# called again on either the first or second executor
self.assertEqual(2, len(self.executors))
self.assertEqual(['shutdown'], self.executors[0]._calls)
self.assertEqual([], self.executors[1]._calls)
self.assertTrue(thread1_finished.is_set())
@mock.patch.object(server_module, 'DEFAULT_LOG_AFTER', 1)
@mock.patch.object(server_module, 'LOG')
def test_logging(self, mock_log):
# Test that we generate a log message if we wait longer than
# DEFAULT_LOG_AFTER
log_event = threading.Event()
mock_log.warning.side_effect = lambda _, __: log_event.set()
# Call stop without calling start. We should log a wait after 1 second
thread = eventlet.spawn(self.server.stop)
log_event.wait()
# Redundant given that we already waited, but it's nice to assert
self.assertTrue(mock_log.warning.called)
thread.kill()
@mock.patch.object(server_module, 'LOG')
def test_logging_explicit_wait(self, mock_log):
# Test that we generate a log message if we wait longer than
# the number of seconds passed to log_after
log_event = threading.Event()
mock_log.warning.side_effect = lambda _, __: log_event.set()
# Call stop without calling start. We should log a wait after 1 second
thread = eventlet.spawn(self.server.stop, log_after=1)
log_event.wait()
# Redundant given that we already waited, but it's nice to assert
self.assertTrue(mock_log.warning.called)
thread.kill()
@mock.patch.object(server_module, 'LOG')
def test_logging_with_timeout(self, mock_log):
# Test that we log a message after log_after seconds if we've also
# specified an absolute timeout
log_event = threading.Event()
mock_log.warning.side_effect = lambda _, __: log_event.set()
# Call stop without calling start. We should log a wait after 1 second
thread = eventlet.spawn(self.server.stop, log_after=1, timeout=2)
log_event.wait()
# Redundant given that we already waited, but it's nice to assert
self.assertTrue(mock_log.warning.called)
thread.kill()
def test_timeout_wait(self):
# Test that we will eventually timeout when passing the timeout option
# if a preceding condition is not satisfied.
self.assertRaises(server_module.TaskTimeout,
self.server.stop, timeout=1)
def test_timeout_running(self):
# Test that we will eventually timeout if we're waiting for another
# thread to complete this task
# Start the server, which will also instantiate an executor
self.server.start()
self.server.stop()
shutdown_called = threading.Event()
# Patch the executor's stop method to be very slow
def slow_shutdown(wait):
shutdown_called.set()
eventlet.sleep(10)
self.executors[0].shutdown = slow_shutdown
# Call wait in a new thread
thread = eventlet.spawn(self.server.wait)
# Wait until the thread is in the slow stop method
shutdown_called.wait()
# Call wait again in the main thread with a timeout
self.assertRaises(server_module.TaskTimeout,
self.server.wait, timeout=1)
thread.kill()
@mock.patch.object(server_module, 'LOG')
def test_log_after_zero(self, mock_log):
# Test that we do not log a message after DEFAULT_LOG_AFTER if the
# caller gave log_after=1
# Call stop without calling start.
self.assertRaises(server_module.TaskTimeout,
self.server.stop, log_after=0, timeout=2)
# We timed out. Ensure we didn't log anything.
self.assertFalse(mock_log.warning.called)
class TestRPCExposeDecorator(test_utils.BaseTestCase):
def foo(self):
pass
@rpc.expose
def bar(self):
"""bar docstring"""
pass
def test_undecorated(self):
self.assertRaises(AttributeError, lambda: self.foo.exposed)
def test_decorated(self):
self.assertEqual(True, self.bar.exposed)
self.assertEqual("""bar docstring""", self.bar.__doc__)
self.assertEqual('bar', self.bar.__name__)
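# Minimal sketch of the client/server round trip these tests exercise
# (illustrative only, using the in-memory 'fake:' transport as above):
#
#   conf = cfg.ConfigOpts()
#   transport = oslo_messaging.get_rpc_transport(conf, url='fake:')
#   target = oslo_messaging.Target(topic='testtopic', server='testserver')
#   server = oslo_messaging.get_rpc_server(transport, target, [SomeEndpoint()])
#   server.start()
#   client = oslo_messaging.RPCClient(transport,
#                                     oslo_messaging.Target(topic='testtopic'))
#   client.call({}, 'ping', arg='foo')
#   server.stop()
#   server.wait()
#
# SomeEndpoint stands for any object exposing the called method, such as the
# TestEndpoint classes defined above.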
| 36.404555
| 79
| 0.601847
| 32,444
| 0.966602
| 0
| 0
| 7,710
| 0.229704
| 0
| 0
| 5,870
| 0.174885
|
227a7fa4744296865be9c842b020f4d289542d47
| 3,683
|
py
|
Python
|
3.03-pdDataOps.py
|
pgiardiniere/notes-PythonDataScienceHandbook
|
ddb6662d2fbeedd5b6b09ce4d8ddee55813ec589
|
[
"MIT"
] | 2
|
2019-05-01T02:23:02.000Z
|
2019-05-04T03:26:39.000Z
|
3.03-pdDataOps.py
|
pgiardiniere/notes-PythonDataScienceHandbook
|
ddb6662d2fbeedd5b6b09ce4d8ddee55813ec589
|
[
"MIT"
] | null | null | null |
3.03-pdDataOps.py
|
pgiardiniere/notes-PythonDataScienceHandbook
|
ddb6662d2fbeedd5b6b09ce4d8ddee55813ec589
|
[
"MIT"
] | null | null | null |
# Recall material on NP Universal Functions from Ch2
# PD builds on ufuncs functionality a few ways:
# first, for unary operations (negation / trig funcs), ufuncs preserve
# index and column labels in the output
# second, for binary operations (addition / multiplication) PD aligns
# indices when passing objects to the ufunc
# the automatic handling of both makes operations that are error-prone in raw NP essentially bulletproof in PD
# additionally, there are operations when crossing Series/DataFrame structs
##############################
### Ufuncs: Index Preservation
# As PD is designed to work with NP, NP ufuncs work on PD Series/DataFrame
import pandas as pd
import numpy as np
rng = np.random.RandomState(42)
ser = pd.Series(rng.randint(0, 10, 4))
ser
df = pd.DataFrame(rng.randint(0, 10, (3, 4)), columns=['A', 'B', 'C', 'D'])
df
# applying a NP ufunc on either of these objects,
# the result will be another PD object with the indices preserved:
np.exp(ser)
np.sin(df * np.pi / 4)
##############################
### UFuncs: Index Alignment
## Index Alignment in Series
# suppose we are combining two different data sources: the top 3 US states
# by area, and top 3 by population
area = pd.Series({'Alaska': 1723337, 'Texas': 695662,
'California': 423967}, name='area')
population = pd.Series({'California': 38332521, 'Texas': 26448193,
'New York': 19651127}, name='population')
# now, divide to compute population density
population / area
# we see the resulting array contains the union of indices of the two input arrs
# we can verify that using standard Python set arithmetic on the indices
area.index | population.index
# any item for which one or the other doesn't have an entry is marked "NaN"
A = pd.Series([2, 4, 6], index=[0, 1, 2])
B = pd.Series([1, 3, 5], index=[1, 2, 3])
A + B
# if NaN values aren't desired, the fill value can be modified by using object methods
# in place of the operators (passing the "fill_value" keyword argument)
A.add(B, fill_value=0)
## Index Alignment in DataFrame
# similar alignment on both columns AND indices when using DataFrames:
A = pd.DataFrame(rng.randint(0, 20, (2, 2)), columns=list('AB'))
A
B = pd.DataFrame(rng.randint(0, 10, (3, 3)), columns=list('BAC'))
B
A + B
# note that indices are aligned correctly irrespective of order in objects,
# and indices in the result are sorted
# as before, we can use an object method with the "fill_value" keyword to replace NaN
# here, we fill with the mean of all values stored in "A" instead of 0
fill = A.stack().mean()
A.add(B, fill_value=fill)
# Table: Python operators and equivalent PD Object methods:
# + add()
# - sub(), subtract()
# * mul(), multiply()
# / truediv(), div(), divide()
# // floordiv()
# % mod()
# ** pow()
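# illustrative example (using the A and B frames defined above): the operator
# and method forms give the same result, but only the method form accepts
# "fill_value" to control what missing entries become
A * B                     # operator form; non-overlapping entries are NaN
A.mul(B)                  # equivalent method form
A.mul(B, fill_value=1)    # missing entries treated as 1 before multiplying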
##############################
### Ufuncs: Operations Between DataFrame and Series
# index & col alignment is similar when crossing DF and Series
# Remember: as DF is to Series in Pandas,
#           a 2D arr is to a 1D arr in NumPy
# Find difference between a two-dimensional array and one of its rows:
A = rng.randint(10, size=(3, 4))
A
A - A[0]
# Per NP broadcasting rules, subtraction b/w 2D arr and row is done row-wise
# In Pandas, convention similarly operates row-wise by default:
df = pd.DataFrame(A, columns=list('QRST'))
df - df.iloc[0]
# to operate column-wise, use object methods and specify the "axis" keyword
df.subtract(df['R'], axis=0)
# as before, indices are automatically aligned between 2 elements:
halfrow = df.iloc[0, ::2]
halfrow
df - halfrow
# as mentioned, automatic preservation + alignment of indices/cols means
# operations on data in Pandas will maintain data context
# more seamlessly than NP arrs
| 32.59292
| 77
| 0.688298
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,705
| 0.734456
|
227b1e1bc0c209d9e1b5a1176eb8edcc2f765f16
| 1,286
|
py
|
Python
|
cogs/feelings.py
|
Surice/dc_sophie
|
fa42f457b7b9d68a156a4b6db41e3d849238384c
|
[
"MIT"
] | null | null | null |
cogs/feelings.py
|
Surice/dc_sophie
|
fa42f457b7b9d68a156a4b6db41e3d849238384c
|
[
"MIT"
] | null | null | null |
cogs/feelings.py
|
Surice/dc_sophie
|
fa42f457b7b9d68a156a4b6db41e3d849238384c
|
[
"MIT"
] | null | null | null |
from itertools import chain
from components.config import getConfig
from components.convert import fetchUser, pretRes
import discord
from discord import channel
from discord.ext import commands
class Feelings(commands.Cog):
def __init__(self, client) -> None:
self.client = client
self.config = getConfig()
@commands.command()
async def love(self, msg: commands.Context, user: discord.User = None):
user = await fetchUser(self.client, user)
await msg.channel.send(f"> {self.client.user.mention} hat {user.mention} ganz dolle lieb ❤️");
@commands.command()
async def arsch(self, msg: commands.Context, user: discord.User = None):
user = await fetchUser(self.client, user)
await msg.channel.send(f"> {user.display_name} ist ein Arsch! <:nani:663857832256471084>");
@commands.command()
async def unimpressed(self, msg: commands.Context, user: discord.User = None):
user = await fetchUser(self.client, user)
userPropertie = ""
        if user is not None:
userPropertie = f"von {user.mention} "
await msg.channel.send(f"> {self.client.user.mention} ist {userPropertie}nicht beeindruckt... ");
def setup(client: commands.Bot) -> None:
client.add_cog(Feelings(client))
| 33.842105
| 105
| 0.681182
| 1,014
| 0.786047
| 0
| 0
| 863
| 0.668992
| 791
| 0.613178
| 235
| 0.182171
|
227bae7ab6f777a68303a1c49a615d5f64a02cfd
| 2,400
|
py
|
Python
|
Solutions/arrays/Median_of_Two_Sorted_Arrays.py
|
HuitingZhengAvery/Leetcode-solutions
|
ac21cef395717abab188e76895ad83cf212fd60f
|
[
"MIT"
] | 1
|
2019-06-21T16:28:59.000Z
|
2019-06-21T16:28:59.000Z
|
Solutions/arrays/Median_of_Two_Sorted_Arrays.py
|
HuitingZhengAvery/Leetcode-solutions
|
ac21cef395717abab188e76895ad83cf212fd60f
|
[
"MIT"
] | null | null | null |
Solutions/arrays/Median_of_Two_Sorted_Arrays.py
|
HuitingZhengAvery/Leetcode-solutions
|
ac21cef395717abab188e76895ad83cf212fd60f
|
[
"MIT"
] | null | null | null |
'''
There are two sorted arrays nums1 and nums2 of size m and n respectively.
Find the median of the two sorted arrays. The overall run time complexity should be O(log (m+n)).
You may assume nums1 and nums2 cannot be both empty.
'''
### Nature: the meaning of MEDIAN is that the number of elements less than it
### is equal to the number of elements greater than it.
### len(left) == len(right)
### It is NOT important that if these two parts are sorted.
## Time: O(log(min(m, n))), Space: O(1) --> we need fixed number of variables
# Iterative approach
# Central logic: there exist i, j where i+j = (m+n+1) // 2 AND
# A[i-1] (leftmax of A) < B[j] (rightmin of B) AND B[j-1] < A[i]
# (in general, all left <= all right)
def findMedianSortedArrays(nums1, nums2):
m, n = len(nums1), len(nums2)
# To ensure j will not be negative
if m > n:
m, n = n, m
nums1, nums2 = nums2, nums1
# (m+n+1) plus 1 makes sure i & j are the minimums of the right part, AND
# that j-1 (which is left max) will not be negative
    imin, imax, half = 0, m, (m+n+1) // 2  # integer division so indices stay ints
while imin <= imax: # This will directly handle edge cases like len(A) == 0 etc
        i = (imin+imax) // 2
j = half - i
# case one: i hasn't exceeded array 1 and is too small
if i < m and nums2[j-1] > nums1[i]:
imin = i+1
# case two: i-1 hasn't exceeded the smallest and i is too big
elif i > 0 and nums1[i-1] > nums2[j]:
imax = i-1
# case three: i is perfect
else:
# edge case 1:
# all nums in nums1 is bigger than nums2
if i == 0:
max_of_left = nums2[j-1] # j-1 >= 0 is ensured
# edge case 2:
# the opposite, AND m==n or m=n-1
elif j == 0:
max_of_left = nums1[m-1]
# general case:
else:
max_of_left = max(nums1[i-1], nums2[j-1])
if (m+n) % 2 == 1:
return max_of_left
# edge case: when A[i] would be out of index bound
if i == m:
min_of_right = nums2[j]
# edge case: when B[j] would be out of index bound
elif j == n:
min_of_right = nums1[i]
else:
min_of_right = min(nums1[i], nums2[j])
return (max_of_left + min_of_right) / 2.0
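# quick sanity check (illustrative only, not part of the original solution):
# merging [1, 3] and [2] gives [1, 2, 3] with median 2; merging [1, 2] and
# [3, 4] gives [1, 2, 3, 4] with median 2.5
if __name__ == "__main__":
    print(findMedianSortedArrays([1, 3], [2]))     # expected: 2
    print(findMedianSortedArrays([1, 2], [3, 4]))  # expected: 2.5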
| 36.923077
| 97
| 0.542083
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,295
| 0.539583
|
227c213f9c9f02d257d21830222edf425fe68721
| 781
|
py
|
Python
|
carl/envs/mario/mario_game.py
|
automl/genRL
|
b7382fec9006d7da768ad7252194c6c5f1b2bbd7
|
[
"Apache-2.0"
] | 27
|
2021-09-13T21:50:10.000Z
|
2022-03-30T15:35:38.000Z
|
carl/envs/mario/mario_game.py
|
automl/genRL
|
b7382fec9006d7da768ad7252194c6c5f1b2bbd7
|
[
"Apache-2.0"
] | 35
|
2021-09-15T07:20:29.000Z
|
2022-03-02T15:14:31.000Z
|
carl/envs/mario/mario_game.py
|
automl/genRL
|
b7382fec9006d7da768ad7252194c6c5f1b2bbd7
|
[
"Apache-2.0"
] | 2
|
2022-01-13T11:13:12.000Z
|
2022-03-14T06:11:13.000Z
|
from abc import ABC, abstractmethod
class MarioGame(ABC):
@abstractmethod
def getPort(self) -> int:
pass
@abstractmethod
def initGame(self):
pass
@abstractmethod
def stepGame(self, left: bool, right: bool, down: bool, speed: bool, jump: bool):
pass
@abstractmethod
def resetGame(self, level: str, timer: int, mario_state: int, inertia: float):
pass
@abstractmethod
def computeObservationRGB(self):
pass
@abstractmethod
def computeReward(self) -> float:
pass
@abstractmethod
def computeDone(self) -> bool:
pass
@abstractmethod
def getCompletionPercentage(self) -> float:
pass
@abstractmethod
def getFrameSize(self) -> int:
pass
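# Illustrative only: a hypothetical no-op implementation sketching how the
# abstract interface above might be satisfied; every abstract method must be
# overridden before the class can be instantiated. The real game backend is
# not part of this file.
class NoopMarioGame(MarioGame):
    def getPort(self) -> int:
        return 0
    def initGame(self):
        pass
    def stepGame(self, left: bool, right: bool, down: bool, speed: bool, jump: bool):
        pass
    def resetGame(self, level: str, timer: int, mario_state: int, inertia: float):
        pass
    def computeObservationRGB(self):
        return None
    def computeReward(self) -> float:
        return 0.0
    def computeDone(self) -> bool:
        return True
    def getCompletionPercentage(self) -> float:
        return 0.0
    def getFrameSize(self) -> int:
        return 0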
| 19.525
| 85
| 0.62484
| 742
| 0.950064
| 0
| 0
| 668
| 0.855314
| 0
| 0
| 0
| 0
|
227c29400ceb467de41b94027e6c73ad4c909b28
| 16,832
|
py
|
Python
|
sec5.2/train.py
|
Z-T-WANG/ConvergentDQN
|
1b7f1857e33bc0a41b16ed6fe3251cb78220c691
|
[
"MIT"
] | 1
|
2021-08-20T11:38:58.000Z
|
2021-08-20T11:38:58.000Z
|
sec5.2/train.py
|
Z-T-WANG/ConvergentDQN
|
1b7f1857e33bc0a41b16ed6fe3251cb78220c691
|
[
"MIT"
] | null | null | null |
sec5.2/train.py
|
Z-T-WANG/ConvergentDQN
|
1b7f1857e33bc0a41b16ed6fe3251cb78220c691
|
[
"MIT"
] | null | null | null |
import torch
import torch.optim as optim
import torch.nn.functional as F
import optimizers
import time, os, random
import numpy as np
import math, copy
from collections import deque
from common.utils import epsilon_scheduler, beta_scheduler, update_target, print_log, load_model, print_args
from model import DQN
from common.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer
#from matplotlib import pyplot
def train(env, args):
image_shape = env.observation_space.shape[1:]
current_model = DQN(env, args).to(args.device)
target_model = DQN(env, args).to(args.device)
print(' Total params: %.2fM' % (sum(p.numel() for p in current_model.parameters())/1000000.0))
for para in target_model.parameters(): para.requires_grad = False
update_target(current_model, target_model)
epsilon_by_frame = epsilon_scheduler(args.eps_start, args.eps_mid, args.eps_final)
beta_by_frame = beta_scheduler(args.beta_start, args.beta_frames)
if args.prioritized_replay:
replay_buffer = PrioritizedReplayBuffer(args.buffer_size, args.alpha, args.IS_weight_only_smaller, allowed_avg_min_ratio = args.ratio_min_prio)
else:
replay_buffer = ReplayBuffer(args.buffer_size)
#args.action_space = env.unwrapped.get_action_meanings()
args.init_lives = env.unwrapped.ale.lives()
print_args(args)
args.do_update_target = False
# specify the RL algorithm to use
if args.algorithm != "DQN" and args.algorithm != "Residual" and args.algorithm != "CDQN" :
currentTask = "DQN"
args.currentTask = currentTask
else:
currentTask = args.algorithm
args.currentTask = args.algorithm
# prepare the optimizer
lr = args.lr
beta1 = args.beta1
beta2 = args.beta2
parameters = current_model.parameters
args.optim = args.optim.lower()
if args.optim=='sgd':
optimizer = optim.SGD(parameters(), lr=lr, momentum=beta1)
elif args.optim=='adam':
optimizer = optimizers.AdamW(parameters(), lr=lr, eps = args.adam_eps, betas=(beta1,beta2), amsgrad=False)
elif args.optim.startswith("adamb"):
args.optim = "adambelief"
optimizer = optimizers.AdamBelief(parameters(), lr=lr, eps = args.adam_eps, betas=(beta1,beta2), amsgrad=False)
elif args.optim=='laprop':
optimizer = optimizers.LaProp(parameters(), lr=lr, eps = args.adam_eps, betas=(beta1,beta2), amsgrad=False)
else:
assert False, "The specified optimizer name {} is non-existent".format(args.optim)
print(currentTask)
reward_list, length_list, loss_list, off_policy_rate_list, gen_loss_list = [], [], [], [], []
clip_reward = True ###
state = env.reset()
# the number of parallelized computation is maximally "arg.train_freq" to guarantee that the computation order is still consistent with the original method
num_task = args.train_freq
args.num_task = num_task
episode_rewards = [0. for _i in range(num_task)]
life_lengths = [0 for _i in range(num_task)]
envs = [env]
for _i in range(num_task-1): envs.append(copy.deepcopy(env))
for env in envs: env.seed(random.randrange(1000000))
states = [state for _i in range(num_task)]
rewards = [0 for _i in range(num_task)]
evaluation_interval = args.evaluation_interval
data_to_store = []
prev_time = time.time()
prev_step = 0
step_idx = 1 # initialization of step_idx
image_shape = env.observation_space.shape[1:]
if args.save_best:
recent_performances = deque([], maxlen = 40)
recent_models = deque([], maxlen = 20)
best_performance = -float("inf")
while step_idx <= args.max_steps:
# decide the action
epsilons = ( epsilon_by_frame(idx) for idx in range(step_idx, step_idx + num_task) ) if step_idx>args.learning_start else (1. for idx in range(num_task))
tensored_states = torch.from_numpy(np.array([state._frames for state in states]).reshape((num_task, -1) + image_shape)).to(args.device).float().div_(255.)
actions, evaluateds, (Qss, bestActions) = current_model.act(tensored_states, epsilons)
for _i, (env, state, action, Qs, bestAction, reward) in enumerate(zip(envs, states, actions, Qss, bestActions, rewards)):
# the environment proceeds by one step (4 frames)
next_state, reward, done, info = env.step(action)
if clip_reward:
raw_reward, reward = reward
else:
raw_reward = reward
rewards[_i] = float(reward)
episode_rewards[_i] += raw_reward
life_lengths[_i] += 1
# store the transition into the memory replay
if not args.randomly_discard_experience or (args.randomly_discard_experience and random.random()>=0.5): # the data may be randomly discarded
data_to_store.append((state, action, reward, next_state, float(done)))
if data_to_store:
for data in data_to_store:
replay_buffer.add(*data)
if args.randomly_replace_memory and len(replay_buffer) >= args.buffer_size:
# probably randomly choose an index to replace
replay_buffer._next_idx = random.randrange(args.buffer_size)
data_to_store.clear()
# record the performance of a trajectory
if done:
length_list.append(life_lengths[_i])
life_lengths[_i] = 0
# only the reward of a real full episode is recorded
if env.unwrapped.ale.game_over() or "TimeLimit.truncated" in info:
reward_list.append(episode_rewards[_i])
if not args.silent:
if not os.path.isdir(args.env): os.mkdir(args.env)
with open(os.path.join(args.env, '{}_{}.txt'.format(currentTask, args.comment)), 'a') as f:
f.write('{:.0f}\t{}\n'.format(step_idx*4, episode_rewards[_i]))
if args.save_best and step_idx > args.learning_start:
recent_performances.append(episode_rewards[_i])
mean_performance = np.mean(recent_performances)
if best_performance < mean_performance and len(recent_performances)>=40:
assert len(recent_models)==20
best_performance = mean_performance
torch.save((recent_models[0], step_idx), os.path.join(args.env, '{}_{}.pth'.format(currentTask, args.comment)))
recent_models.append(current_model.state_dict().copy())
episode_rewards[_i] = 0.
states[_i] = env.reset()
else:
states[_i] = next_state
# optimize
if step_idx % args.train_freq == 0 and step_idx > max(args.learning_start, 2*args.batch_size):
beta = beta_by_frame(step_idx)
loss, off_policy_rate = compute_td_loss(current_model, target_model, replay_buffer, optimizer, args, beta)
loss_list.append(loss); off_policy_rate_list.append(off_policy_rate)
# update the target network
if step_idx % args.update_target == 0 and currentTask != "Residual":
# we defer the update of the target network to the optimization routine to ensure that the target network is not exactly equal to current network
args.do_update_target = True
#update_target(current_model, target_model)
# print the statistics
if step_idx % evaluation_interval == 0:
# it works only if there is at least one episode to report; otherwise "evaluation_interval" is increased
if len(reward_list) > 0:
kwargs = {}
kwargs["Off-Policy"] = off_policy_rate_list
print_log(step_idx, prev_step, prev_time, reward_list, length_list, loss_list, args, '{}{:.0e}{}'.format(currentTask, args.lr, args.comment), **kwargs)
reward_list.clear(); length_list.clear(); loss_list.clear()
for v in kwargs.values():
if type(v)==list: v.clear()
prev_step = step_idx
prev_time = time.time()
else:
evaluation_interval += args.evaluation_interval
step_idx += 1
i_count=0
accu1, accu2 = 0., 0.
accu_loss = 0.
def compute_td_loss(current_model, target_model, replay_buffer, optimizer, args, beta=None):
"""
Calculate loss and optimize
"""
global i_count, accu1, accu2, accu_loss
# sample data
if args.prioritized_replay:
state_next_state, action_, reward_, done, weights_, true_weights, indices = replay_buffer.sample(args.batch_size, beta)
weights = torch.from_numpy(weights_).to(args.device, non_blocking=True)
else:
state_next_state, action_, reward_, done, indices = replay_buffer.sample(args.batch_size)
weights = torch.ones(args.batch_size); weights_ = weights.numpy(); true_weights = weights_
weights = weights.to(args.device, non_blocking=True)
# we move data to GPU in chunks
state_next_state = torch.from_numpy(state_next_state).to(args.device, non_blocking=True).float().div_(255)
state, next_state = state_next_state
action = torch.from_numpy(action_).to(args.device, non_blocking=True)
gamma_mul_one_minus_done_ = (args.gamma * (1. - done)).astype(np.float32)
if args.currentTask == "DQN":
# in some cases these data do not really need to be copied to GPU
reward, gamma_mul_one_minus_done = torch.from_numpy(np.stack((reward_, gamma_mul_one_minus_done_))).to(args.device, non_blocking=True)
##### start training #####
optimizer.zero_grad()
# we use "values" to refer to Q values for all state-actions, and use "value" to refer to Q values for states
if args.currentTask == "DQN":
if args.double:
with torch.no_grad():
next_q_values = current_model(next_state)
next_q_action = next_q_values.max(1)[1].unsqueeze(1) # **unsqueeze
target_next_q_values = target_model(next_state)
next_q_value = target_next_q_values.gather(1, next_q_action).squeeze()
next_q_action = next_q_action.squeeze()
else:
with torch.no_grad():
next_q_value, next_q_action = target_model(next_state).max(1)
expected_q_value = torch.addcmul(reward, tensor1=next_q_value, tensor2=gamma_mul_one_minus_done)
q_values = current_model(state)
q_value = q_values.gather(1, action.unsqueeze(1)).squeeze(1)
loss = F.mse_loss(q_value, expected_q_value, reduction='none')
if args.prioritized_replay:
diff = (q_value.detach() - expected_q_value).cpu().numpy()
prios = np.abs(diff) + args.prio_eps #
loss = (loss * weights).mean()/2.
loss.backward()
# we report the mean squared error instead of the Huber loss as the loss
with torch.no_grad():
report_loss = (F.mse_loss(q_value, expected_q_value, reduction='none')*weights).mean().item()
if args.currentTask == "CDQN":
# compute the current and next state values in a single pass
size = list(state_next_state.size())
current_and_next_states = state_next_state.view([-1]+size[2:])
# compute the q values and the gradient
all_q_values = current_model(current_and_next_states)
with torch.no_grad():
q_values, next_q_values = all_q_values[:args.batch_size], all_q_values[args.batch_size:2*args.batch_size]
q_value = q_values.gather(1, action.unsqueeze(1)).squeeze(1)
next_q_value, next_q_action = next_q_values.max(1)
q_value, next_q_value = torch.stack((q_value, next_q_value)).cpu().numpy()
next_q_values_target = target_model(next_state)
if args.double:
next_q_value_target = next_q_values_target.gather(1, next_q_action.unsqueeze(1)).squeeze().cpu().numpy()
else:
next_q_value_target = np.max(next_q_values_target.cpu().numpy(), axis=1)
expected_q_value_self = reward_ + gamma_mul_one_minus_done_ * next_q_value
expected_q_value_target = reward_ + gamma_mul_one_minus_done_ * next_q_value_target
target_mask = (np.abs(q_value - expected_q_value_target) >= np.abs(q_value - expected_q_value_self))
expected_q_value = np.where(target_mask, expected_q_value_target, expected_q_value_self)
target_mask = target_mask.astype(np.float32)
diff = q_value - expected_q_value
if args.prioritized_replay:
prio_diff = diff
prios = np.abs(prio_diff) + args.prio_eps
# the Huber loss is used
weighted_diff = weights_ * diff
q_value_grad = 1./args.batch_size *weighted_diff
all_grads = torch.zeros_like(all_q_values)
# manually backpropagate the gradient through the term "expected_q_value"
next_q_value_grad = - (1.-target_mask) * q_value_grad
next_q_value_grad = next_q_value_grad * gamma_mul_one_minus_done_
grads = torch.from_numpy(np.concatenate([q_value_grad, next_q_value_grad], axis=0)).unsqueeze(1).to(args.device)
all_grads.scatter_(1, torch.cat([action, next_q_action], dim=0).unsqueeze(1), grads)
all_q_values.backward(all_grads) # this method makes it run faster
report_loss = np.dot(diff, weights_ * diff)/args.batch_size
if args.currentTask == "Residual":
# compute the current and next state values in a single pass
size = list(state_next_state.size())
current_and_next_states = state_next_state.view([-1]+size[2:])
# compute the q values and the gradient
all_q_values = current_model(current_and_next_states)
with torch.no_grad():
q_values, next_q_values = all_q_values[:args.batch_size], all_q_values[args.batch_size:2*args.batch_size]
q_value = q_values.gather(1, action.unsqueeze(1)).squeeze(1)
next_q_value, next_q_action = next_q_values.max(1)
q_value, next_q_value = torch.stack((q_value, next_q_value)).cpu().numpy()
expected_q_value = reward_ + gamma_mul_one_minus_done_ * next_q_value
# then compute the q values and the loss
diff = q_value - expected_q_value
if args.prioritized_replay:
prio_diff = diff
prios = np.abs(prio_diff) + args.prio_eps
# the Huber loss is used
weighted_diff = weights_ * diff
q_value_grad = 1./args.batch_size *weighted_diff
all_grads = torch.zeros_like(all_q_values)
# manually backpropagate the gradient through the term "expected_q_value"
next_q_value_grad = - q_value_grad
next_q_value_grad = next_q_value_grad * gamma_mul_one_minus_done_
grads = torch.from_numpy(np.concatenate([q_value_grad, next_q_value_grad], axis=0)).unsqueeze(1).to(args.device)
all_grads.scatter_(1, torch.cat([action, next_q_action], dim=0).unsqueeze(1), grads)
all_q_values.backward(all_grads) # this method makes it run faster
report_loss = np.dot(diff, weights_ * diff)/args.batch_size
if args.prioritized_replay:
replay_buffer.update_priorities(indices, prios)
# gradient clipping
if args.grad_clip > 0.:
grad_norm = torch.nn.utils.clip_grad_norm_(current_model.parameters(), max_norm = args.grad_clip)
accu1 += grad_norm
accu2 += grad_norm**2
if args.do_update_target: update_target(current_model, target_model); args.do_update_target=False
optimizer.step()
    off_policy_rate = np.mean((np.argmax(q_values.detach().cpu().numpy(), axis=1)!=action_).astype(np.float64)*true_weights)
i_count += 1
accu_loss += report_loss
report_period = math.ceil(args.evaluation_interval/args.train_freq)
if i_count % report_period == 0 and accu1 != 0.:
print("gradient norm {:.3f} +- {:.3f}".format(accu1/report_period, math.sqrt(accu2/report_period-(accu1/report_period)**2)))
accu1, accu2 = 0., 0.
if not args.silent:
with open(os.path.join(args.env, '{}mse_{}.txt'.format(args.currentTask, args.comment)), 'a') as f:
f.write('{:.0f}\t{}\n'.format((i_count*args.train_freq+args.learning_start)*4, accu_loss/report_period))
accu_loss = 0.
return report_loss, off_policy_rate
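# Toy illustration (made-up numbers, guarded so it never runs on import): the
# C-DQN branch above chooses, per sample, whichever Bellman target (the
# target-network bootstrap or the current-network bootstrap) yields the
# LARGER absolute TD error, and regresses towards that one.
if __name__ == "__main__":
    q_value                 = np.array([1.0, 1.0, 1.0])
    expected_q_value_self   = np.array([1.2, 0.5, 1.1])  # bootstrap from current net
    expected_q_value_target = np.array([1.1, 2.0, 0.4])  # bootstrap from target net
    target_mask = (np.abs(q_value - expected_q_value_target)
                   >= np.abs(q_value - expected_q_value_self))
    expected_q_value = np.where(target_mask, expected_q_value_target, expected_q_value_self)
    print(expected_q_value)  # [1.2 2.  0.4] -> per-sample larger-error target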
| 49.798817
| 172
| 0.649774
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,208
| 0.131179
|
227d0c74c72ef68b3f928e3787684e5cdd3c8d18
| 6,290
|
py
|
Python
|
tests/broker/test_update_chassis.py
|
ned21/aquilon
|
6562ea0f224cda33b72a6f7664f48d65f96bd41a
|
[
"Apache-2.0"
] | 7
|
2015-07-31T05:57:30.000Z
|
2021-09-07T15:18:56.000Z
|
tests/broker/test_update_chassis.py
|
ned21/aquilon
|
6562ea0f224cda33b72a6f7664f48d65f96bd41a
|
[
"Apache-2.0"
] | 115
|
2015-03-03T13:11:46.000Z
|
2021-09-20T12:42:24.000Z
|
tests/broker/test_update_chassis.py
|
ned21/aquilon
|
6562ea0f224cda33b72a6f7664f48d65f96bd41a
|
[
"Apache-2.0"
] | 13
|
2015-03-03T11:17:59.000Z
|
2021-09-09T09:16:41.000Z
|
#!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2012,2013,2015,2016,2017,2018 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the update chassis command."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
from chassistest import VerifyChassisMixin
class TestUpdateChassis(TestBrokerCommand, VerifyChassisMixin):
def test_100_update_ut3c5(self):
ip = self.net["unknown0"].usable[6]
self.dsdb_expect_add("ut3c5.aqd-unittest.ms.com", ip, "oa",
comments="Some new chassis comments")
command = ["update", "chassis", "--chassis", "ut3c5.aqd-unittest.ms.com",
"--rack", "ut3", "--serial", "ABC5678",
"--model", "c-class", "--ip", ip,
"--comments", "Some new chassis comments"]
self.noouttest(command)
def test_110_verify_ut3c5(self):
self.verifychassis("ut3c5.aqd-unittest.ms.com", "hp", "c-class",
"ut3", "a", "3", "ABC5678",
comments="Some new chassis comments",
ip=self.net["unknown0"].usable[6],
grn="grn:/ms/ei/aquilon/aqd")
def test_200_update_bad_ip(self):
ip = self.net["unknown0"].usable[6]
command = ["update", "chassis", "--ip", ip,
"--chassis", "ut3c1.aqd-unittest.ms.com"]
out = self.badrequesttest(command)
self.matchoutput(out,
"IP address %s is already in use by on-board admin "
"interface oa of chassis "
"ut3c5.aqd-unittest.ms.com." % ip,
command)
def test_200_update_bad_model(self):
command = ["update", "chassis", "--model", "uttorswitch",
"--chassis", "ut3c1.aqd-unittest.ms.com"]
out = self.notfoundtest(command)
self.matchoutput(out,
"Not Found: Model uttorswitch, model type 'chassis' or 'aurora_chassis' not found.",
command)
def test_200_not_chassis(self):
command = ["update", "chassis", "--chassis",
"ut3gd1r01.aqd-unittest.ms.com",
"--comments", "Not a chassis"]
out = self.badrequesttest(command)
self.matchoutput(out,
"Switch ut3gd1r01.aqd-unittest.ms.com exists, but "
"is not a chassis.",
command)
# Moving this test here from test_add_chassis so that
# test_add_chassis can run before test_add_netdev
def test_200_primary_reuse(self):
command = ["add", "chassis", "--chassis",
"ut3gd1r01.aqd-unittest.ms.com",
"--rack", "ut3", "--model", "utchassis"]
out = self.badrequesttest(command)
self.matchoutput(out,
"DNS Record ut3gd1r01.aqd-unittest.ms.com is already "
"used as the primary name of switch ut3gd1r01.",
command)
def test_201_update_dsdb_fail(self):
command = ["update", "chassis", "--comment", "TEST DSDB FAIL",
"--chassis", "ut3c1.aqd-unittest.ms.com"]
out, err = self.successtest(command)
self.matchoutput(err, "Chassis ut3c1 update in DSDB failed!", command)
self.matchoutput(err, "Update chassis ut3c1 in DSDB failed, "
"proceeding in AQDB.", command)
def test_202_update_dsdb_verify(self):
command = ["show_chassis", "--chassis", "ut3c1.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.matchoutput(out, "Comments: TEST DSDB FAIL",
command)
def test_300_update_chassis_grn(self):
command = ["update_chassis", "--chassis", "ut3c5.aqd-unittest.ms.com",
"--grn", "grn:/ms/ei/aquilon/ut2"]
self.noouttest(command)
def test_310_verify_update_chassis_grn(self):
command = ["show_chassis", "--chassis", "ut3c5.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.matchoutput(out, "Primary Name: ut3c5.aqd-unittest.ms.com",
command)
self.matchoutput(out, "Owned by GRN: grn:/ms/ei/aquilon/ut2", command)
def test_320_update_chassis_eon_id(self):
command = ["update_chassis", "--chassis", "ut3c5.aqd-unittest.ms.com",
"--eon_id", "3"]
self.noouttest(command)
def test_330_verify_update_chassis_eon_id(self):
command = ["show_chassis", "--chassis", "ut3c5.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.matchoutput(out, "Primary Name: ut3c5.aqd-unittest.ms.com",
command)
self.matchoutput(out, "Owned by GRN: grn:/ms/ei/aquilon/unittest",
command)
def test_340_update_chassis_clear_grn(self):
command = ["update_chassis", "--chassis", "ut3c5.aqd-unittest.ms.com",
"--clear_grn"]
self.noouttest(command)
def test_350_verify_update_chassis_clear_grn(self):
command = ["show_chassis", "--chassis", "ut3c5.aqd-unittest.ms.com"]
out = self.commandtest(command)
self.matchoutput(out, "Primary Name: ut3c5.aqd-unittest.ms.com",
command)
self.matchoutput(out, "Owned by GRN: grn:/ms/ei/aquilon/aqd", command)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestUpdateChassis)
unittest.TextTestRunner(verbosity=2).run(suite)
| 43.082192
| 109
| 0.592687
| 5,173
| 0.822417
| 0
| 0
| 0
| 0
| 0
| 0
| 2,784
| 0.442607
|
97d3b4402a419951038455ac0b5764d606d2b2b1
| 11,289
|
py
|
Python
|
label_propagation/label_propagation.py
|
lujiaxuan0520/NAIC-ReID-2020-contest
|
51953a6927afb71733e39845fec9723210d37a1b
|
[
"MIT"
] | 1
|
2020-12-13T12:39:30.000Z
|
2020-12-13T12:39:30.000Z
|
label_propagation/label_propagation.py
|
lujiaxuan0520/NAIC-ReID-2020-contest
|
51953a6927afb71733e39845fec9723210d37a1b
|
[
"MIT"
] | null | null | null |
label_propagation/label_propagation.py
|
lujiaxuan0520/NAIC-ReID-2020-contest
|
51953a6927afb71733e39845fec9723210d37a1b
|
[
"MIT"
] | null | null | null |
#########################################################################################
# semi-supervised learning: use label propagation to make pseudo labels for unlabeled data
# This is not a parallel implementation of label propagation and may require a lot of time
# Author: Jiaxuan Lu
#########################################################################################
import time
import numpy as np
import math
import os, sys
import os.path as osp
sys.path.append("..")
sys.path.extend([os.path.join(root, name) for root, dirs, _ in os.walk("../") for name in dirs])
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
from torchreid.dataset_loader import ImageDataset
from torchreid import transforms as T
from torchreid import models
from torchreid.utils.avgmeter import AverageMeter
from torchreid.utils.torchtools import count_num_param
gpu_devices = "7" # gpu devices
extended_data = False # whether to use extended data
model_weight = "./log/resnet50-xent/vmgn_hgnn13/checkpoint_ep65.pth.tar"
arch = "vmgn_hgnn"
test_batch = 500
dataset_name = "pclreid"
global_branch = True
dist_metric = "cosine"
root = "./"
height = 256
width = 128
seed = 1
workers = 4
# return k neighbors index
def navie_knn(dataSet, query, k):
numSamples = dataSet.shape[0]
## step 1: calculate Euclidean distance
diff = np.tile(query, (numSamples, 1)) - dataSet
squaredDiff = diff ** 2
squaredDist = np.sum(squaredDiff, axis=1) # sum is performed by row
## step 2: sort the distance
sortedDistIndices = np.argsort(squaredDist)
if k > len(sortedDistIndices):
k = len(sortedDistIndices)
return sortedDistIndices[0:k]
# build a big graph (normalized weight matrix)
def buildGraph(MatX, kernel_type, rbf_sigma=None, knn_num_neighbors=None):
num_samples = MatX.shape[0]
affinity_matrix = np.zeros((num_samples, num_samples), np.float32)
if kernel_type == 'rbf':
        if rbf_sigma is None:
raise ValueError('You should input a sigma of rbf kernel!')
for i in range(num_samples):
row_sum = 0.0
for j in range(num_samples):
diff = MatX[i, :] - MatX[j, :]
affinity_matrix[i][j] = np.exp(sum(diff ** 2) / (-2.0 * rbf_sigma ** 2))
row_sum += affinity_matrix[i][j]
affinity_matrix[i][:] /= row_sum
elif kernel_type == 'knn':
        if knn_num_neighbors is None:
raise ValueError('You should input a k of knn kernel!')
for i in range(num_samples):
k_neighbors = navie_knn(MatX, MatX[i, :], knn_num_neighbors)
affinity_matrix[i][k_neighbors] = 1.0 / knn_num_neighbors
else:
raise NameError('Not support kernel type! You can use knn or rbf!')
return affinity_matrix
# label propagation
def labelPropagation(Mat_Label, Mat_Unlabel, labels, kernel_type='rbf', rbf_sigma=1.5, \
knn_num_neighbors=10, max_iter=500, tol=1e-3):
# initialize
num_label_samples = Mat_Label.shape[0]
num_unlabel_samples = Mat_Unlabel.shape[0]
num_samples = num_label_samples + num_unlabel_samples
labels_list = np.unique(labels)
num_classes = len(labels_list)
MatX = np.vstack((Mat_Label, Mat_Unlabel))
clamp_data_label = np.zeros((num_label_samples, num_classes), np.float32)
for i in range(num_label_samples):
clamp_data_label[i][labels[i]] = 1.0
label_function = np.zeros((num_samples, num_classes), np.float32)
label_function[0: num_label_samples] = clamp_data_label
label_function[num_label_samples: num_samples] = -1
# graph construction
print("start build graph.")
affinity_matrix = buildGraph(MatX, kernel_type, rbf_sigma, knn_num_neighbors)
print("build graph done.")
# start to propagation
iter = 0
pre_label_function = np.zeros((num_samples, num_classes), np.float32)
changed = np.abs(pre_label_function - label_function).sum()
while iter < max_iter and changed > tol:
if iter % 1 == 0:
print("---> Iteration %d/%d, changed: %f" % (iter, max_iter, changed))
pre_label_function = label_function
iter += 1
# propagation
label_function = np.dot(affinity_matrix, label_function)
# clamp
label_function[0: num_label_samples] = clamp_data_label
# check converge
changed = np.abs(pre_label_function - label_function).sum()
# get terminate label of unlabeled data
unlabel_data_labels = np.zeros(num_unlabel_samples)
for i in range(num_unlabel_samples):
unlabel_data_labels[i] = np.argmax(label_function[i + num_label_samples])
return unlabel_data_labels
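# Illustrative only: a tiny synthetic check of labelPropagation on 2D points.
# The function below is never called by this script; the points and labels are
# made up purely to show the expected input/output shapes.
def _label_propagation_toy_example():
    Mat_Label = np.array([[0.0, 0.0], [5.0, 5.0]], np.float32)  # one labeled point per class
    labels = np.array([0, 1])
    Mat_Unlabel = np.array([[0.5, 0.2], [4.8, 5.1], [0.1, 0.4]], np.float32)
    # with a knn kernel the unlabeled points should inherit the label of the
    # nearby labeled point, i.e. roughly [0, 1, 0]
    return labelPropagation(Mat_Label, Mat_Unlabel, labels,
                            kernel_type='knn', knn_num_neighbors=2, max_iter=50)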
def process_dir_label(list_path,cam):
with open(list_path, 'r') as txt:
lines = txt.readlines()
dataset = []
pid_container = set()
for img_idx, img_info in enumerate(lines):
img_path, pid = img_info.split(':')
pid = int(pid) # no need to relabel
camid = cam
# img_path = osp.join(dir_path, img_path)
dataset.append((img_path, pid, camid))
pid_container.add(pid)
num_imgs = len(dataset)
num_pids = len(pid_container)
# check if pid starts from 0 and increments with 1
return dataset, num_pids, num_imgs
def process_dir_unlabel(list_path,cam):
with open(list_path, 'r') as txt:
lines = txt.readlines()
dataset = []
for img_idx, img_info in enumerate(lines):
img_path = img_info.replace("\n","")
camid = cam
# img_path = osp.join(dir_path, img_path)
dataset.append((img_path, camid))
num_imgs = len(dataset)
return dataset, num_imgs
def test(model, labelloader, unlabelloader, use_gpu):
batch_time = AverageMeter()
model.eval()
with torch.no_grad():
label_feature, label_pids, q_camids = [], [], []
for batch_idx, (imgs, pids, camids) in enumerate(labelloader):
if use_gpu:
imgs = imgs.cuda()
end = time.time()
features = model(imgs)
batch_time.update(time.time() - end)
features = features.data.cpu()
label_feature.append(features)
label_pids.extend(pids)
# q_camids.extend(camids)
label_feature = torch.cat(label_feature, 0)
label_pids = np.asarray(label_pids)
# q_camids = np.asarray(q_camids)
unlabel_feature, unlabel_img_path, g_camids = [], [], []
for batch_idx, (imgs, img_path, camids) in enumerate(unlabelloader):
if use_gpu:
imgs = imgs.cuda()
end = time.time()
features = model(imgs)
batch_time.update(time.time() - end)
features = features.data.cpu()
unlabel_feature.append(features)
unlabel_img_path.extend(img_path)
# g_camids.extend(camids)
unlabel_feature = torch.cat(unlabel_feature, 0)
# unlabel_img_path = np.asarray(unlabel_img_path)
# g_camids = np.asarray(g_camids)
return label_feature, unlabel_feature, label_pids, unlabel_img_path
# main function
if __name__ == "__main__":
torch.manual_seed(seed)
os.environ['CUDA_VISIBLE_DEVICES'] = gpu_devices
use_gpu = torch.cuda.is_available()
if use_gpu:
print("Currently using GPU {}".format(gpu_devices))
cudnn.benchmark = True
torch.cuda.manual_seed_all(seed)
else:
print("Currently using CPU (GPU is highly recommended)")
print("Initializing dataset {}".format(dataset_name))
dataset_dir = osp.join(root, 'PCL_ReID')
list_label_path = osp.join(dataset_dir, 'train_extended_list.txt') if extended_data else \
osp.join(dataset_dir, 'train_list.txt')
list_unlabel_path = osp.join(dataset_dir, 'no_label_extended_list.txt') if extended_data else \
osp.join(dataset_dir, 'no_label_list.txt')
label_data, num_label_pids, num_label_imgs = process_dir_label(list_label_path, cam=0)
unlabel_data, num_unlabel_imgs = process_dir_unlabel(list_unlabel_path, cam=1)
transform_test = T.Compose([
T.Resize((height, width)),
T.ToTensor(),
# T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
T.Normalize(mean=[0.3495, 0.3453, 0.3941], std=[0.2755, 0.2122, 0.2563]),
])
pin_memory = True if use_gpu else False
labelloader = DataLoader(
ImageDataset(label_data, transform=transform_test),
batch_size=test_batch, shuffle=False, num_workers=workers,
pin_memory=pin_memory, drop_last=False
)
unlabelloader = DataLoader(
ImageDataset(unlabel_data, transform=transform_test, isFinal=True),
batch_size=test_batch, shuffle=False, num_workers=workers,
pin_memory=pin_memory, drop_last=False
)
print("Initializing model: {}".format(arch))
'''
vmgn_hgnn model, arch chosen from {'resnet50','resnet101','resnet152'}
efficientnet_hgnn model, arch chosen from {'efficientnet-b0', 'efficientnet-b1', 'efficientnet-b2', 'efficientnet-b3',
'efficientnet-b4', 'efficientnet-b5', 'efficientnet-b6', 'efficientnet-b7','efficientnet-b8'}
'''
model = models.init_model(name=arch,
num_classes=29626, # 29626 or 34394
# num_classes=19658,
isFinal=False,
global_branch=global_branch,
arch="resnet101")
print("Model size: {:.3f} M".format(count_num_param(model)))
checkpoint = torch.load(model_weight)
pretrain_dict = checkpoint['state_dict']
# model_dict = model.state_dict()
# pretrain_dict = {k: v for k, v in pretrain_dict.items() if k in model_dict and model_dict[k].size() == v.size()}
# model_dict.update(pretrain_dict)
model.load_state_dict(pretrain_dict)
if use_gpu:
model = nn.DataParallel(model).cuda()
print("Evaluate only")
Mat_Label, Mat_Unlabel, labels, unlabel_img_path = test(model, labelloader, unlabelloader, use_gpu)
# num_unlabel_samples = 800
# Mat_Label, labels, Mat_Unlabel = loadBandData(num_unlabel_samples)
# Mat_Label, labels, Mat_Unlabel = loadCircleData(num_unlabel_samples)
    ## Notice: when using 'rbf' as our kernel, the choice of the hyperparameter 'sigma' is very important! It should be
    ## chosen according to your dataset, specifically the distance between two data points. I think it should ensure that
    ## each point has about 10 knn or w_i,j is large enough. It also influences the speed of convergence. So, maybe the
    ## 'knn' kernel is better!
# unlabel_data_labels = labelPropagation(Mat_Label, Mat_Unlabel, labels, kernel_type = 'rbf', rbf_sigma = 0.2)
print("start label propagation")
unlabel_data_labels = labelPropagation(Mat_Label, Mat_Unlabel, labels, kernel_type='knn', knn_num_neighbors=5,
max_iter=400)
# show(Mat_Label, labels, Mat_Unlabel, unlabel_data_labels)
for idx in range(len(unlabel_img_path)):
unlabel_img_path[idx] += ':' + str(unlabel_data_labels[idx])
    # fmt="%s" is required because the entries are strings, not numbers
    np.savetxt("pseudo_label_for_no_label.txt", unlabel_img_path, fmt="%s")
| 37.257426
| 129
| 0.652582
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,071
| 0.272035
|