hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
688159bdf8610b835edab2ced8f2deeade5b818b | 498 | py | Python | accounts/models.py | asandeep/pseudo-electronics | 2ce9f03a43c917448bfc340f8011eaf4bac159da | [
"MIT"
] | null | null | null | accounts/models.py | asandeep/pseudo-electronics | 2ce9f03a43c917448bfc340f8011eaf4bac159da | [
"MIT"
] | null | null | null | accounts/models.py | asandeep/pseudo-electronics | 2ce9f03a43c917448bfc340f8011eaf4bac159da | [
"MIT"
] | null | null | null | from django.contrib.auth import models as auth_models
from django.db import models
from django.urls import reverse
class User(auth_models.AbstractUser):
"""Extends default model to add project specific fields."""
birth_date = models.DateField(help_text="Employee Date of birth.")
address = models.TextField(help_text="Employee permanent address")
locked = models.BooleanField(
default=False,
help_text="Whether the account has been locked by Store owner.",
)
| 33.2 | 72 | 0.740964 | 380 | 0.763052 | 0 | 0 | 0 | 0 | 0 | 0 | 165 | 0.331325 |
688316b56cd53635e3fbd2f9611352d74bf1174a | 3,623 | py | Python | pytorch_trainmodel/pytorch_train.py | hoangcaobao/projectube-nlp | b0471b1626b241082eac3b7a30cacf54b9668375 | [
"MIT"
] | 5 | 2021-09-16T15:05:20.000Z | 2022-02-04T00:39:12.000Z | pytorch_trainmodel/pytorch_train.py | hoangcaobao/projectube-sentiment-analysis | b0471b1626b241082eac3b7a30cacf54b9668375 | [
"MIT"
] | null | null | null | pytorch_trainmodel/pytorch_train.py | hoangcaobao/projectube-sentiment-analysis | b0471b1626b241082eac3b7a30cacf54b9668375 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from pytorch_model import *
from pytorch_clean import *
from sklearn.metrics import classification_report
from torch.optim import optimizer
from transformers import AutoModel, AutoTokenizer
from vncorenlp import VnCoreNLP
from transformers import AdamW
# load the pretrained PhoBERT encoder, its tokenizer, and the VnCoreNLP word segmenter
phobert=AutoModel.from_pretrained('vinai/phobert-base')
tokenizer=AutoTokenizer.from_pretrained('vinai/phobert-base')
rdrsegmenter=VnCoreNLP("vncorenlp/VnCoreNLP-1.1.1.jar", annotators="wseg", max_heap_size='-Xmx500m')
# load the datasets, word-segment the sentences, tokenize and pad them, then split and wrap in DataLoaders
sentences,labels=get_data(['sacarism_dataset.json','normal_dataset.json'])
sentences_segment(sentences, rdrsegmenter)
padded,labels=shuffle_and_tokenize(sentences,labels,check_maxlen(sentences), tokenizer)
X_train,X_val,X_test, y_train,y_val, y_test=split_data(padded, labels)
train_dataloader, val_dataloader= Data_Loader(X_train,X_val,y_train,y_val)
# freeze all PhoBERT parameters so only the classifier head is trained
for param in phobert.parameters():
param.requires_grad=False
# loss function: negative log-likelihood
cross_entropy=nn.NLLLoss()
model=classify(phobert,2)
optimizer=AdamW(model.parameters(),lr=1e-5)
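# train(): one pass over the training DataLoader; returns the average loss and the stacked predictions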
def train():
model.train()
total_loss,acc=0,0
total_preds=[]
for step , batch in enumerate(train_dataloader):
if step%50==0 and step!=0:
print("BATCH {} of {}".format(step, len(train_dataloader)))
input,labels=batch
model.zero_grad()
preds=model(input)
loss=cross_entropy(preds, labels)
total_loss=total_loss+loss.item()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
preds=preds.detach().numpy()
total_preds.append(preds)
avg_loss=total_loss/len(train_dataloader)
total_preds=np.concatenate(total_preds,axis=0)
return avg_loss, total_preds
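# evaluate(): one pass over the validation DataLoader with gradients disabled; returns the average loss and predictions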
def evaluate():
model.eval()
total_loss,acc=0,0
total_preds=[]
for step, batch in enumerate(val_dataloader):
if step%50==0 and step!=0:
print("BATCH {} of {}".format(step, len(val_dataloader)))
input,labels=batch
with torch.no_grad():
preds=model(input)
loss=cross_entropy(preds, labels)
total_loss+=loss.item()
preds=preds.detach().numpy()
total_preds.append(preds)
avg_loss=total_loss/len(val_dataloader)
total_preds=np.concatenate(total_preds,axis=0)
return avg_loss, total_preds
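# run(): full training loop over the given number of epochs, checkpointing the weights with the best validation loss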
def run(epochs):
best_valid_loss=float("inf")
train_losses=[]
valid_losses=[]
for epoch in range(epochs):
print("EPOCH {}/{}".format(epoch,epochs))
train_loss,_ =train()
valid_loss,_ =evaluate()
if valid_loss<best_valid_loss:
best_valid_loss=valid_loss
torch.save(model.state_dict(),"pytorch_trainmodel/save_weights.pt")
train_losses.append(train_loss)
valid_losses.append(valid_loss)
print(train_loss)
print(valid_loss)
print("======TRAINING=======")
run(10)
print("======CHECKING=======")
path = 'pytorch_trainmodel/save_weights.pt'  # load the best checkpoint saved during training
model.load_state_dict(torch.load(path))
sentence=input("Your sentence you want to predict: ")
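# result(): word-segment, tokenize and left-pad a raw sentence, then return the predicted class index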
def result(sentence):
tokens=rdrsegmenter.tokenize(sentence)
statement=""
for token in tokens:
statement+=" ".join(token)
sentence=statement
sequence=tokenizer.encode(sentence)
while(len(sequence)<check_maxlen(sentences)):
sequence.insert(0,0)
padded=torch.tensor([sequence])
with torch.no_grad():
preds=model(padded)
preds=np.argmax(preds,axis=1)
return preds
print(result(sentence))
# evaluate on the held-out test set
with torch.no_grad():
preds=model(X_test)
preds=preds.detach().numpy()
preds=np.argmax(preds,axis=1)
print(classification_report(y_test, preds)) | 29.696721 | 100 | 0.743031 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 400 | 0.110406 |
68855b61bab7f85695a5b52312067872e87f33fd | 295 | py | Python | tests/basics/builtin_sorted.py | 84KaliPleXon3/micropython-esp32 | a64dc82742749cf4a4bbe5688dde05122fb38f56 | [
"MIT"
] | 1 | 2020-04-24T06:09:44.000Z | 2020-04-24T06:09:44.000Z | tests/basics/builtin_sorted.py | 84KaliPleXon3/micropython-esp32 | a64dc82742749cf4a4bbe5688dde05122fb38f56 | [
"MIT"
] | null | null | null | tests/basics/builtin_sorted.py | 84KaliPleXon3/micropython-esp32 | a64dc82742749cf4a4bbe5688dde05122fb38f56 | [
"MIT"
] | null | null | null | # test builtin sorted
try:
sorted
set
except:
import sys
print("SKIP")
sys.exit()
print(sorted(set(range(100))))
print(sorted(set(range(100)), key=lambda x: x + 100*(x % 2)))
# need to use keyword argument
try:
sorted([], None)
except TypeError:
print("TypeError")
| 16.388889 | 61 | 0.630508 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 68 | 0.230508 |
6885709e154f6ae3d0952842e393c4c290d87e3b | 643 | py | Python | addons/mendeley/tests/test_serializer.py | gaybro8777/osf.io | 30408511510a40bc393565817b343ef5fd76ab14 | [
"Apache-2.0"
] | 628 | 2015-01-15T04:33:22.000Z | 2022-03-30T06:40:10.000Z | addons/mendeley/tests/test_serializer.py | gaybro8777/osf.io | 30408511510a40bc393565817b343ef5fd76ab14 | [
"Apache-2.0"
] | 4,712 | 2015-01-02T01:41:53.000Z | 2022-03-30T14:18:40.000Z | addons/mendeley/tests/test_serializer.py | Johnetordoff/osf.io | de10bf249c46cede04c78f7e6f7e352c69e6e6b5 | [
"Apache-2.0"
] | 371 | 2015-01-12T16:14:08.000Z | 2022-03-31T18:58:29.000Z | # -*- coding: utf-8 -*-
"""Serializer tests for the Mendeley addon."""
import pytest
from addons.base.tests.serializers import CitationAddonSerializerTestSuiteMixin
from addons.base.tests.utils import MockFolder
from addons.mendeley.tests.factories import MendeleyAccountFactory
from addons.mendeley.serializer import MendeleySerializer
from tests.base import OsfTestCase
pytestmark = pytest.mark.django_db
class TestMendeleySerializer(CitationAddonSerializerTestSuiteMixin, OsfTestCase):
addon_short_name = 'mendeley'
Serializer = MendeleySerializer
ExternalAccountFactory = MendeleyAccountFactory
folder = MockFolder()
| 30.619048 | 81 | 0.819596 | 231 | 0.359253 | 0 | 0 | 0 | 0 | 0 | 0 | 79 | 0.122862 |
688600d6cad34394de4e5a48da1144fe75003447 | 465 | py | Python | src/project/_tests/base.py | pviojo/boilerplate-flask-api | 4e3832cc1a6979019311e01f60227ace0f2c3259 | [
"MIT"
] | null | null | null | src/project/_tests/base.py | pviojo/boilerplate-flask-api | 4e3832cc1a6979019311e01f60227ace0f2c3259 | [
"MIT"
] | null | null | null | src/project/_tests/base.py | pviojo/boilerplate-flask-api | 4e3832cc1a6979019311e01f60227ace0f2c3259 | [
"MIT"
] | null | null | null | from app import run_migration
from flask import current_app as app
from flask_testing import TestCase
from project import db
class BaseTestCase(TestCase):
def create_app(self):
app.config.from_object('project.configs.TestingConfig')
return app
def setUp(self):
run_migration()
def tearDown(self):
db.session.remove()
db.drop_all()
def assert201(self, response):
self.assert_status(response, 201)
| 22.142857 | 63 | 0.692473 | 337 | 0.724731 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 0.066667 |
688701927338303df5deb153d6634d0b60ce9445 | 956 | py | Python | models/three_layers_nn.py | erwanlecarpentier/optimistic-regressor | 8daac40289b591b88aa0658c531df15c92792d23 | [
"CC0-1.0"
] | null | null | null | models/three_layers_nn.py | erwanlecarpentier/optimistic-regressor | 8daac40289b591b88aa0658c531df15c92792d23 | [
"CC0-1.0"
] | null | null | null | models/three_layers_nn.py | erwanlecarpentier/optimistic-regressor | 8daac40289b591b88aa0658c531df15c92792d23 | [
"CC0-1.0"
] | null | null | null | """
Three-layer optimistic neural network
"""
import torch
class ThreeLayersNN(torch.nn.Module):
def __init__(self, in_dim, out_dim, h_dim, activation='relu'):
"""
:param in_dim: (int) input dimension
:param out_dim: (int) output dimension
:param h_dim: (sequence of two ints) hidden layer dimensions
:param activation: (str) hidden activation, 'relu' or 'sigmoid'
"""
super(ThreeLayersNN, self).__init__()
self.activation = activation
# Initialize layers
self.h_dim = h_dim
self.linear1 = torch.nn.Linear(in_dim, h_dim[0])
self.linear2 = torch.nn.Linear(h_dim[0], h_dim[1])
self.linear3 = torch.nn.Linear(h_dim[1], out_dim)
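# forward(): two hidden layers with the configured activation ('relu' or 'sigmoid'), then a linear output layer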
def forward(self, x):
if self.activation == 'sigmoid':
h = torch.sigmoid(self.linear1(x))
h = torch.sigmoid(self.linear2(h))
return self.linear3(h)
elif self.activation == 'relu':
h = self.linear1(x).clamp(min=0)
h = self.linear2(h).clamp(min=0)
return self.linear3(h)
| 28.969697 | 66 | 0.585774 | 896 | 0.937238 | 0 | 0 | 0 | 0 | 0 | 0 | 189 | 0.197699 |
68880d2a92cdd46241a4f5ad354ad532bcf2dac3 | 630 | py | Python | helper/trap.py | shivaroast/AidenBot | 6b6463d48baec55d5df707d31d0fd6f7ed77a345 | [
"MIT"
] | null | null | null | helper/trap.py | shivaroast/AidenBot | 6b6463d48baec55d5df707d31d0fd6f7ed77a345 | [
"MIT"
] | null | null | null | helper/trap.py | shivaroast/AidenBot | 6b6463d48baec55d5df707d31d0fd6f7ed77a345 | [
"MIT"
] | null | null | null | '''
?
(c) 2018 - laymonage
'''
import os
import random
import requests
from .dropson import dbx_dl, get_json
def surprise(safe=False):
'''
Return (orig_url, prev_url): prev_url is always a cat image; orig_url is
another cat image when safe=True, otherwise a random link from the
surprises file.
'''
cat_api = 'http://thecatapi.com/api/images/get'
prev_url = requests.get(cat_api)
prev_url = prev_url.url.replace('http://', 'https://')
if safe:
orig_url = requests.get(cat_api)
orig_url = orig_url.url.replace('http://', 'https://')
else:
surprise_links = os.getenv('SURPRISES_FILE_PATH', None)
surprises = get_json(dbx_dl(surprise_links))
orig_url = random.choice(surprises)
return (orig_url, prev_url)
| 21.724138 | 63 | 0.633333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 143 | 0.226984 |
68881d8d1f87bdfd089d83ddb17e5dd84d4f78de | 11,019 | py | Python | defaulter.py | tipabu/swift_defaulter | fd72f5652fd7ab766dcef73c41fcbfa9a6e0e368 | [
"Apache-2.0"
] | 4 | 2015-08-12T19:47:05.000Z | 2017-08-25T06:04:27.000Z | defaulter.py | tipabu/swift_defaulter | fd72f5652fd7ab766dcef73c41fcbfa9a6e0e368 | [
"Apache-2.0"
] | null | null | null | defaulter.py | tipabu/swift_defaulter | fd72f5652fd7ab766dcef73c41fcbfa9a6e0e368 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015-2016 Tim Burke
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Middleware to set default headers for PUT requests.
End Users / Application Developers
===================================
With this middleware enabled, users can set X-Default-Object-* headers on
accounts and containers to automatically set default headers for subsequent
object PUTs, or X-Default-Container-* headers on accounts to set defaults for
subsequent container PUTs. If a default is specified at multiple levels (for
example, an object default is specified both at the account and container),
the more-specific level's default is used. For example, in the sequence::
POST /v1/acct
X-Default-Object-X-Delete-After: 2592000
POST /v1/acct/foo
X-Default-Object-X-Delete-After: 86400
PUT /v1/acct/foo/o1
PUT /v1/acct/foo/o2
X-Delete-After: 3600
PUT /v1/acct/bar/o3
PUT /v1/acct/baz/o4
POST /v1/acct/baz/o4
X-Remove-Delete-At: 1
PUT /v1/other_acct/quux/o5
* ``acct/foo/o1`` will get its ``X-Delete-After`` header from the container
default, so it will automatically be deleted after 24 hours.
* ``acct/foo/o2`` had its ``X-Delete-After`` header explicitly set by the
client, so it will automatically be deleted after one hour.
* ``acct/bar/o3`` will get its ``X-Delete-After`` header from the account
default, so it will be deleted after 30 days.
* ``acct/baz/o4`` will initially be set to delete after 30 days as well.
However, nothing prevents you from later changing or removing the defaulted
header. After the subsequent ``POST``, the object will not be automatically
deleted.
* ``other_acct/quux/o5`` will not be automatically deleted, as neither its
account nor its container specified a default expiration time.
.. note::
You may not specify defaults for any X-*-Sysmeta-* or X-Backend-* headers.
This is comparable to the behavior of the gatekeeper middleware.
Cluster Operators
=================
Requires Swift >= 1.12.0
Pipeline Placement
------------------
This middleware should be placed as far left as possible while still being
right of Swift's sane-WSGI-environment middlewares. Immediately right of
``cache`` should be reasonable.
Configuration Options
---------------------
use_formatting
If true, expose {account}, {container}, and {object} formatting
variables. This can be useful for things like setting::
X-Default-Container-X-Versions-Location: .{container}_versions
Default: False
default-account-*
default-container-*
default-object-*
Used to set defaults across the entire cluster. These have lower precedence
than account-level defaults.
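A filter section combining these options might look like the following
sketch (the ``use =`` entry point line is an assumption here, not taken
from the package's setup files)::
    [filter:defaulter]
    use = egg:swift_defaulter#defaulter
    use_formatting = true
    default-object-x-delete-after = 2592000
    default-container-x-versions-location = .{container}_versions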
Middleware Developers
=====================
This middleware adds two keys to the request environment:
swift.defaulter_headers
This is a comma-delimited list of the headers for which this middleware has
set default values. Note that other middlewares may have modified some or
all of these after the defaults were set.
swift.defaulter_hook
This is a callback that may be used to populate defaults for subrequests.
It will only modify PUT requests. It accepts a swob.Request as an argument.
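For example, a middleware that issues an object PUT subrequest could apply
the defaults through the hook like this (an illustrative sketch; ``subreq``
is a hypothetical swob.Request built by that middleware)::
    hook = req.environ.get('swift.defaulter_hook')
    if hook is not None:
        hook(subreq)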
"""
from swift.common.request_helpers import get_sys_meta_prefix
from swift.common.swob import wsgify
from swift.common.utils import config_true_value
from swift.common.utils import register_swift_info
from swift.proxy.controllers.base import get_account_info
from swift.proxy.controllers.base import get_container_info
BLACKLIST = {'x-timestamp'}  # headers that may never be defaulted
BLACKLIST_PREFIXES = (
get_sys_meta_prefix('account'),
get_sys_meta_prefix('container'),
get_sys_meta_prefix('object'),
'x-backend-',
)
CALLBACK_ENV_KEY = 'swift.defaulter_hook'
HEADERS_ENV_KEY = 'swift.defaulter_headers'
class DefaulterMiddleware(object):
def __init__(self, app, config):
self.app = app
self.conf = config
@wsgify
def __call__(self, req):
req.environ[CALLBACK_ENV_KEY] = self.defaulter_hook
req.environ['swift.copy_hook'] = self.copy_hook(req.environ.get(
'swift.copy_hook', lambda src_req, src_resp, sink_req: src_resp))
try:
vers, acct, cont, obj = req.split_path(2, 4, True)
except ValueError:
# /info request, or something similar
return self.app
handler = getattr(self, 'do_%s' % req.method.lower(), None)
if not callable(handler):
handler = self.get_response_and_translate
if obj is not None:
req_type = 'object'
elif cont is not None:
req_type = 'container'
elif acct is not None:
req_type = 'account'
return handler(req, req_type)
def client_to_sysmeta(self, req, req_type):
subresources = {
'account': ('container', 'object'),
'container': ('object', ),
}.get(req_type, ())
header_formats = (
('x-remove-default-%s-', True),
('x-default-%s-', False),
)
for header_format, clear in header_formats:
for header, value in req.headers.items():
for subresource in subresources:
prefix = header_format % subresource
if header.lower().startswith(prefix):
header_to_default = header[len(prefix):].lower()
if header_to_default.startswith(BLACKLIST_PREFIXES):
continue
if header_to_default in BLACKLIST:
continue
sysmeta_header = '%sdefault-%s-%s' % (
get_sys_meta_prefix(req_type),
subresource,
header_to_default)
req.headers[sysmeta_header] = '' if clear else value
def sysmeta_to_client(self, resp, req_type):
prefix = get_sys_meta_prefix(req_type) + 'default-'
for header, value in resp.headers.items():
if header.lower().startswith(prefix):
client_header = 'x-default-%s' % header[len(prefix):]
resp.headers[client_header] = value
def get_response_and_translate(self, req, req_type):
resp = req.get_response(self.app)
self.sysmeta_to_client(resp, req_type)
return resp
def do_post(self, req, req_type):
if req_type == 'object':
return self.get_response_and_translate(req, req_type)
self.client_to_sysmeta(req, req_type)
return self.get_response_and_translate(req, req_type)
def defaulter_hook(self, req):
'''Callback so middlewares that make subrequests can populate defaults.
:param req: the swob.Request that should have its headers defaulted
'''
if HEADERS_ENV_KEY in req.environ:
return # We've already tried setting defaults; pass
if req.method != 'PUT':
return # Only set defaults during PUTs
try:
pieces = req.split_path(2, 4, True)
except ValueError:
return # /info, or something? but it's a put... what?
if pieces.pop(0) != 'v1':
return # Swift3 request, maybe? Doesn't look like Swift API
# OK, we're reasonably assured that we're working with an account,
# container or object request for which we should populate defaults.
format_args = {}
for val, val_type in zip(pieces, ('account', 'container', 'object')):
if val is not None:
format_args[val_type] = val
req_type = val_type
defaulted = []
for header, value in self.get_defaults(
req, req_type, format_args).items():
if header not in req.headers:
defaulted.append(header)
req.headers[header] = value
req.environ[HEADERS_ENV_KEY] = ','.join(defaulted)
# Go ahead and translate to sysmeta; it allows users to set things like
# X-Default-Container-X-Default-Object-X-Object-Meta-Color: blue
# on their account (if they really want to) and it will Just Work.
self.client_to_sysmeta(req, req_type)
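# copy_hook(): wrap any pre-existing copy hook so that server-side copies also apply defaults to the sink (destination) request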
def copy_hook(self, inner_hook):
def outer_hook(src_req, src_resp, sink_req):
src_resp = inner_hook(src_req, src_resp, sink_req)
if 'swift.post_as_copy' not in src_req.environ:
self.defaulter_hook(sink_req)
return src_resp
return outer_hook
def do_put(self, req, req_type):
self.defaulter_hook(req)
# Once we've set the defaults, we just follow the POST flow
return self.do_post(req, req_type)
def get_defaults(self, req, req_type, format_args):
acct_sysmeta = get_account_info(req.environ, self.app)['sysmeta']
if req_type == 'object':
cont_sysmeta = get_container_info(req.environ, self.app)['sysmeta']
else:
cont_sysmeta = {}
defaults = {}
prefix = 'default-%s-' % req_type
for src in (self.conf, acct_sysmeta, cont_sysmeta):
for key, value in src.items():
if not key.lower().startswith(prefix):
continue
header_to_default = key[len(prefix):].lower()
if header_to_default.startswith(BLACKLIST_PREFIXES):
continue
if header_to_default in BLACKLIST:
continue
if self.conf['use_formatting']:
try:
value = value.format(**format_args)
except KeyError:
# This user may not have specified the default;
# don't fail because of someone else
pass
defaults[header_to_default] = value
return defaults
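# Paste Deploy filter factory: registers the configured defaults with /info and builds the middleware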
def filter_factory(global_conf, **local_conf):
conf = global_conf.copy()
conf.update(local_conf)
conf['use_formatting'] = config_true_value(conf.get(
'use_formatting', False))
defaulting_prefixes = tuple('default-%s-' % typ
for typ in ('account', 'container', 'object'))
conf_to_register = {
k: v for k, v in conf.items()
if k == 'use_formatting' or k.startswith(defaulting_prefixes)}
register_swift_info('defaulter', **conf_to_register)
def filt(app):
return DefaulterMiddleware(app, conf)
return filt
| 36.486755 | 79 | 0.63808 | 6,141 | 0.55731 | 0 | 0 | 816 | 0.074054 | 0 | 0 | 5,041 | 0.457483 |
688966ef062d193a314e93a1d3dc28aa32628e39 | 42,873 | py | Python | src/sage/coding/information_set_decoder.py | UCD4IDS/sage | 43474c96d533fd396fe29fe0782d44dc7f5164f7 | [
"BSL-1.0"
] | 1,742 | 2015-01-04T07:06:13.000Z | 2022-03-30T11:32:52.000Z | src/sage/coding/information_set_decoder.py | UCD4IDS/sage | 43474c96d533fd396fe29fe0782d44dc7f5164f7 | [
"BSL-1.0"
] | 66 | 2015-03-19T19:17:24.000Z | 2022-03-16T11:59:30.000Z | src/sage/coding/information_set_decoder.py | UCD4IDS/sage | 43474c96d533fd396fe29fe0782d44dc7f5164f7 | [
"BSL-1.0"
] | 495 | 2015-01-10T10:23:18.000Z | 2022-03-24T22:06:11.000Z | # -*- coding: utf-8 -*-
r"""
Information-set decoding for linear codes
Information-set decoding is a probabilistic decoding strategy that
essentially tries to guess `k` correct positions in the received word,
where `k` is the dimension of the code. A codeword agreeing with the
received word on the guessed position can easily be computed, and their
difference is one possible error vector. A "correct" guess is assumed when
this error vector has low Hamming weight.
This simple algorithm is not very efficient in itself, but there are numerous
refinements to the strategy that make it very capable over rather large codes.
Still, the decoding algorithm is exponential in dimension of the code and the
log of the field size.
The ISD strategy requires choosing how many errors is deemed acceptable. One
choice could be `d/2`, where `d` is the minimum distance of the code, but
sometimes `d` is not known, or sometimes more errors are expected. If one
chooses anything above `d/2`, the algorithm does not guarantee to return a
nearest codeword.
AUTHORS:
- David Lucas, Johan Rosenkilde, Yann Laigle-Chapuy (2016-02, 2017-06): initial
version
"""
#******************************************************************************
# Copyright (C) 2017 David Lucas <david.lucas@inria.fr>
# Johan Rosenkilde <jsrn@jsrn.dk>
# Yann Laigle-Chapuy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# http://www.gnu.org/licenses/
#******************************************************************************
from sage.all import ZZ, Integer, vector, SageObject, binomial
from .decoder import Decoder
def _format_decoding_interval(decoding_interval):
r"""
Format the decoding interval of an ISD decoder when calling ``_repr_`` or
``_latex_``.
EXAMPLES::
sage: from sage.coding.information_set_decoder import _format_decoding_interval
sage: _format_decoding_interval((0,3))
'up to 3'
sage: _format_decoding_interval((2,3))
'between 2 and 3'
sage: _format_decoding_interval((3,3))
'exactly 3'
"""
if decoding_interval[0] == 0:
return "up to {0}".format(decoding_interval[1])
if decoding_interval[0] == decoding_interval[1]:
return "exactly {0}".format(decoding_interval[0])
return "between {0} and {1}".format(decoding_interval[0], decoding_interval[1])
class InformationSetAlgorithm(SageObject):
r"""
Abstract class for algorithms for
:class:`sage.coding.information_set_decoder.LinearCodeInformationSetDecoder`.
To sub-class this class, override ``decode`` and ``calibrate``, and call the
super constructor from ``__init__``.
INPUT:
- ``code`` -- A linear code for which to decode.
- ``number_errors`` -- an integer, the maximal number of errors to accept as
correct decoding. An interval can also be specified by giving a pair of
integers, where both end values are taken to be in the interval.
- ``algorithm_name`` -- A name for the specific ISD algorithm used (used for
printing).
- ``parameters`` -- (optional) A dictionary for setting the parameters of
this ISD algorithm. Note that sanity checking this dictionary for the
individual sub-classes should be done in the sub-class constructor.
EXAMPLES::
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: LeeBrickellISDAlgorithm(codes.GolayCode(GF(2)), (0,4))
ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 4 errors
A minimal working example of how to sub-class::
sage: from sage.coding.information_set_decoder import InformationSetAlgorithm
sage: from sage.coding.decoder import DecodingError
sage: class MinimalISD(InformationSetAlgorithm):
....: def __init__(self, code, decoding_interval):
....: super(MinimalISD, self).__init__(code, decoding_interval, "MinimalISD")
....: def calibrate(self):
....: self._parameters = { } # calibrate parameters here
....: self._time_estimate = 10.0 # calibrated time estimate
....: def decode(self, r):
....: # decoding algorithm here
....: raise DecodingError("I failed")
sage: MinimalISD(codes.GolayCode(GF(2)), (0,4))
ISD Algorithm (MinimalISD) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 4 errors
"""
def __init__(self, code, decoding_interval, algorithm_name, parameters = None):
r"""
TESTS::
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: LeeBrickellISDAlgorithm(codes.GolayCode(GF(2)), (0,4))
ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 4 errors
"""
self._code = code
self._decoding_interval = decoding_interval
self._algorithm_name = algorithm_name
if parameters:
self._parameters = parameters
self._parameters_specified = True
else:
self._parameters_specified = False
def name(self):
r"""
Return the name of this ISD algorithm.
EXAMPLES::
sage: C = codes.GolayCode(GF(2))
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: A = LeeBrickellISDAlgorithm(C, (0,2))
sage: A.name()
'Lee-Brickell'
"""
return self._algorithm_name
def decode(self, r):
r"""
Decode a received word using this ISD decoding algorithm.
Must be overridden by sub-classes.
EXAMPLES::
sage: M = matrix(GF(2), [[1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0],\
[0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1],\
[0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0],\
[0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1],\
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1]])
sage: C = codes.LinearCode(M)
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: A = LeeBrickellISDAlgorithm(C, (2,2))
sage: r = vector(GF(2), [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
sage: A.decode(r)
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
"""
raise NotImplementedError
def time_estimate(self):
"""
Estimate for how long this ISD algorithm takes to perform a single decoding.
The estimate is for a received word whose number of errors is within the
decoding interval of this ISD algorithm.
EXAMPLES::
sage: C = codes.GolayCode(GF(2))
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: A = LeeBrickellISDAlgorithm(C, (0,2))
sage: A.time_estimate() #random
0.0008162108571427874
"""
if not hasattr(self, "_time_estimate"):
self.calibrate()
return self._time_estimate
def calibrate(self):
"""
Uses test computations to estimate optimal values for any parameters
this ISD algorithm may take.
Must be overridden by sub-classes.
If ``self._parameters_specified`` is ``False``, this method shall set
``self._parameters`` to the best parameters estimated. It shall always
set ``self._time_estimate`` to the time estimate of using
``self._parameters``.
EXAMPLES::
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: C = codes.GolayCode(GF(2))
sage: A = LeeBrickellISDAlgorithm(C, (0,3))
sage: A.calibrate()
sage: A.parameters() #random
{'search_size': 1}
"""
raise NotImplementedError
def code(self):
r"""
Return the code associated to this ISD algorithm.
EXAMPLES::
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: C = codes.GolayCode(GF(2))
sage: A = LeeBrickellISDAlgorithm(C, (0,3))
sage: A.code()
[24, 12, 8] Extended Golay code over GF(2)
"""
return self._code
def decoding_interval(self):
r"""
A pair of integers specifying the interval of number of errors this
ISD algorithm will attempt to correct.
The interval includes both end values.
EXAMPLES::
sage: C = codes.GolayCode(GF(2))
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: A = LeeBrickellISDAlgorithm(C, (0,2))
sage: A.decoding_interval()
(0, 2)
"""
return self._decoding_interval
def parameters(self):
"""
Return any parameters this ISD algorithm uses.
If the parameters have not already been set, efficient values will first
be calibrated and returned.
EXAMPLES::
sage: C = codes.GolayCode(GF(2))
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: A = LeeBrickellISDAlgorithm(C, (0,4), search_size=3)
sage: A.parameters()
{'search_size': 3}
If not set, calibration will determine a sensible value::
sage: A = LeeBrickellISDAlgorithm(C, (0,4))
sage: A.parameters() #random
{'search_size': 1}
"""
if not hasattr(self, "_parameters"):
self.calibrate()
return self._parameters
def __eq__(self, other):
r"""
Tests equality between ISD algorithm objects.
EXAMPLES::
sage: C = codes.GolayCode(GF(2))
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: A = LeeBrickellISDAlgorithm(C, (0,4))
sage: A == LeeBrickellISDAlgorithm(C, (0,4))
True
sage: A == LeeBrickellISDAlgorithm(C, (0,5))
False
sage: other_search = 1 if A.parameters()['search_size'] != 1 else 2
sage: A == LeeBrickellISDAlgorithm(C, (0,4), search_size=other_search)
False
ISD Algorithm objects can be equal only if they have both calibrated
the parameters, or if they both had it set and to the same value::
sage: A2 = LeeBrickellISDAlgorithm(C, (0,4), search_size=A.parameters()['search_size'])
sage: A == A2
False
sage: A2 == LeeBrickellISDAlgorithm(C, (0,4), search_size=A.parameters()['search_size'])
True
"""
return isinstance(other, self.__class__)\
and self.code() == other.code()\
and self.decoding_interval() == other.decoding_interval()\
and self._parameters_specified == other._parameters_specified\
and (not self._parameters_specified or self.parameters() == other.parameters())
def __hash__(self):
r"""
Returns the hash value of ``self``.
EXAMPLES::
sage: C = codes.GolayCode(GF(2))
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: A = LeeBrickellISDAlgorithm(C, (0,4))
sage: hash(A) #random
5884357732955478461
sage: C2 = codes.GolayCode(GF(3))
sage: A2 = LeeBrickellISDAlgorithm(C2, (0,4))
sage: hash(A) != hash(A2)
True
"""
return hash(str(self))
def _repr_(self):
r"""
Returns a string representation of this ISD algorithm.
EXAMPLES::
sage: C = codes.GolayCode(GF(2))
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: A = LeeBrickellISDAlgorithm(C, (0,4))
sage: A
ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 4 errors
"""
return "ISD Algorithm ({}) for {} decoding {} errors ".format(self._algorithm_name, self.code(), _format_decoding_interval(self.decoding_interval()))
def _latex_(self):
r"""
Returns a latex representation of this ISD algorithm.
EXAMPLES::
sage: C = codes.GolayCode(GF(2))
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: A = LeeBrickellISDAlgorithm(C, (0,4))
sage: latex(A)
\textnormal{ISD Algorithm (Lee-Brickell) for }[24, 12, 8] \textnormal{ Extended Golay Code over } \Bold{F}_{2} \textnormal{decoding up to 4 errors}
"""
return "\\textnormal{{ISD Algorithm ({}) for }}{} \\textnormal{{decoding {} errors}}".format(self._algorithm_name, self.code()._latex_(), _format_decoding_interval(self.decoding_interval()))
class LeeBrickellISDAlgorithm(InformationSetAlgorithm):
r"""
The Lee-Brickell algorithm for information-set decoding.
For a description of the information-set decoding paradigm (ISD), see
:class:`sage.coding.information_set_decoder.LinearCodeInformationSetDecoder`.
This implements the Lee-Brickell variant of ISD, see [LB1988]_ for the
original binary case, and [Pet2010]_ for the `q`-ary extension.
Let `C` be a `[n, k]`-linear code over `GF(q)`, and let `r \in GF(q)^{n}` be
a received word in a transmission. We seek the codeword whose Hamming
distance from `r` is minimal. Let `p` and `w` be integers, such that `0\leq
p\leq w`, Let `G` be a generator matrix of `C`, and for any set of indices
`I`, we write `G_{I}` for the matrix formed by the columns of `G` indexed by
`I`. The Lee-Brickell ISD loops the following until it is successful:
1. Choose an information set `I` of `C`.
2. Compute `r' = r - r_{I}\times G_I^{-1} \times G`
3. Consider every size-`p` subset of `I`, `\{a_1, \dots, a_p\}`.
For each `m = (m_1, \dots, m_p) \in GF(q)^{p}`, compute
the error vector `e = r' - \sum_{i=1}^{p} m_i\times g_{a_i}`,
4. If `e` has a Hamming weight at most `w`, return `r-e`.
INPUT:
- ``code`` -- A linear code for which to decode.
- ``decoding_interval`` -- a pair of integers specifying an interval of
number of errors to correct. Includes both end values.
- ``search_size`` -- (optional) the size of subsets to use on step 3 of the
algorithm as described above. Usually a small number. It has to be at most
the largest allowed number of errors. A good choice will be approximated
if this option is not set; see
:meth:`sage.coding.LeeBrickellISDAlgorithm.calibrate`
for details.
EXAMPLES::
sage: C = codes.GolayCode(GF(2))
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: A = LeeBrickellISDAlgorithm(C, (0,4)); A
ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 4 errors
sage: C = codes.GolayCode(GF(2))
sage: A = LeeBrickellISDAlgorithm(C, (2,3)); A
ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding between 2 and 3 errors
"""
def __init__(self, code, decoding_interval, search_size = None):
r"""
TESTS:
If ``search_size`` is not a positive integer, or is bigger than the
decoding radius, an error will be raised::
sage: C = codes.GolayCode(GF(2))
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: LeeBrickellISDAlgorithm(C, (1, 3), search_size=-1)
Traceback (most recent call last):
...
ValueError: The search size parameter has to be a positive integer
sage: LeeBrickellISDAlgorithm(C, (1, 3), search_size=4)
Traceback (most recent call last):
...
ValueError: The search size parameter has to be at most the maximal number of allowed errors
"""
if search_size is not None:
if not isinstance(search_size, (Integer, int)) or search_size < 0:
raise ValueError("The search size parameter has to be a positive integer")
if search_size > decoding_interval[1]:
raise ValueError("The search size parameter has to be at most"
" the maximal number of allowed errors")
super(LeeBrickellISDAlgorithm, self).__init__(code, decoding_interval, "Lee-Brickell",
parameters={ 'search_size': search_size })
self._parameters_specified = True
else:
self._parameters_specified = False
super(LeeBrickellISDAlgorithm, self).__init__(code, decoding_interval, "Lee-Brickell")
def decode(self, r):
r"""
The Lee-Brickell algorithm as described in the class doc.
Note that either parameters must be given at construction time or
:meth:`sage.coding.information_set_decoder.InformationSetAlgorithm.calibrate()`
should be called before calling this method.
INPUT:
- `r` -- a received word, i.e. a vector in the ambient space of
:meth:`decoder.Decoder.code`.
OUTPUT: A codeword whose distance to `r` satisfies ``self.decoding_interval()``.
EXAMPLES::
sage: M = matrix(GF(2), [[1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0],\
[0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1],\
[0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0],\
[0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1],\
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1]])
sage: C = codes.LinearCode(M)
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: A = LeeBrickellISDAlgorithm(C, (2,2))
sage: c = C.random_element()
sage: Chan = channels.StaticErrorRateChannel(C.ambient_space(), 2)
sage: r = Chan(c)
sage: c_out = A.decode(r)
sage: (r - c).hamming_weight() == 2
True
"""
import itertools
from sage.misc.prandom import sample
C = self.code()
n, k = C.length(), C.dimension()
tau = self.decoding_interval()
p = self.parameters()['search_size']
F = C.base_ring()
G = C.generator_matrix()
Fstar = F.list()[1:]
while True:
# step 1: pick a random size-k index set I and check that it is an information set
I = sample(range(n), k)
Gi = G.matrix_from_columns(I)
try:
Gi_inv = Gi.inverse()
except ZeroDivisionError:
# I was not an information set
continue
Gt = Gi_inv * G
# step 2: compute r' = r - r_I * G_I^{-1} * G, which is zero on the positions in I
y = r - vector([r[i] for i in I]) * Gt
g = Gt.rows()
# step 3: search error vectors formed by subtracting up to p scaled rows of Gt from r'
for pi in range(p+1):
for A in itertools.combinations(range(k), pi):
for m in itertools.product(Fstar, repeat=pi):
e = y - sum(m[i]*g[A[i]] for i in range(pi))
errs = e.hamming_weight()
if errs >= tau[0] and errs <= tau[1]:
return r - e
def calibrate(self):
r"""
Run some test computations to estimate the optimal search size.
Let `p` be the search size. We should simply choose `p` such that the
average expected time is minimal. The algorithm succeeds when it chooses
an information set with at least `k - p` correct positions, where `k` is
the dimension of the code and `p` the search size. The expected number
of trials we need before this occurs is:
.. MATH::
\binom{n}{k}/(\rho \sum_{i=0}^p \binom{n-\tau}{k-i} \binom{\tau}{i})
Here `\rho` is the fraction of `k` subsets of indices which are
information sets. If `T` is the average time for steps 1 and 2
(including selecting `I` until an information set is found), while `P(i)`
is the time for the body of the ``for``-loop in step 3 for `m` of weight
`i`, then each information set trial takes roughly time `T +
\sum_{i=0}^{p} P(i) \binom{k}{i} (q-1)^i`, where `\GF{q}` is the base
field.
The values `T` and `P` are here estimated by running a few test
computations similar to those done by the decoding algorithm.
We don't explicitly estimate `\rho`.
OUTPUT: Does not output anything but sets private fields used by
:meth:`sage.coding.information_set_decoder.InformationSetAlgorithm.parameters()`
and
:meth:`sage.coding.information_set_decoder.InformationSetAlgorithm.time_estimate()``.
EXAMPLES::
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: C = codes.GolayCode(GF(2))
sage: A = LeeBrickellISDAlgorithm(C, (0,3)); A
ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 3 errors
sage: A.calibrate()
sage: A.parameters() #random
{'search_size': 1}
sage: A.time_estimate() #random
0.0008162108571427874
If we specify the parameter at construction time, calibrate does not override this choice::
sage: A = LeeBrickellISDAlgorithm(C, (0,3), search_size=2); A
ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 3 errors
sage: A.parameters()
{'search_size': 2}
sage: A.calibrate()
sage: A.parameters()
{'search_size': 2}
sage: A.time_estimate() #random
0.0008162108571427874
"""
from sage.matrix.special import random_matrix
from sage.misc.prandom import sample, randint
from sage.modules.free_module_element import random_vector
from time import process_time
C = self.code()
G = C.generator_matrix()
n, k = C.length(), C.dimension()
tau = self.decoding_interval()[1]
F = C.base_ring()
q = F.cardinality()
Fstar = F.list()[1:]
def time_information_set_steps():
before = process_time()
while True:
I = sample(range(n), k)
Gi = G.matrix_from_columns(I)
try:
Gi_inv = Gi.inverse()
except ZeroDivisionError:
continue
return process_time() - before
def time_search_loop(p):
y = random_vector(F, n)
g = random_matrix(F, p, n).rows()
scalars = [ [ Fstar[randint(0,q-2)] for i in range(p) ]
for s in range(100) ]
before = process_time()
for m in scalars:
e = y - sum(m[i]*g[i] for i in range(p))
return (process_time() - before) / 100.
T = sum([ time_information_set_steps() for s in range(5) ]) / 5.
P = [ time_search_loop(p) for p in range(tau+1) ]
def compute_estimate(p):
iters = 1.* binomial(n, k)/ \
sum( binomial(n-tau, k-i)*binomial(tau,i) for i in range(p+1) )
estimate = iters*(T + \
sum(P[pi] * (q-1)**pi * binomial(k, pi) for pi in range(p+1) ))
return estimate
if self._parameters_specified:
self._time_estimate = compute_estimate(self._parameters['search_size'])
else:
self._calibrate_select([ compute_estimate(p) for p in range(tau+1) ])
def _calibrate_select(self, estimates):
r"""
Internal method used by ``self.calibrate()``.
Given the timing estimates, select the best parameter and set the
appropriate private fields.
INPUT:
- `estimates` - list of time estimates, for the search size set to the
index of the list entry.
OUTPUT: None, but sets the private fields `self._parameters` and
`self._time_estimate`.
TESTS::
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: C = codes.GolayCode(GF(2))
sage: A = LeeBrickellISDAlgorithm(C, (0,3)); A
ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 3 errors
sage: A._calibrate_select([ 1.0, 2.0, 3.0, 0.5, 0.6, 1.0 ])
sage: A._time_estimate
0.500000000000000
sage: A._parameters
{'search_size': 3}
"""
search_size = 0
for p in range(1, len(estimates)):
if estimates[p] < estimates[search_size]:
search_size = p
self._parameters = { 'search_size': search_size }
self._time_estimate = estimates[search_size]
class LinearCodeInformationSetDecoder(Decoder):
r"""
Information-set decoder for any linear code.
Information-set decoding is a probabilistic decoding strategy that
essentially tries to guess `k` correct positions in the received word,
where `k` is the dimension of the code. A codeword agreeing with the
received word on the guessed position can easily be computed, and their
difference is one possible error vector. A "correct" guess is assumed when
this error vector has low Hamming weight.
The ISD strategy requires choosing how many errors is deemed acceptable. One
choice could be `d/2`, where `d` is the minimum distance of the code, but
sometimes `d` is not known, or sometimes more errors are expected. If one
chooses anything above `d/2`, the algorithm does not guarantee to return a
nearest codeword.
This simple algorithm is not very efficient in itself, but there are numerous
refinements to the strategy. Specifying which strategy to use among those
that Sage knows is done using the ``algorithm`` keyword. If this is not set,
an efficient choice will be made for you.
The various ISD algorithms all need to select a number of parameters. If you
choose a specific algorithm to use, you can pass these parameters as named
parameters directly to this class' constructor. If you don't, efficient
choices will be calibrated for you.
.. WARNING::
If there is no codeword within the specified decoding distance, then the
decoder may never terminate, or it may raise a
:exc:`sage.coding.decoder.DecodingError` exception, depending on the ISD
algorithm used.
INPUT:
- ``code`` -- A linear code for which to decode.
- ``number_errors`` -- an integer, the maximal number of errors to accept as
correct decoding. An interval can also be specified by giving a pair of
integers, where both end values are taken to be in the interval.
- ``algorithm`` -- (optional) the string name of the ISD algorithm to
employ. If this is not set, an appropriate one will be chosen.
A constructed
:class:`sage.coding.information_set_decoder.InformationSetAlgorithm`
object may also be given. In this case ``number_errors`` must match that
of the passed algorithm.
- ``**kwargs`` -- (optional) any number of named arguments passed on to the
ISD algorithm. Such are usually not required, and they can only be set if
``algorithm`` is set to a specific algorithm. See the documentation for
each individual ISD algorithm class for information on any named arguments
they may accept. The easiest way to access this documentation is to first
construct the decoder without passing any named arguments, then accessing
the ISD algorithm using
:meth:`sage.coding.information_set_decoder.LinearCodeInformationSetDecoder.algorithm`,
and then reading the `?` help on the constructed object.
EXAMPLES:
The principal way to access this class is through the
:meth:`sage.code.linear_code.AbstractLinearCode.decoder` method::
sage: C = codes.GolayCode(GF(3))
sage: D = C.decoder("InformationSet", 2); D
Information-set decoder (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors
You can specify which algorithm you wish to use, and you should do so in
order to pass special parameters to it::
sage: C = codes.GolayCode(GF(3))
sage: D2 = C.decoder("InformationSet", 2, algorithm="Lee-Brickell", search_size=2); D2
Information-set decoder (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors
sage: D2.algorithm()
ISD Algorithm (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors
sage: D2.algorithm().parameters()
{'search_size': 2}
If you specify an algorithm which is not known, you get a friendly error message::
sage: C.decoder("InformationSet", 2, algorithm="NoSuchThing")
Traceback (most recent call last):
...
ValueError: Unknown ISD algorithm 'NoSuchThing'. The known algorithms are ['Lee-Brickell'].
You can also construct an ISD algorithm separately and pass that. This is
mostly useful if you write your own ISD algorithms::
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: A = LeeBrickellISDAlgorithm(C, (0, 2))
sage: D = C.decoder("InformationSet", 2, algorithm=A); D
Information-set decoder (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors
When passing an already constructed ISD algorithm, you can't also pass
parameters to the ISD algorithm when constructing the decoder::
sage: C.decoder("InformationSet", 2, algorithm=A, search_size=2)
Traceback (most recent call last):
...
ValueError: ISD algorithm arguments are not allowed when supplying a constructed ISD algorithm
We can also information-set decode non-binary codes::
sage: C = codes.GolayCode(GF(3))
sage: D = C.decoder("InformationSet", 2); D
Information-set decoder (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors
There are two other ways to access this class::
sage: D = codes.decoders.LinearCodeInformationSetDecoder(C, 2); D
Information-set decoder (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors
sage: from sage.coding.information_set_decoder import LinearCodeInformationSetDecoder
sage: D = LinearCodeInformationSetDecoder(C, 2); D
Information-set decoder (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors
"""
def __init__(self, code, number_errors, algorithm=None, **kwargs):
r"""
TESTS:
``number_errors`` has to be either a list of Integers/ints, a tuple of Integers/ints,
or an Integer/int::
sage: C = codes.GolayCode(GF(2))
sage: D = C.decoder("InformationSet", "aa")
Traceback (most recent call last):
...
ValueError: number_errors should be an integer or a pair of integers
If ``number_errors`` is passed as a list/tuple, it has to contain only
two values, the first one being at most the second one::
sage: C = codes.GolayCode(GF(2))
sage: D = C.decoder("InformationSet", (4, 2))
Traceback (most recent call last):
...
ValueError: number_errors should be a positive integer or a valid interval within the positive integers
You cannot ask the decoder to correct more errors than the code length::
sage: D = C.decoder("InformationSet", 25)
Traceback (most recent call last):
...
ValueError: The provided number of errors should be at most the code's length
If ``algorithm`` is not set, additional parameters cannot be passed to
the ISD algorithm::
sage: D = C.decoder("InformationSet", 2, search_size=2)
Traceback (most recent call last):
...
ValueError: Additional arguments to an information-set decoder algorithm are only allowed if a specific algorithm is selected by setting the algorithm keyword
If ``algorithm`` is set to a constructed ISD algorithm, additional
parameters cannot be passed to the ISD algorithm::
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: A = LeeBrickellISDAlgorithm(C, (0, 2))
sage: D = C.decoder("InformationSet", 2, A, search_size=3)
Traceback (most recent call last):
...
ValueError: ISD algorithm arguments are not allowed when supplying a constructed ISD algorithm
If ``algorithm`` is set to a constructed
:class:`sage.coding.information_set_decoder.InformationSetAlgorithm`,
then ``number_errors`` must match that of the algorithm::
sage: C = codes.GolayCode(GF(2))
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: A = LeeBrickellISDAlgorithm(C, (0, 2))
sage: D = C.decoder("InformationSet", 2, A); D
Information-set decoder (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 2 errors
sage: D = C.decoder("InformationSet", (0,2), A); D
Information-set decoder (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 2 errors
sage: D = C.decoder("InformationSet", 3, A); D
Traceback (most recent call last):
...
ValueError: number_errors must match that of the passed ISD algorithm
"""
if isinstance(number_errors, (Integer, int)):
number_errors = (0, number_errors)
if isinstance(number_errors, (tuple, list)) and len(number_errors) == 2 \
and number_errors[0] in ZZ and number_errors[1] in ZZ:
if 0 > number_errors[0] or number_errors[0] > number_errors[1]:
raise ValueError(
"number_errors should be a positive integer or"
" a valid interval within the positive integers")
if number_errors[1] > code.length():
raise ValueError("The provided number of errors should be at"
" most the code's length")
else:
raise ValueError("number_errors should be an integer or a pair of integers")
self._number_errors = number_errors
super(LinearCodeInformationSetDecoder, self).__init__(
code, code.ambient_space(), code._default_encoder_name)
if algorithm is None:
if kwargs:
raise ValueError("Additional arguments to an information-set decoder"
" algorithm are only allowed if a specific"
" algorithm is selected by setting the algorithm"
" keyword")
algorithm = "Lee-Brickell"
algorithm_names = LinearCodeInformationSetDecoder.known_algorithms(dictionary=True)
if isinstance(algorithm, InformationSetAlgorithm):
if kwargs:
raise ValueError("ISD algorithm arguments are not allowed when"
" supplying a constructed ISD algorithm")
if number_errors != algorithm.decoding_interval():
raise ValueError("number_errors must match that of the passed"
" ISD algorithm")
self._algorithm = algorithm
elif algorithm in algorithm_names:
self._algorithm = algorithm_names[algorithm](code, number_errors, **kwargs)
else:
raise ValueError("Unknown ISD algorithm '{}'."
" The known algorithms are {}."\
.format(algorithm, sorted(algorithm_names)))
_known_algorithms = {
"Lee-Brickell": LeeBrickellISDAlgorithm
}
@staticmethod
def known_algorithms(dictionary=False):
r"""
Return the list of ISD algorithms that Sage knows.
Passing any of these to the constructor of
:class:`sage.coding.information_set_decoder.LinearCodeInformationSetDecoder`
will make the ISD decoder use that algorithm.
INPUT:
- ``dictionary`` - optional. If set to ``True``, return a ``dict``
mapping decoding algorithm name to its class.
OUTPUT: a list of strings or a ``dict`` from string to ISD algorithm class.
EXAMPLES::
sage: from sage.coding.information_set_decoder import LinearCodeInformationSetDecoder
sage: sorted(LinearCodeInformationSetDecoder.known_algorithms())
['Lee-Brickell']
"""
if dictionary:
return LinearCodeInformationSetDecoder._known_algorithms
else:
return LinearCodeInformationSetDecoder._known_algorithms.keys()
def algorithm(self):
r"""
Return the ISD algorithm used by this ISD decoder.
EXAMPLES::
sage: C = codes.GolayCode(GF(2))
sage: D = C.decoder("InformationSet", (2,4), "Lee-Brickell")
sage: D.algorithm()
ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding between 2 and 4 errors
"""
return self._algorithm
def decode_to_code(self, r):
r"""
Decodes a received word with respect to the associated code of this decoder.
.. WARNING::
If there is no codeword within the decoding radius of this decoder, this
method may never terminate, or it may raise a
:exc:`sage.coding.decoder.DecodingError` exception, depending on the ISD
algorithm used.
INPUT:
- ``r`` -- a vector in the ambient space of :meth:`decoder.Decoder.code`.
OUTPUT: a codeword of :meth:`decoder.Decoder.code`.
EXAMPLES::
sage: M = matrix(GF(2), [[1,0,0,0,0,0,1,0,1,0,1,1,0,0,1],\
[0,1,0,0,0,1,1,1,1,0,0,0,0,1,1],\
[0,0,1,0,0,0,0,1,0,1,1,1,1,1,0],\
[0,0,0,1,0,0,1,0,1,0,0,0,1,1,0],\
[0,0,0,0,1,0,0,0,1,0,1,1,0,1,0]])
sage: C = LinearCode(M)
sage: c = C.random_element()
sage: Chan = channels.StaticErrorRateChannel(C.ambient_space(), 2)
sage: r = Chan(c)
sage: D = C.decoder('InformationSet', 2)
sage: c == D.decode_to_code(r)
True
Information-set decoding a non-binary code::
sage: C = codes.GolayCode(GF(3)); C
[12, 6, 6] Extended Golay code over GF(3)
sage: c = C.random_element()
sage: Chan = channels.StaticErrorRateChannel(C.ambient_space(), 2)
sage: r = Chan(c)
sage: D = C.decoder('InformationSet', 2)
sage: c == D.decode_to_code(r)
True
Let's take a bigger example, for which syndrome decoding or
nearest-neighbor decoding would be infeasible: the `[59, 30]` Quadratic
Residue code over `\GF{3}` has true minimum distance 17, so we can
correct 8 errors::
sage: C = codes.QuadraticResidueCode(59, GF(3))
sage: c = C.random_element()
sage: Chan = channels.StaticErrorRateChannel(C.ambient_space(), 2)
sage: r = Chan(c)
sage: D = C.decoder('InformationSet', 8)
sage: c == D.decode_to_code(r) # long time
True
"""
C = self.code()
if r in C:
return r
return self.algorithm().decode(r)
def decoding_radius(self):
r"""
Return the maximal number of errors this decoder can decode.
EXAMPLES::
sage: C = codes.GolayCode(GF(2))
sage: D = C.decoder("InformationSet", 2)
sage: D.decoding_radius()
2
"""
return self._number_errors[1]
def decoding_interval(self):
r"""
A pair of integers specifying the interval of number of errors this
decoder will attempt to correct.
The interval includes both end values.
EXAMPLES::
sage: C = codes.GolayCode(GF(2))
sage: D = C.decoder("InformationSet", 2)
sage: D.decoding_interval()
(0, 2)
"""
return self._number_errors
def _repr_(self):
r"""
Returns a string representation of this decoding algorithm.
EXAMPLES::
sage: C = codes.GolayCode(GF(2))
sage: D = C.decoder("InformationSet", 2)
sage: D
Information-set decoder (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 2 errors
"""
return "Information-set decoder ({}) for {} decoding {} errors ".format(self.algorithm().name(), self.code(), _format_decoding_interval(self.decoding_interval()))
def _latex_(self):
r"""
Returns a latex representation of this decoding algorithm.
EXAMPLES::
sage: C = codes.GolayCode(GF(2))
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: D = C.decoder("InformationSet", 2)
sage: latex(D)
\textnormal{Information-set decoder (Lee-Brickell) for }[24, 12, 8] \textnormal{ Extended Golay Code over } \Bold{F}_{2} \textnormal{decoding up to 2 errors}
"""
return "\\textnormal{{Information-set decoder ({}) for }}{} \\textnormal{{decoding {} errors}}".format(self.algorithm().name(), self.code()._latex_(), _format_decoding_interval(self.decoding_interval()))
LinearCodeInformationSetDecoder._decoder_type = {"hard-decision",
"probabilistic", "not-always-closest", "bounded-distance", "might-fail"}
| 42.197835 | 211 | 0.606746 | 40,084 | 0.934947 | 0 | 0 | 972 | 0.022672 | 0 | 0 | 33,746 | 0.787115 |
6889889d0c9df269b34f5ffdec65275a1517a339 | 3,380 | py | Python | satelitedl/__init__.py | RyosukeDTomita/satelite-dl | f205adb9f97b67b8375147f6c9320a6742e032b0 | [
"MIT"
] | null | null | null | satelitedl/__init__.py | RyosukeDTomita/satelite-dl | f205adb9f97b67b8375147f6c9320a6742e032b0 | [
"MIT"
] | null | null | null | satelitedl/__init__.py | RyosukeDTomita/satelite-dl | f205adb9f97b67b8375147f6c9320a6742e032b0 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Usage: python3 -m satelitedl --date 19900101
Author: Ryosuke Tomita
Date: 2021/11/02
"""
import os
from os.path import abspath, dirname, join
from datetime import datetime
from typing import Union
from .options import parse_args
from .scrapeimg import ScrapingImg
def judge_date_type(date: str) -> str:
"""judge_date_type.
Using "date"(str) length, judge date type.
Args:
date (str): date
Returns:
str:
"""
if len(date) == 8:
date_type = "day"
elif len(date) == 6:
date_type = "month"
elif len(date) == 4:
date_type = "year"
else:
        raise Exception(f'{date} is not a valid date value.')
return date_type
def create_url(date: str) -> Union[str, None]:
"""create_url.
Args:
date (str): date
Returns:
Union[str, None]:
"""
try:
date_str = datetime.strptime(date, "%Y%m%d")
except ValueError:
return None
url_data_part = date_str.strftime("%Y/%m/%d/")
base_url = "http://weather.is.kochi-u.ac.jp/sat/ALL/"
return base_url + url_data_part
def _run_scrape_img(date: str):
"""run_scrape_img.
Args:
_date (str): _date
"""
url = create_url(date)
if url is None:
return
scrape_img = ScrapingImg(url)
scrape_img.fetch_img_url()
scrape_img.download_file()
def _mk_save_dir(date_day: str, outdir: str):
year, month, day = date_day[0:4], date_day[4:6], date_day[6:8]
year_dir = join(abspath(outdir), year)
month_dir = join(year_dir, month)
day_dir = join(month_dir, day)
for dir_ in [year_dir, month_dir, day_dir]:
if not os.path.exists(dir_):
os.mkdir(dir_)
return day_dir
def use_scrapeimg(date: str, date_type: str, outdir: str):
"""use_scrapeimg.
use module "scrapeimg".
url are created by create_url().
Args:
date (str): date
date_type (str): date_type
"""
month_list = tuple([f'{m+1:02}' for m in range(12)])
day_list = tuple([f'{y+1:02}' for y in range(31)])
if date_type == "year":
for month in month_list:
for day in day_list:
date_day = (date + month + day)
save_dir = _mk_save_dir(date_day, outdir)
os.chdir(save_dir)
_run_scrape_img(date_day)
os.chdir(abspath(dirname(__file__)))
elif date_type == "month":
for day in day_list:
date_day = (date + day)
save_dir = _mk_save_dir(date_day, outdir)
os.chdir(save_dir)
_run_scrape_img(date_day)
os.chdir(abspath(dirname(__file__)))
elif date_type == "day":
date_day = date
save_dir = _mk_save_dir(date_day, outdir)
os.chdir(save_dir)
_run_scrape_img(date_day)
os.chdir(abspath(dirname(__file__)))
def main():
"""main
    1. Parse the command-line arguments.
    2. Scrape satellite pictures from
       "http://weather.is.kochi-u.ac.jp/sat/ALL/".
"""
args = parse_args()
date = args["date"]
if date is None:
        raise Exception('No date argument was given.')
outdir = args["outdir"]
if outdir is None:
outdir = dirname(__file__)
date_type = judge_date_type(date)
use_scrapeimg(date, date_type, outdir)
__all__ = ["main", "create_url", "judge_date_type", "use_scrapeimg"]
| 25.413534 | 68 | 0.601479 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 922 | 0.272781 |
688d68c26f6147245edb5881ed9bf81be6cc0b77 | 602 | py | Python | ostrawling/event/particle.py | JacobRawling/OstrawlingMC | d87c12c8502a12c150ece0eac0fbe2c829062f6e | [
"MIT"
] | null | null | null | ostrawling/event/particle.py | JacobRawling/OstrawlingMC | d87c12c8502a12c150ece0eac0fbe2c829062f6e | [
"MIT"
] | null | null | null | ostrawling/event/particle.py | JacobRawling/OstrawlingMC | d87c12c8502a12c150ece0eac0fbe2c829062f6e | [
"MIT"
] | null | null | null | from .four_momentum import FourMomentum
class Particle:
def __init__(self, pdg_id, momentum):
self.pdg_id = pdg_id
self.momentum = momentum
        self.momentum.set_basis(
            (momentum.px, momentum.py, momentum.pz,
             Particle.get_mass(pdg_id)),
            'x,y,z,m'
        )
def mass(self):
return self.momentum.m
@staticmethod
    def get_mass(pdg_id):
# Electrons
if abs(pdg_id) == 11:
return 0.5e-3
# Muons
if abs(pdg_id) == 13:
return 106e-3
return 1.0
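# Usage note (added): the masses above appear to be in GeV, e.g.
# Particle.get_mass(11) == 0.5e-3 (electron), Particle.get_mass(13) == 106e-3
# (muon), and 1.0 for any other PDG id.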
| 24.08 | 54 | 0.528239 | 560 | 0.930233 | 0 | 0 | 210 | 0.348837 | 0 | 0 | 28 | 0.046512 |
688e64a4f428342bad4484cacd07ec145d18e8f0 | 2,572 | py | Python | src/icolos/core/workflow_steps/gromacs/genion.py | jharrymoore/Icolos | c60cc00c34208ab7011d41d52a74651763673e7a | [
"Apache-2.0"
] | null | null | null | src/icolos/core/workflow_steps/gromacs/genion.py | jharrymoore/Icolos | c60cc00c34208ab7011d41d52a74651763673e7a | [
"Apache-2.0"
] | null | null | null | src/icolos/core/workflow_steps/gromacs/genion.py | jharrymoore/Icolos | c60cc00c34208ab7011d41d52a74651763673e7a | [
"Apache-2.0"
] | null | null | null | from icolos.utils.enums.step_enums import StepBaseEnum, StepGromacsEnum
from icolos.utils.enums.program_parameters import GromacsEnum
from icolos.core.workflow_steps.gromacs.base import StepGromacsBase
from icolos.utils.execute_external.gromacs import GromacsExecutor
from pydantic import BaseModel
from icolos.core.workflow_steps.step import _LE
import os
_GE = GromacsEnum()
_SGE = StepGromacsEnum()
_SBE = StepBaseEnum
class StepGMXGenion(StepGromacsBase, BaseModel):
"""
Wrapper for gmx genion
"""
def __init__(self, **data):
super().__init__(**data)
self._initialize_backend(executor=GromacsExecutor)
self._check_backend_availability()
def execute(self):
tmp_dir = self._make_tmpdir()
self._write_input_files(tmp_dir)
arguments = self._parse_arguments(
{
# input file paths are handled internally
"-o": _SGE.STD_STRUCTURE,
"-p": self.data.generic.get_argument_by_extension(_SGE.FIELD_KEY_TOPOL),
"-s": self.data.generic.get_argument_by_extension(_SGE.FIELD_KEY_TPR),
}
)
result = self._backend_executor.execute(
command=_GE.GENION,
arguments=arguments,
location=tmp_dir,
pipe_input=self.construct_pipe_arguments(
tmp_dir, self.settings.additional[_SBE.PIPE_INPUT]
),
)
for line in result.stdout.split("\n"):
self._logger_blank.log(line, _LE.DEBUG)
self._logger.log(
f"Completed execution for {self.step_id} successfully", _LE.INFO
)
# this is the last structural change to the topology in a regular gromacs setup,
# update the index groups here
make_ndx_args = ["-f", _SGE.STD_STRUCTURE, "-o", _SGE.STD_INDEX]
index_files = [f for f in os.listdir(tmp_dir) if f.endswith(".ndx")]
# remove any existing index files
for f in index_files:
self._remove_temporary(os.path.join(tmp_dir, f))
# generate new index file
result = self._backend_executor.execute(
command=_GE.MAKE_NDX,
arguments=make_ndx_args,
location=tmp_dir,
check=True,
pipe_input='echo -e "1 | 12 \nq"',
)
for line in result.stdout.split("\n"):
self._logger_blank.log(line, _LE.DEBUG)
self._logger.log('Added index group to "index.ndx"', _LE.DEBUG)
self._parse_output(tmp_dir)
self._remove_temporary(tmp_dir)
| 36.742857 | 88 | 0.641524 | 2,146 | 0.83437 | 0 | 0 | 0 | 0 | 0 | 0 | 391 | 0.152022 |
688e6a6789ccea72d0b875d4cb59a3a4a8aed53b | 6,811 | py | Python | mayo/session/train.py | deep-fry/mayo | 7211a11fdb9bb0a036d496a3eba16c96db122f89 | [
"MIT"
] | 110 | 2018-06-07T17:52:29.000Z | 2022-03-28T08:04:02.000Z | mayo/session/train.py | kypomon/mayo | 7211a11fdb9bb0a036d496a3eba16c96db122f89 | [
"MIT"
] | 6 | 2019-10-17T12:00:29.000Z | 2021-10-21T13:41:22.000Z | mayo/session/train.py | kypomon/mayo | 7211a11fdb9bb0a036d496a3eba16c96db122f89 | [
"MIT"
] | 22 | 2018-07-05T01:30:49.000Z | 2021-10-19T06:15:40.000Z | import math
import tensorflow as tf
from mayo.log import log
from mayo.util import (
Percent, memoize_method, memoize_property, object_from_params)
from mayo.session.base import SessionBase
class Train(SessionBase):
mode = 'train'
def __init__(self, config):
super().__init__(config)
self._run_train_ops = True
self._setup_train_operation()
self._init()
self._checkpoint_epoch = ''
@memoize_property
def learning_rate(self):
params = self.config.train.learning_rate
lr_class, params = object_from_params(params)
if lr_class is tf.train.piecewise_constant:
# `tf.train.piecewise_constant` uses argument name 'x' instead
# just to make life more difficult
step_name = 'x'
else:
step_name = 'global_step'
params[step_name] = self.num_epochs
log.debug(
'Using learning rate {!r} with params {}.'
.format(lr_class.__name__, params))
return lr_class(**params)
@memoize_property
def optimizer(self):
params = self.config.train.optimizer
optimizer_class, params = object_from_params(params)
log.debug('Using optimizer {!r}.'.format(optimizer_class.__name__))
return optimizer_class(self.learning_rate, **params)
@staticmethod
def _average_gradients(tower_grads):
tower_grads = list(tower_grads)
if len(tower_grads) == 1:
return tower_grads[0]
average_grads = []
for grad_and_vars in zip(*tower_grads):
grads = []
for g, v in grad_and_vars:
# add 0 dimension to the gradients to represent the tower
if g is None:
raise ValueError(
'Gradient for variable {} is None, please check '
'connection.'.format(v))
g = tf.expand_dims(g, 0)
grads.append(g)
# average over the 'tower' dimension.
grad = tf.concat(axis=0, values=grads)
grad = tf.reduce_mean(grad, 0)
# simply return the first tower's pointer to the Variable
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
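    # Worked illustration (added comment): if two towers report gradients
    # [(g1, v)] and [(g2, v)] for the same variable v, the loop above stacks
    # g1 and g2 along a new leading axis, reduces them with the mean and
    # returns [(0.5 * (g1 + g2), v)], keeping the first tower's variable.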
@staticmethod
def _loss_formatter(key, name):
def formatter(estimator):
loss_mean, loss_std = estimator.get_mean_std(key)
if math.isnan(loss_mean):
raise ValueError('Model diverged with a nan-valued loss.')
loss_std = '±{}'.format(Percent(loss_std / loss_mean))
return '{}: {:10f}{:5}'.format(name, loss_mean, loss_std)
return formatter
@memoize_method
def _losses_and_gradients(self):
formatter = self._loss_formatter('regularization', 'regu')
regularization = self.get_collection(
tf.GraphKeys.REGULARIZATION_LOSSES, first_gpu=True)
if regularization:
self.estimator.register(
tf.add_n(regularization), 'regularization',
formatter=formatter)
def gradient(net, prediction, truth):
loss = [self.task.train(net, prediction, truth)] + regularization
loss = tf.add_n(loss)
return loss, self.optimizer.compute_gradients(loss)
tower_losses, tower_grads = zip(*self.task.map(gradient))
return tower_losses, self._average_gradients(tower_grads)
def _setup_train_operation(self):
ops = {}
self._losses, gradients = self._losses_and_gradients()
self._mean_loss = tf.reduce_mean(self._losses)
ops['app_grad'] = self.optimizer.apply_gradients(gradients)
# update ops
update_ops = list(self.get_collection(tf.GraphKeys.UPDATE_OPS))
ops['update'] = tf.group(*update_ops, name='update')
log.debug('Using update operations: {}'.format(update_ops))
log.debug('Using training operations: {}'.format(ops))
if self.extra_train_ops:
ops['extra'] = self.extra_train_ops
self._train_op = ops
def _init(self):
self.load_checkpoint(self.config.system.checkpoint.load)
formatter = self._loss_formatter('loss', 'loss')
self.estimator.register(self._mean_loss, 'loss', formatter=formatter)
def reset_num_epochs(self):
        log.info('Resetting number of training epochs of the model...')
self.run(self.imgs_seen.initializer)
self.change.reset('checkpoint.epoch')
self.change.reset('step')
def once(self):
train_op = self._train_op if self._run_train_ops else []
tasks = [train_op, self.num_epochs]
_, num_epochs = self.run(tasks, batch=True)
return num_epochs
def overriders_assign(self):
log.info('Assigning overridden values of parameters to parameters...')
self._overriders_call('assign')
def overriders_update(self):
log.info('Updating overrider internal variables...')
self._overriders_call('update')
def overriders_reset(self):
log.info('Resetting overriders internal variables...')
self._overriders_call('reset')
def _iteration(self, max_epochs=None):
system = self.config.system
epoch = self.once()
floor_epoch = math.floor(epoch)
cp_interval = system.checkpoint.get('save.interval', 0)
if self.change.every('checkpoint.epoch', floor_epoch, cp_interval):
log.info(
'Saving checkpoint at epoch {}...'.format(epoch), update=True)
with log.demote():
self.save_checkpoint(floor_epoch)
self._checkpoint_epoch = floor_epoch
max_epochs = max_epochs or system.max_epochs
if max_epochs and epoch >= max_epochs:
log.info(
'Maximum epoch count {} reached.'.format(max_epochs))
if self._checkpoint_epoch and floor_epoch > self._checkpoint_epoch:
log.info('Saving final checkpoint...')
self.save_checkpoint(floor_epoch)
return False
return True
def train(self, max_epochs=None):
# final debug outputs
lr = self.run(self.learning_rate)
log.info('Training start with a learning rate {}.'.format(lr))
try:
# train iterations
while self._iteration(max_epochs=max_epochs):
pass
except KeyboardInterrupt:
log.info('Stopped.')
save = self.config.system.checkpoint.get('save', {})
if save:
countdown = save.get('countdown', 0)
if log.countdown('Saving checkpoint', countdown):
self.save_checkpoint('latest')
| 38.698864 | 79 | 0.616356 | 6,613 | 0.970787 | 0 | 0 | 3,055 | 0.448473 | 0 | 0 | 1,129 | 0.165737 |
688f82d913086422926e43ef0486a06096928207 | 570 | py | Python | lazysignup/utils.py | yoccodog/django-lazysignup | 18d273b97083c8e2a21e54bf326f41c0b4e231fb | [
"BSD-3-Clause"
] | null | null | null | lazysignup/utils.py | yoccodog/django-lazysignup | 18d273b97083c8e2a21e54bf326f41c0b4e231fb | [
"BSD-3-Clause"
] | null | null | null | lazysignup/utils.py | yoccodog/django-lazysignup | 18d273b97083c8e2a21e54bf326f41c0b4e231fb | [
"BSD-3-Clause"
] | 1 | 2018-06-22T13:07:34.000Z | 2018-06-22T13:07:34.000Z | def is_lazy_user(user):
""" Return True if the passed user is a lazy user. """
# Anonymous users are not lazy.
if user.is_anonymous:
return False
# Check the user backend. If the lazy signup backend
# authenticated them, then the user is lazy.
backend = getattr(user, 'backend', None)
if backend == 'lazysignup.backends.LazySignupBackend':
return True
# Otherwise, we have to fall back to checking the database.
from lazysignup.models import LazyUser
return bool(LazyUser.objects.filter(user=user).count() > 0)
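# Usage sketch (added; the view and URL name below are hypothetical):
#
#   def convert_prompt(request):
#       if is_lazy_user(request.user):
#           return redirect("convert")  # hypothetical URL name
#       ...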
| 33.529412 | 63 | 0.685965 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 288 | 0.505263 |
688fe87882b842c1041f5df8b971fea0174982fb | 2,753 | py | Python | md/obs-tntq_md2md.py | unfoldingWord-dev/tools | 7251d64b4750f1615125dab3c09d6d00a9c284b4 | [
"MIT"
] | 6 | 2015-07-27T21:50:39.000Z | 2020-06-25T14:32:35.000Z | md/obs-tntq_md2md.py | unfoldingWord-dev/tools | 7251d64b4750f1615125dab3c09d6d00a9c284b4 | [
"MIT"
] | 89 | 2015-06-24T09:35:40.000Z | 2022-02-13T14:40:31.000Z | md/obs-tntq_md2md.py | unfoldingWord-dev/tools | 7251d64b4750f1615125dab3c09d6d00a9c284b4 | [
"MIT"
] | 12 | 2015-07-13T17:31:04.000Z | 2021-08-06T06:50:21.000Z | # -*- coding: utf-8 -*-
# This script copies a directory of OBS-tQ or OBS-tN markdown files to a second location.
# It cleans up the files in these ways:
# Ensures blank lines surrounding markdown headers.
# Fixes links of this form [[:en:...]]
# Removes leading spaces.
# Global variables
source_dir = r'C:\DCS\Russian\OBS-TN\content'
target_dir = r'C:\DCS\Russian\ru_obs-tn.work\content' # path should end with "\content"
resource_type = 'obs-tn'
language_code = 'ru'
import re
import io
import os
import sys
import convert2md
# Returns path of .md file in target directory.
def makeMdPath(story, fname):
mdPath = os.path.join(target_dir, story)
if not os.path.isdir(mdPath):
os.mkdir(mdPath)
return os.path.join(mdPath, fname)
#prefix_re = re.compile(r'C:\\DCS')
def shortname(longpath):
shortname = longpath
if source_dir in longpath:
shortname = longpath[len(source_dir)+1:]
return shortname
# Converts .md file in fullpath location to .md file in target dir.
def convertFile(story, fname, fullpath):
if os.access(fullpath, os.F_OK):
mdPath = makeMdPath(story, fname)
convert2md.md2md(fullpath, mdPath, language_code, shortname)
# This method is called to convert the text files in the specified story folder.
# It renames files that have only a single digit in the name.
def convertStory(story, fullpath):
for fname in os.listdir(fullpath):
        if re.match(r'\d\.md', fname):
goodPath = os.path.join(fullpath, '0' + fname)
if not os.path.exists(goodPath):
badPath = os.path.join(fullpath, fname)
os.rename(badPath, goodPath)
fname = '0' + fname
        if (re.match(r'\d\d\.md', fname) and fname != '00.md'):
convertFile(story, fname, os.path.join(fullpath, fname))
# It looks like OBS-sQ repos consist only of 50 .md files in one folder
def convertSQfolder(folder):
for fname in os.listdir(folder):
        if re.match(r'\d\d\.md', fname):
convertFile("", fname, os.path.join(folder, fname))
# Converts the stories contained in the specified folder
def convert(source_dir):
if not os.path.isdir(target_dir):
os.mkdir(target_dir)
if resource_type == 'obs-sq':
convertSQfolder(source_dir)
else:
for item in os.listdir(source_dir):
folder = os.path.join(source_dir, item)
if os.path.isdir(folder):
convertStory(item, folder)
# Processes each directory and its files one at a time
if __name__ == "__main__":
if len(sys.argv) > 1 and sys.argv[1] != 'hard-coded-path':
source_dir = sys.argv[1]
if os.path.isdir(source_dir):
convert(source_dir)
print("\nDone.")
| 34.848101 | 90 | 0.658918 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 967 | 0.351253 |
688feb8d7e8fef2bae057b526cc5ad3bba5366d4 | 2,620 | py | Python | hw10_conine/thomas.py | gconine88/MATH_6204 | ecff4ecd3ae423c113d8259fe24b76b4a67de6ea | [
"MIT"
] | null | null | null | hw10_conine/thomas.py | gconine88/MATH_6204 | ecff4ecd3ae423c113d8259fe24b76b4a67de6ea | [
"MIT"
] | null | null | null | hw10_conine/thomas.py | gconine88/MATH_6204 | ecff4ecd3ae423c113d8259fe24b76b4a67de6ea | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 20 23:06:49 2017
@author: Grant
An implementation of the Thomas algorithm in Python, using just-in-time
compiling from numba for additional speed
"""
import numpy as np
from numba import njit, f8
def solve(A, d):
'''Helper function for Thomas algorith. Breaks matrix into tridiagonal
elements for easier processing by algorithm. '''
# pass numba float64 dtype np.arrays to the solve function - need to
# perform this step to allow for nopython execution of thomas algorithm
# which yields maximum speed
a = f8(np.diagonal(A, offset=0))
b = f8(np.diagonal(A, offset=1))
c = f8(np.diagonal(A, offset=-1))
dfloat = f8(d)
D = np.diag(a, 0) + np.diag(b, 1) + np.diag(c, -1) #create test matrix
# test if D is 'close enough' to A - if not that means A was not
# tridiagonal and the function raises an exception
if not np.allclose(A, D):
raise Exception('The given A is not tridiagonal')
# pass to thomas algorithm solver
x = solve_body(a, b, c, dfloat)
return x
# chose to use njit decorator to force nopython implementation and
# get faster speed. Downside is I lose flexibility in input of solver, must
# wrap in another function which will format data correctly
@njit
def solve_body(a, b, c, d):
''' Thomas algorithm to solve a tridiagonal system of equations
INPUTS
========
a: numpy array
the diagonal entries
b: numpy array
the superdiagonal entries
c: numpy array
the subdiagonal entries
d: numpy array
the right-hand side of the system of equations
RETURNS
========
The solution for the given tri-diagonal system of equations.
'''
n = len(a) # determine number of equations in system
#initialize
alpha = np.zeros(n)
beta = np.zeros(n)
alpha[0] = a[0]
beta[0] = d[0]
# first (forward) loop to zero c[i]'s
for i in range(1, n, 1):
# in python, c's index is from 0 to n-2, not 1 to n-1, have to subtract 1
alpha[i] = a[i] - (b[i-1] * c[i-1]) / alpha[i-1]
beta[i] = d[i] - (beta[i-1] * c[i-1]) / alpha[i-1]
#initialize and set last step
x = np.zeros(n)
x[n-1] = beta[n-1] / alpha[n-1]
# second (backwards) loop to find solutions
for j in range(n-2, -1, -1): #indices are weird, want to step from n-2 to 0
x[j] = (beta[j] - b[j-1] * x[j+1]) / alpha[j]
return x
| 30.465116 | 82 | 0.591603 | 0 | 0 | 0 | 0 | 1,249 | 0.476718 | 0 | 0 | 1,634 | 0.623664 |
68907c9f56a7dff7255659e5fac49a90b9c790a1 | 1,444 | py | Python | chemreg/auth/tests/test_views.py | Chemical-Curation/chemcurator | bcd7fab84e407f06502e6873c38820724d4e54e7 | [
"MIT"
] | 1 | 2020-10-05T18:02:24.000Z | 2020-10-05T18:02:24.000Z | chemreg/auth/tests/test_views.py | Chemical-Curation/chemcurator_django | bcd7fab84e407f06502e6873c38820724d4e54e7 | [
"MIT"
] | 207 | 2020-01-30T19:17:44.000Z | 2021-02-24T19:45:29.000Z | chemreg/auth/tests/test_views.py | Chemical-Curation/chemcurator_django | bcd7fab84e407f06502e6873c38820724d4e54e7 | [
"MIT"
] | null | null | null | from base64 import b64encode
import pytest
@pytest.mark.django_db
def test_login_view(user, client, settings):
"""Test that the login view can login and logout."""
login_url = f"/{settings.LOGIN_URL.strip('/')}/"
# Django doesn't store the raw password, so we need to set one we know.
password = "A test password"
user.set_password(password)
user.save()
# Test login with valid credentials
auth_str = b64encode(f"{user.username}:{password}".encode()).decode()
client.credentials(HTTP_AUTHORIZATION=f"Basic {auth_str}")
response = client.post(login_url)
assert response.status_code == 200
assert response.data["username"] == user.username
assert response.data["email"] == user.email
# Get user using just session ID
client.credentials()
response = client.get(login_url)
assert response.status_code == 200
assert response.data["username"] == user.username
assert response.data["email"] == user.email
# Logout using just session ID
response = client.delete(login_url)
assert response.status_code == 200
assert not response.data["username"]
assert not response.data["email"]
# Test with invalid credentials
response = client.post(login_url)
assert response.status_code == 403
response = client.get(login_url)
assert response.status_code == 403
response = client.delete(login_url)
assert response.status_code == 403
| 32.088889 | 75 | 0.702216 | 0 | 0 | 0 | 0 | 1,397 | 0.967452 | 0 | 0 | 403 | 0.279086 |
68914268f5d91c6c327ee9480e17e743af2a3790 | 9,445 | py | Python | vnpy/app/cta_strategy/strategies/tsmyo_polyfit_strategy.py | TheSuperMyo/vnpy | e38b7f4de879f1756aa664d5dfe7e0bec65c9a1b | [
"MIT"
] | null | null | null | vnpy/app/cta_strategy/strategies/tsmyo_polyfit_strategy.py | TheSuperMyo/vnpy | e38b7f4de879f1756aa664d5dfe7e0bec65c9a1b | [
"MIT"
] | null | null | null | vnpy/app/cta_strategy/strategies/tsmyo_polyfit_strategy.py | TheSuperMyo/vnpy | e38b7f4de879f1756aa664d5dfe7e0bec65c9a1b | [
"MIT"
] | null | null | null | from datetime import time
from vnpy.app.cta_strategy import (
CtaTemplate,
StopOrder,
TickData,
BarData,
TradeData,
OrderData,
BarGenerator,
ArrayManager
)
from vnpy.app.cta_strategy.base import (
EngineType,
STOPORDER_PREFIX,
StopOrder,
StopOrderStatus,
)
from vnpy.app.cta_strategy.TSMtools import TSMArrayManager
import numpy as np
class TSMyoPolyfitStrategy(CtaTemplate):
""""""
author = "TheSuperMyo"
    # Intraday trading only
    exit_time = time(hour=14, minute=54)
    # Session times for markets with different trading hours
    open_time_night = time(hour=21,minute=0)# commodity night session
    open_time_day_1 = time(hour=9,minute=0)# commodities
    open_time_day_2 = time(hour=9,minute=30)# stock index futures
    close_time_day = time(hour=15,minute=0)# commodities/stock index (except interest-rate futures)
    close_time_night_1 = time(hour=23,minute=0)# other night-session commodities
    close_time_night_2 = time(hour=1,minute=0)# industrial metals
    close_time_night_3 = time(hour=2,minute=30)# gold/silver/crude oil
    break_time_start_1 = time(hour=10,minute=15)# commodity mid-morning break
    break_time_start_2 = time(hour=11,minute=30)# lunch break (all markets)
    break_time_end_1 = time(hour=10,minute=30)# commodity mid-morning break
    break_time_end_2 = time(hour=13,minute=0)# stock index afternoon session
    break_time_end_3 = time(hour=13,minute=30)# commodity afternoon session
    poly_entry_1 = 0.5 # entry threshold on the first derivative
    poly_entry_2 = 0.06 # entry threshold on the second derivative
    poly_out_1 = 0.2 # exit threshold on the first derivative
    poly_out_2 = 0 # exit threshold on the second derivative
    fit_bar = 3 # bar period (minutes) used for fitting
    setup_fit = 85 # minimum number of minutes fitted
    end_window = 95 # length of the entry time window (minutes)
    trailing_stop = 0.45 # trailing stop (percent)
    fixed_size = 1 # fixed lot size
    bar_counter = 0 # per-day minute counter
poly_1 = 0
poly_2 = 0
long_entry = 0
short_entry = 0
long_exit = 0
short_exit = 0
stop_long = 0
stop_short = 0
hold_high = 0
hold_low = 0
parameters = ['poly_entry_1','poly_entry_2','poly_out_1','poly_out_2','end_window','setup_fit','fit_bar','trailing_stop','fixed_size']
variables = ['bar_counter','poly_1','poly_2','stop_long','stop_short']
def __init__(self, cta_engine, strategy_name, vt_symbol, setting):
""""""
super(TSMyoPolyfitStrategy, self).__init__(
cta_engine, strategy_name, vt_symbol, setting
)
self.bg = BarGenerator(self.on_bar, self.fit_bar, self.on_fit_bar)
        # Stock index futures trade 240 minutes per day
        self.am = TSMArrayManager(240)
        # The strategy keeps track of its own orders
self.active_orderids = []
self.bars = []
def on_init(self):
"""
Callback when strategy is inited.
"""
self.write_log("策略初始化")
# 不会用到昨日数据
self.load_bar(10)
def on_start(self):
"""
Callback when strategy is started.
"""
self.write_log("策略启动")
def on_stop(self):
"""
Callback when strategy is stopped.
"""
self.write_log("策略停止")
def tick_filter(self, tick: TickData):
"""
        Filter out ticks that arrive outside regular trading hours.
"""
tick_time = tick.datetime.time()
if tick_time < self.open_time_day_2:
return False
if tick_time > self.break_time_start_2 and tick_time < self.break_time_end_2:
return False
if tick_time > self.close_time_day:
return False
return True
def on_tick(self, tick: TickData):
"""
Callback of new tick data update.
"""
if not self.tick_filter(tick):
return
self.bg.update_tick(tick)
def on_bar(self, bar: BarData):
"""
        1. Count the minutes of the trading day.
        2. Place orders according to the current signals.
"""
self.bar_counter += 1
self.bg.update_bar(bar)
self.cancel_all()
if self.pos == 0 and bar.datetime.time() < self.exit_time:
if self.long_entry:
                # Entry signal: open long at the close price
                if self.active_orderids:
                    self.write_log("Pending cancellations remain, cannot place order")
return
orderids = self.buy(bar.close_price, self.fixed_size, False, True)
self.active_orderids.extend(orderids)
if self.short_entry:
                # Entry signal: open short at the close price
                if self.active_orderids:
                    self.write_log("Pending cancellations remain, cannot place order")
return
orderids = self.short(bar.close_price, self.fixed_size, False, True)
self.active_orderids.extend(orderids)
if self.pos > 0:
self.hold_high = max(self.hold_high,bar.high_price)
self.stop_long = self.hold_high*(1-self.trailing_stop/100)
if self.long_exit or bar.datetime.time() > self.exit_time:
                # Close long on signal, or intraday close-out
                if self.active_orderids:
                    self.write_log("Pending cancellations remain, cannot place order")
return
orderids = self.sell(bar.close_price, self.fixed_size, False, True)
self.active_orderids.extend(orderids)
else:
                # Close long with the trailing stop order
                if self.active_orderids:
                    self.write_log("Pending cancellations remain, cannot place order")
return
orderids = self.sell(self.stop_long, self.fixed_size, True, True)
self.active_orderids.extend(orderids)
if self.pos < 0:
self.hold_low = min(self.hold_low,bar.high_price)
self.stop_short = self.hold_low*(1+self.trailing_stop/100)
if self.short_exit or bar.datetime.time() > self.exit_time:
                # Close short on signal, or intraday close-out
                if self.active_orderids:
                    self.write_log("Pending cancellations remain, cannot place order")
return
orderids = self.cover(bar.close_price, self.fixed_size, False, True)
self.active_orderids.extend(orderids)
else:
                # Close short with the trailing stop order
                if self.active_orderids:
                    self.write_log("Pending cancellations remain, cannot place order")
return
orderids = self.cover(self.stop_short, self.fixed_size, True, True)
self.active_orderids.extend(orderids)
def on_fit_bar(self, bar: BarData):
"""
        1. Handle the start-of-day re-initialization.
        2. Fit the recent bars, compute first/second derivatives and generate signals.
"""
# for backtest
# self.cta_engine.output(f"{bar.datetime.time()}")
# self.write_log(f"{bar.datetime.time()}")
am = self.am
am.update_bar(bar)
self.bars.append(bar)
if len(self.bars) <= 2:
return
else:
self.bars.pop(0)
last_bar = self.bars[-2]
        # First fitted bar after the market open
        if last_bar.datetime.date() != bar.datetime.date():
            # Reset the daily state
self.bar_counter = self.fit_bar
self.long_entry = 0
self.short_entry = 0
self.long_exit = 0
self.short_exit = 0
if self.bar_counter < self.setup_fit:
return
self.poly_1, self.poly_2 = am.polyfit(int((self.bar_counter)/self.fit_bar))
if self.pos == 0 and self.bar_counter < self.end_window:
if self.poly_1 > self.poly_entry_1 and self.poly_2 > self.poly_entry_2:
                # Price rising and accelerating: open-long signal
self.long_entry = 1
self.short_entry = 0
self.long_exit = 0
self.short_exit = 0
if self.poly_1 < -self.poly_entry_1 and self.poly_2 < -self.poly_entry_2:
                # Price falling and accelerating: open-short signal
self.long_entry = 0
self.short_entry = 1
self.long_exit = 0
self.short_exit = 0
if self.pos > 0:
if self.poly_1 < self.poly_out_1 or self.poly_2 < self.poly_out_2:
                # Rise decelerating: close-long signal
self.long_entry = 0
self.short_entry = 0
self.long_exit = 1
self.short_exit = 0
if self.pos < 0:
if self.poly_1 > -self.poly_out_1 or self.poly_2 > -self.poly_out_2:
self.long_entry = 0
self.short_entry = 0
self.long_exit = 0
self.short_exit = 1
def on_order(self, order: OrderData):
"""
Callback of new order data update.
"""
# 移除已成交或已撤销的订单
if not order.is_active() and order.vt_orderid in self.active_orderids:
self.active_orderids.remove(order.vt_orderid)
def on_trade(self, trade: TradeData):
"""
Callback of new trade data update.
"""
        # Email notification
        self.send_email(f"{trade.vt_symbol} filled at {trade.time}, price {trade.price}, direction {trade.direction}{trade.offset}, volume {trade.volume}")
self.long_entry = 0
self.short_entry = 0
self.long_exit = 0
self.short_exit = 0
self.put_event()
def on_stop_order(self, stop_order: StopOrder):
"""
Callback of stop order update.
"""
        # A local stop order that has just been created
if stop_order.status == StopOrderStatus.WAITING:
return
        # A cancelled local stop order: remove it from the active list
if stop_order.status == StopOrderStatus.CANCELLED:
if stop_order.stop_orderid in self.active_orderids:
self.active_orderids.remove(stop_order.stop_orderid)
        # A triggered local stop order: drop the stop order and track the resulting limit orders
if stop_order.status == StopOrderStatus.TRIGGERED:
if stop_order.stop_orderid in self.active_orderids:
self.active_orderids.remove(stop_order.stop_orderid)
self.active_orderids.extend(stop_order.vt_orderids)
            # Cancel any other remaining stop orders
for other_orderids in self.active_orderids:
if other_orderids.startswith(STOPORDER_PREFIX):
self.cancel_order(other_orderids) | 32.457045 | 138 | 0.565061 | 9,901 | 0.962664 | 0 | 0 | 0 | 0 | 0 | 0 | 2,278 | 0.221488 |
689273e3976328d8791057987bb0cd4e006aacad | 4,757 | py | Python | run.py | GpNico/KernelMethods_MVA_Kaggle | c6c083301977ca765557680f1e724a5d0322ef13 | [
"MIT"
] | null | null | null | run.py | GpNico/KernelMethods_MVA_Kaggle | c6c083301977ca765557680f1e724a5d0322ef13 | [
"MIT"
] | null | null | null | run.py | GpNico/KernelMethods_MVA_Kaggle | c6c083301977ca765557680f1e724a5d0322ef13 | [
"MIT"
] | null | null | null | # run.py
"""
Script for running a specific pipeline from a given yaml config file
"""
import os
import argparse
import yaml
from importlib import import_module
import numpy as np
import time
import pandas as pd
def import_from_path(path_to_module, obj_name = None):
"""
Import an object from a module based on the filepath of
the module and the string name of the object.
If obj_name is None, return the module instead.
"""
    module_name = path_to_module.replace("/", ".")
    # str.strip(".py") would strip any leading/trailing '.', 'p' or 'y' characters
    # (e.g. "preprocess.py" -> "reprocess"), so remove the extension explicitly.
    if module_name.endswith(".py"):
        module_name = module_name[:-3]
module = import_module(module_name)
    if obj_name is None:
return module
obj = getattr(module, obj_name)
return obj
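# Illustrative example (hypothetical file layout): given a file "models/svm.py"
# that defines a class "SVM", import_from_path("models/svm.py", "SVM") returns
# that class, while import_from_path("models/svm.py") returns the module itself.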
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = __doc__)
parser.add_argument("-c", "--config", help = "File path to the config file")
parser.add_argument("-o", "--output", help = "Path to the output file")
args = parser.parse_args()
with open(args.config) as config_file:
config = yaml.safe_load(config_file)
if args.output != None:
output = True
out_csv = args.output
dfs = []
else:
output = False
# Importing pipeline elements
ds_splitter = import_from_path(config["split"]["filepath"],
config["split"]["class"]) (**config["split"]["parameters"])
preprocess = import_from_path(config["preprocess"]["filepath"])
model_params = config["model"]["parameters"]
if "kernel" in model_params:
kernel_func = import_from_path(model_params["kernel"]["filepath"],
model_params["kernel"]["class"])
kernel_params = model_params["kernel"]["parameters"]
model_params["kernel"] = lambda X, Y: kernel_func(X,Y,**kernel_params)
model = import_from_path(config["model"]["filepath"],
config["model"]["class"])(**config["model"]["parameters"])
evaluation = import_from_path(config["evaluation"]["filepath"])
# Evaluation output directory
out_dir = 'submissions'
if output and not os.path.isdir(out_dir):
os.makedirs(out_dir)
# Lists filling information for the output dataframe
datasets = []
metrics = []
values = []
# Applying pipeline
# Iterate over datasets
for i, dataset in enumerate(config["datasets"]):
time_beg = time.time()
print("Working on dataset ", i)
# Read dataset
X = pd.read_csv(dataset["X"]["filepath"],
**dataset["X"]["parameters"])
## It is currently very important to drop Id before splitting or preprocessing
y = pd.read_csv(dataset["y"]["filepath"],
**dataset["y"]["parameters"]).drop("Id", axis = 1)
if output:
test = pd.read_csv(dataset["test"]["filepath"],
**dataset["test"]["parameters"])
# Split dataset
ds_splitter.generate_idx(y)
X_train, X_test = ds_splitter.split(X)
y_train, y_test = ds_splitter.split(y)
# Preprocess dataset
for transform in config["preprocess"]["X"]:
X_train = getattr(preprocess, transform["transform"])(X_train, **transform["parameters"])
X_test = getattr(preprocess, transform["transform"])(X_test, **transform["parameters"])
for transform in config["preprocess"]["y"]:
y_train = getattr(preprocess, transform["transform"])(y_train, **transform["parameters"])
y_test = getattr(preprocess, transform["transform"])(y_test, **transform["parameters"])
if output:
for transform in config["preprocess"]["X"]:
test = getattr(preprocess, transform["transform"])(test, **transform["parameters"])
# Fit model
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
if output:
y_pred_test = model.predict(test)
y_pred_test = (y_pred_test + 1)/2
id = np.arange(1000*i, 1000*(i + 1))
dic = {'Id': id, 'Bound': y_pred_test}
df = pd.DataFrame(data = dic)
dfs.append(df)
# Evaluate model
for metric in config["evaluation"]["metrics"]:
datasets.append(dataset["name"])
metrics.append(metric)
values.append(getattr(evaluation, metric)(y_pred, y_test))
print("Done ! In {} s".format(time.time() - time_beg))
if output:
df = pd.concat(dfs).astype('int32')
df.to_csv(os.path.join(out_dir, out_csv), index = False)
results = {"datasets": datasets, "metrics": metrics, "values": values}
print(pd.DataFrame.from_dict(results)) | 35.766917 | 101 | 0.596384 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,286 | 0.270338 |
689285cf22e54c3ba6df2bb206502e17fb240035 | 1,970 | py | Python | alien_invasion.py | shtiyu/biubiubiu | ebb41a14d4fd14cfc2a27a01af755713e084c4b3 | [
"MIT"
] | 3 | 2018-11-22T11:31:47.000Z | 2022-02-22T06:29:59.000Z | alien_invasion.py | shtiyu/biubiubiu | ebb41a14d4fd14cfc2a27a01af755713e084c4b3 | [
"MIT"
] | null | null | null | alien_invasion.py | shtiyu/biubiubiu | ebb41a14d4fd14cfc2a27a01af755713e084c4b3 | [
"MIT"
] | null | null | null | import pygame
from pygame.sprite import Group
from button import Button
from game_stats import GameStats
from settings import Settings
from ship import Ship
from scoreboard import Scoreboard
import game_funcitons as gf
def run_game():
pygame.init()
pygame.mixer.init()
ai_settings = Settings()
screen = pygame.display.set_mode(
(ai_settings.screen_width, ai_settings.screen_height))
pygame.display.set_caption("Alien Invasion")
play_button = Button(ai_settings, screen, 'PLAY')
stats = GameStats(ai_settings)
scoreboard = Scoreboard(ai_settings, screen, stats)
bg_img1 = pygame.image.load("images/map.jpg").convert()
bg_img2 = bg_img1.copy()
pos_y1 = -1024
pos_y2 = 0
ship = Ship(ai_settings, screen)
aliens = Group()
bullets = Group()
alien_bullets = Group()
gf.create_fleet(ai_settings, screen, aliens, alien_bullets)
    # Background music
gf.play_music('bgm')
clock = pygame.time.Clock()
while True:
        # Handle keyboard and mouse events
gf.check_events(ai_settings, screen, stats, scoreboard, play_button, ship, aliens, bullets, alien_bullets)
gf.update_bullets(ai_settings, screen, stats, scoreboard, aliens, bullets, alien_bullets)
time_passed = clock.tick()
if stats.game_active:
stats.increase_time(time_passed)
            # Update the ship and bullets
ship.update()
            # Update the alien positions
gf.update_aliens(ai_settings, stats, scoreboard, screen, ship, aliens, bullets, alien_bullets, time_passed)
        # Scroll the background
screen.blit(bg_img1, (0, pos_y1))
screen.blit(bg_img2, (0, pos_y2))
pos_y1 += ai_settings.bg_roll_speed_factor
pos_y2 += ai_settings.bg_roll_speed_factor
if pos_y1 > 0:
pos_y1 = -1024
if pos_y2 > 1024:
pos_y2 = 0
gf.update_screen(ai_settings, screen, stats, scoreboard, ship, aliens, bullets, alien_bullets, play_button, time_passed)
run_game() | 28.970588 | 128 | 0.664975 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 119 | 0.059086 |
6892e2003fc1c2b7ef39326cbcdfaa61a234e1ef | 185 | py | Python | insights_messaging/downloaders/localfs.py | dpensi/insights-core-messaging | a964cecdf5cbb57407dae9e9208a31fc5e9318e4 | [
"Apache-2.0"
] | 6 | 2019-12-12T14:19:30.000Z | 2020-04-08T16:20:04.000Z | insights_messaging/downloaders/localfs.py | dpensi/insights-core-messaging | a964cecdf5cbb57407dae9e9208a31fc5e9318e4 | [
"Apache-2.0"
] | 14 | 2020-01-27T17:04:39.000Z | 2021-03-16T15:18:30.000Z | insights_messaging/downloaders/localfs.py | dpensi/insights-core-messaging | a964cecdf5cbb57407dae9e9208a31fc5e9318e4 | [
"Apache-2.0"
] | 13 | 2019-12-16T09:32:38.000Z | 2021-05-05T12:39:38.000Z | import os
from contextlib import contextmanager
class LocalFS:
@contextmanager
def get(self, src):
path = os.path.realpath(os.path.expanduser(src))
yield path
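# Usage sketch (added; the archive path and handler are hypothetical):
#
#   with LocalFS().get("~/archives/insights.tar.gz") as path:
#       process(path)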
| 18.5 | 56 | 0.686486 | 134 | 0.724324 | 95 | 0.513514 | 115 | 0.621622 | 0 | 0 | 0 | 0 |
68943b195cd6c1b43741489cbceaa66e3ae51918 | 3,542 | py | Python | tests/test_utilities/test_manifest_parser.py | QualiSystems/DevBox | 9a1807006bc93727970068d586764e9dccda94ec | [
"Apache-1.1"
] | null | null | null | tests/test_utilities/test_manifest_parser.py | QualiSystems/DevBox | 9a1807006bc93727970068d586764e9dccda94ec | [
"Apache-1.1"
] | null | null | null | tests/test_utilities/test_manifest_parser.py | QualiSystems/DevBox | 9a1807006bc93727970068d586764e9dccda94ec | [
"Apache-1.1"
] | null | null | null | import os
from pyfakefs import fake_filesystem_unittest
from devbox.utilities.manifest_parser import ManifestParser
class TestManifestParser(fake_filesystem_unittest.TestCase):
def setUp(self):
self.setUpPyfakefs()
def test_manifest_parser(self):
# Arrange
self.fs.CreateFile('my-app/devbox.yaml', contents="""
tosca_definitions_version: tosca_simple_yaml_1_0
topology_template:
node_templates:
python_server1:
type: tosca.nodes.Python
properties:
ports_bindings:
type: string
default: "{1234:80}"
artifacts:
binaries:
file: binaries.zip
python_client1:
type: tosca.nodes.Python
node_types:
tosca.nodes.Python:
derived_from: tosca.nodes.SoftwareComponent
properties:
deployment_image:
type: string
default: rastasheep/ubuntu-sshd
deployment_command:
type: string
default: /bin/sh
deployment_ports:
type: list
default: [22, 1234]
ports_bindings:
type: string
required: false
provisioning_instruction:
type: string
default: playbook.yaml
""")
nodes = ManifestParser().parse('my-app/devbox.yaml')
self.assertEqual(nodes[0].properties['deployment_ports'], [22, 1234])
self.assertEqual(nodes[0].properties['ports_bindings'], "{1234:80}")
self.assertTrue('ports_bindings' not in nodes[1].properties)
def test_manifest_parser_deployment_path(self):
# Arrange
self.fs.CreateFile('my-app/devbox.yaml', contents="""
tosca_definitions_version: tosca_simple_yaml_1_0
topology_template:
node_templates:
python_server1:
type: tosca.nodes.Python
properties:
ports_bindings:
type: string
default: "{1234:80}"
execution_command:
type: string
default: "abcd"
artifacts:
binaries:
artifacts_path: /home/user/myappfolder
deploy_path: mybin
python_client1:
type: tosca.nodes.Python
node_types:
tosca.nodes.Python:
derived_from: tosca.nodes.SoftwareComponent
properties:
deployment_image:
type: string
default: rastasheep/ubuntu-sshd
deployment_command:
type: string
default: /bin/sh
deployment_ports:
type: list
default: [22, 1234]
ports_bindings:
type: string
required: false
provisioning_instruction:
type: string
default: playbook.yaml
execution_command:
type: string
default: ""
""")
nodes = ManifestParser().parse('my-app/devbox.yaml')
self.assertEqual(nodes[0].properties['deployment_ports'], [22, 1234])
self.assertEqual(nodes[0].properties['ports_bindings'], "{1234:80}")
self.assertEqual(nodes[0].artifacts['binaries']['deploy_path'], "mybin")
self.assertEqual(nodes[0].artifacts['binaries']['artifacts_path'], "/home/user/myappfolder")
self.assertEqual(nodes[0].properties['execution_command'], "abcd")
self.assertEqual(nodes[1].properties['execution_command'], "")
self.assertTrue('ports_bindings' not in nodes[1].properties)
| 30.016949 | 100 | 0.593732 | 3,420 | 0.965556 | 0 | 0 | 0 | 0 | 0 | 0 | 2,505 | 0.707228 |
68960b586f20c8d621871e22f1d973112cd25522 | 2,140 | py | Python | messages.py | Pyprohly/batch-bot | 0163fa75d4b6b2408a7504429d1b8662b13dded9 | [
"MIT"
] | null | null | null | messages.py | Pyprohly/batch-bot | 0163fa75d4b6b2408a7504429d1b8662b13dded9 | [
"MIT"
] | null | null | null | messages.py | Pyprohly/batch-bot | 0163fa75d4b6b2408a7504429d1b8662b13dded9 | [
"MIT"
] | null | null | null |
from enum import Enum, auto
from string import Template
class MessageRegister:
def __init__(self, dispatch_table=None):
self.dispatch = dict() if dispatch_table is None else dispatch_table
def register(self, ref):
def decorator(func):
self.dispatch[ref] = func
return func
return decorator
def get(self, ref):
return self.dispatch.get(ref)
def __getitem__(self, ref):
return self.dispatch[ref]
messages = MessageRegister()
class MessageBank(Enum):
code_block_needed = auto()
inline_code_misuse = auto()
class MessageStorage:
code_block = '''Looks like your Batch file code isn’t wrapped in a code block.
To format code correctly on **new.reddit.com**, highlight the code and select *‘Code Block’* in the editing toolbar.
If you’re on **old.reddit.com**, separate the code from your text with a blank line and precede each line of code with **4 spaces** or a **tab**.
---
^(*Beep-boop. I am a bot.*)
'''
code_block_with_example = '''Looks like your Batch file code isn’t wrapped in a code block.
To format code correctly on **new.reddit.com**, highlight the code and select *‘Code Block’* in the editing toolbar.
If you’re on **old.reddit.com**, separate the code from your text with a blank line and precede each line of code with **4 spaces** or a **tab**. E.g.,
This is normal text.
@echo off
echo This is code!
> This is normal text.
>
> @echo off
> echo This is code!
---
^(*Beep-boop. I am a bot.*)
'''
inline_code = '''Looks like you used *inline code* formatting where a **code block** should have been used.
The inline code text styling is for use in paragraphs of text. For larger sequences of code, consider using a code block. This can be done by selecting your code then clicking the *‘Code Block’* button.
---
^(*Beep-boop. I am a bot.*)
'''
class MessageData:
@messages.register(MessageBank.code_block_needed)
def code_block_needed(example=False, **kws):
return MessageStorage.code_block_with_example if example else MessageStorage.code_block
@messages.register(MessageBank.inline_code_misuse)
def inline_code_misuse(**kws):
return MessageStorage.inline_code
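# Usage sketch (added): reply bodies are looked up through the register, e.g.
#
#   body = messages.get(MessageBank.code_block_needed)(example=True)
#   # -> MessageStorage.code_block_with_example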
| 27.792208 | 201 | 0.730841 | 2,065 | 0.956019 | 0 | 0 | 303 | 0.140278 | 0 | 0 | 1,216 | 0.562963 |
6896c0545b2d4f8acba5f99caf7c1f5b8e3c596c | 91 | py | Python | siem_integrations/clx_query_service/clxquery/apps.py | mdemoret-nv/clx | 3737706187d8f5720561e10b85cbd638c77b9267 | [
"Apache-2.0"
] | 143 | 2019-11-06T16:08:50.000Z | 2022-03-22T12:14:59.000Z | siem_integrations/clx_query_service/clxquery/apps.py | mdemoret-nv/clx | 3737706187d8f5720561e10b85cbd638c77b9267 | [
"Apache-2.0"
] | 361 | 2019-11-06T20:33:24.000Z | 2022-03-31T19:59:12.000Z | siem_integrations/clx_query_service/clxquery/apps.py | mdemoret-nv/clx | 3737706187d8f5720561e10b85cbd638c77b9267 | [
"Apache-2.0"
] | 82 | 2019-11-06T17:36:42.000Z | 2022-03-17T07:03:04.000Z | from django.apps import AppConfig
class ClxQueryConfig(AppConfig):
name = "clxquery"
| 15.166667 | 33 | 0.758242 | 54 | 0.593407 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.10989 |
6897023917e0a55a0e08c289c8451ac380c0fbaa | 5,254 | py | Python | tests/test_code_vasp.py | petavazohi/PyChemia | e779389418771c25c830aed360773c63bb069372 | [
"MIT"
] | 67 | 2015-01-31T07:44:55.000Z | 2022-03-21T21:43:34.000Z | tests/test_code_vasp.py | petavazohi/PyChemia | e779389418771c25c830aed360773c63bb069372 | [
"MIT"
] | 13 | 2016-06-03T19:07:51.000Z | 2022-03-31T04:20:40.000Z | tests/test_code_vasp.py | petavazohi/PyChemia | e779389418771c25c830aed360773c63bb069372 | [
"MIT"
] | 37 | 2015-01-22T15:37:23.000Z | 2022-03-21T15:38:10.000Z | import os
import shutil
import pychemia
import tempfile
import unittest
class MyTestCase(unittest.TestCase):
def test_incar(self):
"""
Test (pychemia.code.vasp) [INCAR parsing and writing] :
"""
print(os.getcwd())
iv = pychemia.code.vasp.read_incar('tests/data/vasp_01/INCAR')
self.assertEqual(len(iv), 12)
self.assertEqual(iv.EDIFF, 1E-7)
wf = tempfile.NamedTemporaryFile()
iv.write(wf.name)
wf.close()
iv4dir = pychemia.code.vasp.read_incar('tests/data/vasp_01')
self.assertEqual(iv, iv4dir)
self.assertRaises(ValueError, pychemia.code.vasp.read_incar, 'tests/data')
iv3 = pychemia.code.vasp.VaspInput(variables={'EDIFF': 1E-6})
self.assertEqual(iv3['EDIFF'], 1E-6)
iv = pychemia.code.vasp.read_incar('tests/data/vasp_02')
iv.EDIFF *= 1.3
td = tempfile.mkdtemp()
pychemia.code.vasp.write_incar(iv, td)
self.assertRaises(ValueError, iv.write_key, 'EDIF')
shutil.rmtree(td)
def test_bad_outcar(self):
"""
Test (pychemia.code.vasp) [corrupted VASP OUTCAR] :
"""
vo = pychemia.code.vasp.VaspOutput('tests/data/vasp_04/OUTCAR')
self.assertTrue(vo.is_finished)
def test_encut_setup(self):
"""
Test (pychemia.code.vasp) [ENCUT setup] :
"""
iv = pychemia.code.vasp.read_incar('tests/data/vasp_06')
iv.set_encut(ENCUT=1.2, POTCAR='tests/data/vasp_06/POTCAR')
self.assertEqual(iv.ENCUT, 307)
iv.set_rough_relaxation()
self.assertEqual(iv.EDIFFG, -1E-2)
iv.set_mit_settings()
def test_vaspjob(self):
"""
Test (pychemia.code.vasp) [VaspJob] :
"""
td = tempfile.mkdtemp()
st = pychemia.code.vasp.read_poscar('tests/data/vasp_06')
kp = pychemia.code.vasp.read_kpoints('tests/data/vasp_06')
self.assertEqual(kp.number_of_kpoints, 693)
iv = pychemia.code.vasp.read_incar('tests/data/vasp_06')
vj = pychemia.code.vasp.VaspJob(workdir=td,)
vj.initialize(st, kpoints=kp)
vj.set_input_variables(iv)
vj.write_poscar()
vj.write_kpoints()
vj.write_incar()
shutil.rmtree(td)
def test_outcar(self):
"""
Test (pychemia.code.vasp) [outcar] :
"""
vo = pychemia.code.vasp.VaspOutput('tests/data/vasp_06/OUTCAR')
self.assertEqual(vo.get_memory_used()['grid'], (1028.0, 'kBytes'))
self.assertAlmostEqual(vo.to_dict['energy'], -19.67192646)
print(vo)
self.assertTrue(vo.has_forces_stress_energy())
def test_poscar(self):
"""
Test (pychemia.code.vasp) [poscar] :
"""
# Temporal directory for outputs
tmpdir = tempfile.mkdtemp()
# Read a POSCAR by directory
st = pychemia.code.vasp.read_poscar('tests/data/vasp_06')
self.assertEqual(st.natom, 4)
# Opening old format POSCAR without POTCAR
with self.assertRaises(ValueError) as context:
st = pychemia.code.vasp.read_poscar('tests/data/vasp_07/POSCAR')
st = pychemia.code.vasp.read_poscar('tests/data/vasp_08/POSCAR_old')
self.assertEqual(st.natom, 2)
st = pychemia.code.vasp.read_poscar('tests/data/vasp_08/POSCAR_new')
self.assertEqual(st.natom, 2)
with self.assertRaises(ValueError) as context:
pychemia.code.vasp.write_potcar(st, filepath=tmpdir + os.sep + 'POTCAR', basepsp='/no/existing/path')
with self.assertRaises(ValueError) as context:
pychemia.code.vasp.write_potcar(st, filepath=tmpdir + os.sep + 'POTCAR', basepsp='tests/data')
cwd = os.getcwd()
os.chdir('tests/data/vasp_07')
st = pychemia.code.vasp.read_poscar('POSCAR_new')
os.chdir(cwd)
self.assertEqual(st.natom, 44)
st = pychemia.code.vasp.read_poscar('tests/data/vasp_07/POSCAR_alt')
pychemia.code.vasp.write_poscar(st, tmpdir + os.sep + 'POSCAR1')
pychemia.code.vasp.write_poscar(st, tmpdir + os.sep + 'POSCAR2', direct=False)
pychemia.code.vasp.write_poscar(st, tmpdir + os.sep + 'POSCAR3', newformat=False)
st = pychemia.code.vasp.read_poscar(tmpdir + os.sep + 'POSCAR1')
self.assertAlmostEqual(st.volume, 584.47161926043907)
sym = pychemia.crystal.CrystalSymmetry(st)
self.assertEqual(sym.symbol(), 'C2/c')
st = pychemia.code.vasp.read_poscar(tmpdir + os.sep + 'POSCAR2')
self.assertAlmostEqual(st.volume, 584.47161926043907)
sym = pychemia.crystal.CrystalSymmetry(st)
self.assertEqual(sym.symbol(), 'C2/c')
st = pychemia.code.vasp.read_poscar(tmpdir + os.sep + 'POSCAR3')
self.assertAlmostEqual(st.volume, 584.47161926043907)
sym = pychemia.crystal.CrystalSymmetry(st)
self.assertEqual(sym.symbol(), 'C2/c')
pychemia.code.vasp.write_potcar(st, filepath=tmpdir + os.sep + 'POTCAR', basepsp='tests/data')
pychemia.code.vasp.get_potcar_info(tmpdir + os.sep + 'POTCAR')
shutil.rmtree(tmpdir)
| 39.208955 | 113 | 0.621622 | 5,178 | 0.985535 | 0 | 0 | 0 | 0 | 0 | 0 | 1,212 | 0.230681 |
6897d0617b652b4190982ab2f26f229b87c68973 | 7,048 | py | Python | generateResources.py | shisi1/libVulpes | 15ec3cab2cdea076cf777da811d783f58ec7f36f | [
"MIT"
] | null | null | null | generateResources.py | shisi1/libVulpes | 15ec3cab2cdea076cf777da811d783f58ec7f36f | [
"MIT"
] | null | null | null | generateResources.py | shisi1/libVulpes | 15ec3cab2cdea076cf777da811d783f58ec7f36f | [
"MIT"
] | 2 | 2021-11-29T18:16:46.000Z | 2021-12-16T16:35:59.000Z | import os
class Material:
def __init__(self, name, color, outputs):
self.name = name
self.color = color
self.outputs = outputs
materials = [Material("dilithium", 0xddcecb, ("DUST", "GEM")),
Material("iron", 0xafafaf, ("SHEET", "STICK", "DUST", "PLATE")),
Material("gold", 0xffff5d, ("DUST", "COIL", "PLATE")),
Material("silicon", 0x2c2c2b, ("INGOT", "DUST", "BOULE", "NUGGET", "PLATE")),
Material("copper", 0xd55e28, ("ORE", "COIL", "BLOCK", "STICK", "INGOT", "NUGGET", "DUST", "PLATE", "SHEET")),
Material("tin", 0xcdd5d8, ("ORE", "BLOCK", "PLATE", "INGOT", "NUGGET", "DUST")),
Material("steel", 0x55555d, ("BLOCK", "FAN", "PLATE", "INGOT", "NUGGET", "DUST", "STICK", "GEAR", "SHEET")),
Material("titanium", 0xb2669e, ("PLATE", "COIL", "INGOT", "NUGGET", "DUST", "STICK", "BLOCK", "GEAR", "SHEET")),
Material("rutile", 0xbf936a, ("ORE",)),
Material("aluminum", 0xb3e4dc, ("ORE", "COIL", "BLOCK", "INGOT", "PLATE", "SHEET", "DUST", "NUGGET", "SHEET")),
Material("iridium", 0xdedcce, ("ORE", "COIL", "BLOCK", "DUST", "INGOT", "NUGGET", "PLATE", "STICK"))]
materials = [Material("dilithium", 0xddcecb, ("DUST", "GEM")),
Material("titaniumaluminide", 0xaec2de, ("GEAR", "COIL", "BLOCK", "INGOT", "PLATE", "SHEET", "DUST", "NUGGET", "SHEET")),
Material("titaniumiridium", 0xd7dfe4, ("GEAR", "COIL", "BLOCK", "INGOT", "PLATE", "SHEET", "DUST", "NUGGET", "SHEET"))]
blockTypes = ['COIL', 'BLOCK', 'ORE']
coilTypes = ['COIL']
noIconGenTypes = ['ORE']
itemSample = '{\n "parent": "item/generated",\n "textures": {\n "layer0": "libvulpes:items/@TYPE@@MATERIAL@"\n }\n}'
blockItemSample = '{\n "parent": "libvulpes:block/@TYPE@@MATERIAL@"\n}'
blockSample = '{\n "parent": "minecraft:block/cube_all",\n "textures": {\n "all": "libvulpes:blocks/@TYPE@@MATERIAL@"\n }\n}'
coilSample = '{\n "parent": "libvulpes:block/tintedcubecolumn",\n "textures": {\n "end": "libvulpes:blocks/@TYPE@@MATERIAL@top",\n "side": "libvulpes:blocks/@TYPE@@MATERIAL@side"\n }\n}'
blockStateSample = '{\n "variants": {\n "": { "model": "libvulpes:block/@TYPE@@MATERIAL@" }\n }\n}'
itemDir = 'src/main/resources/assets/libvulpes/models/item/'
blockDir = 'src/main/resources/assets/libvulpes/models/block/'
blockStateDir = 'src/main/resources/assets/libvulpes/blockstates/'
itemIconDir = 'src/main/resources/assets/libvulpes/textures/items/'
blockIconDir = 'src/main/resources/assets/libvulpes/textures/blocks/'
blockTagPath = "src/main/resources/data/forge/tags/blocks/"
itemTagPath = "src/main/resources/data/forge/tags/items/"
blockTagSample = '{\n "replace": false,\n "values": [@BLOCKLIST@]\n}'
def getMatrix(color):
r = ((color >> 16) & 0xff)/0xff
g = ((color >> 8) & 0xff)/0xff
b = (color & 0xff)/0xff
return str(r) + ' 0 0 0 ' + str(g) + ' 0 0 0 ' + str(b)
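# Worked example (added): getMatrix(0xff0000) keeps only the red channel and
# returns the ImageMagick color-matrix string "1.0 0 0 0 0.0 0 0 0 0.0".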
def getCommand(inputFile, outputFile, color):
return 'convert ' + inputFile + ' -color-matrix "' + getMatrix(color) + '" ' + outputFile
def genItem(mat, objType):
if not objType in blockTypes:
output = itemSample.replace('@MATERIAL@', mat.name).replace('@TYPE@', objType.lower())
else:
output = blockItemSample.replace('@MATERIAL@', mat.name).replace('@TYPE@', objType.lower())
filename = itemDir + objType.lower() + mat.name + '.json'
f = open(filename, 'w')
f.write(output)
# generate the icon now
if not objType in blockTypes:
inputFile = itemIconDir + objType.lower() + '.png'
outputFile = itemIconDir + objType.lower() + mat.name + '.png'
cmd = getCommand(inputFile, outputFile, mat.color)
os.system(cmd)
def genBlock(mat, objType):
if objType in coilTypes:
output = coilSample.replace('@MATERIAL@', mat.name).replace('@TYPE@', objType.lower())
else:
output = blockSample.replace('@MATERIAL@', mat.name).replace('@TYPE@', objType.lower())
filename = blockDir + objType.lower() + mat.name + '.json'
f = open(filename, 'w')
f.write(output)
# generate the blockState
output = blockStateSample.replace('@MATERIAL@', mat.name).replace('@TYPE@', objType.lower())
filename = blockStateDir + objType.lower() + mat.name + '.json'
f = open(filename, 'w')
f.write(output)
# Generate the icon now
if not objType in noIconGenTypes:
if objType in coilTypes:
inputFile = blockIconDir + objType.lower() + 'pole.png'
outputFile = blockIconDir + objType.lower() + mat.name + 'top.png'
cmd = getCommand(inputFile, outputFile, mat.color)
os.system(cmd)
inputFile = blockIconDir + objType.lower() + 'side.png'
outputFile = blockIconDir + objType.lower() + mat.name + 'side.png'
cmd = getCommand(inputFile, outputFile, mat.color)
os.system(cmd)
else:
inputFile = blockIconDir + objType.lower() + '.png'
outputFile = blockIconDir + objType.lower() + mat.name + '.png'
cmd = getCommand(inputFile, outputFile, mat.color)
os.system(cmd)
def printEnLang(mat, objType, block):
human_mat = mat.name
human_type = objType.lower()
human_mat = human_mat[0].upper() + human_mat[1:]
human_type = human_type[0].upper() + human_type[1:]
if block:
print(' "block.libvulpes.{}{}": "{} {}",'.format(objType.lower(),mat.name, human_mat, human_type))
else:
print(' "item.libvulpes.{}{}": "{} {}",'.format(objType.lower(),mat.name, human_mat, human_type))
def generateTag(tagPath, mat, objType):
if not os.path.exists(tagPath + objType.lower()):
os.makedirs(tagPath + objType.lower())
filename = tagPath + objType.lower() + '/' + mat.name + '.json'
contents = blockTagSample.replace('@BLOCKLIST@', ' "libvulpes:' + objType.lower() + mat.name + '"')
f = open(filename, 'w')
f.write(contents)
f.close()
objTypeToMaterialMap = {}
for mat in materials:
for objType in mat.outputs:
if objType not in objTypeToMaterialMap:
objTypeToMaterialMap[objType] = []
#objTypeToMaterialMap[objType].append(mat)
#genItem(mat, objType)
if objType in blockTypes:
# genBlock(mat, objType)
generateTag(blockTagPath, mat, objType)
generateTag(itemTagPath, mat, objType)
#printEnLang(mat, objType, objType in blockTypes)
for objType in objTypeToMaterialMap.keys():
contentString = []
for mat in objTypeToMaterialMap[objType]:
contentString.append(' "libvulpes:' + objType.lower() + mat.name + '"')
contents = blockTagSample.replace('@BLOCKLIST@', ',\n'.join(contentString))
f = None
try:
if objType in blockTypes:
f = open(blockTagPath + objType.lower() + '.json', 'w')
else:
f = open(itemTagPath + objType.lower() + '.json', 'w')
f.write(contents)
f.close()
except FileNotFoundError:
pass
| 41.458824 | 209 | 0.610528 | 149 | 0.021141 | 0 | 0 | 0 | 0 | 0 | 0 | 2,352 | 0.333712 |
68989bd20a742e1b1f652b4df8e618dc746273a5 | 89,471 | py | Python | SOGM-3D-2D-Net/slam/dev_slam.py | liuxinren456852/Deep-Collison-Checker | 1d96415fd865361e22a6f25547707c47628dcfbe | [
"MIT"
] | 7 | 2021-09-09T15:03:36.000Z | 2022-03-15T16:33:33.000Z | SOGM-3D-2D-Net/slam/dev_slam.py | liuxinren456852/Deep-Collison-Checker | 1d96415fd865361e22a6f25547707c47628dcfbe | [
"MIT"
] | null | null | null | SOGM-3D-2D-Net/slam/dev_slam.py | liuxinren456852/Deep-Collison-Checker | 1d96415fd865361e22a6f25547707c47628dcfbe | [
"MIT"
] | 1 | 2021-09-10T01:46:50.000Z | 2021-09-10T01:46:50.000Z | #
#
# 0=================================0
# | Kernel Point Convolutions |
# 0=================================0
#
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Class handling SemanticKitti dataset.
# Implements a Dataset, a Sampler, and a collate_fn
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Hugues THOMAS - 11/06/2018
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Imports and global variables
# \**********************************/
#
# Common libs
import sys
import struct
import scipy
import time
import numpy as np
import pickle
import torch
import yaml
#from mayavi import mlab
from multiprocessing import Lock
import open3d
from scipy.spatial.transform import Rotation as scipyR
from scipy.spatial.transform import Slerp
import matplotlib.pyplot as plt
# OS functions
from os import listdir
from os.path import exists, join, isdir, getsize
# Dataset parent class
from utils.mayavi_visu import *
from sklearn.neighbors import KDTree
from slam.cpp_slam import polar_normals, bundle_pt2pl_icp
from datasets.common import grid_subsampling
import open3d as o3d
import copy
import re
from utils.mayavi_visu import show_point_cloud
def compute_plane(points):
ref_point = points[0]
normal = np.cross(points[1] - points[0], points[2] - points[0])
normal = normal / np.sqrt(np.sum(np.power(normal, 2)))
return ref_point, normal
def in_plane(points, ref_pt, normal, threshold_in=0.1):
return np.abs(np.dot((points - ref_pt), normal)) < threshold_in
def RANSAC(points, NB_RANDOM_DRAWS=100, threshold_in=0.1):
best_mask = None
best_vote = 3
best_ref_pt, best_normal = compute_plane(points[:3])
N = len(points)
for i in range(NB_RANDOM_DRAWS):
# Random selection of points
random_inds = np.zeros((0,), dtype=np.int32)
while random_inds.shape[0] < 3:
new_inds = np.random.randint(0, N, size=3, dtype=np.int32)
random_inds = np.unique(np.hstack((random_inds, new_inds)))
random_inds = random_inds[:3]
# Corresponding plane
ref_pt, normal = compute_plane(points[random_inds])
# Number of votes
mask = in_plane(points, ref_pt, normal, threshold_in)
vote = np.sum(mask)
# Save
if vote > best_vote:
best_ref_pt = ref_pt
best_normal = normal
best_vote = vote
best_mask = mask
return best_ref_pt, best_normal, best_mask
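# Added sanity-check sketch (not part of the original pipeline): build a noisy synthetic
# plane plus some outliers and verify that RANSAC recovers a mask covering most inliers.
# The function name _ransac_sanity_check is an illustrative addition.
def _ransac_sanity_check():
    rng = np.random.RandomState(0)
    # 1000 points close to the plane z = 0, plus 200 outliers well above it
    inliers = np.hstack((rng.uniform(-5, 5, (1000, 2)), rng.normal(0, 0.02, (1000, 1))))
    outliers = np.hstack((rng.uniform(-5, 5, (200, 2)), rng.uniform(1.0, 3.0, (200, 1))))
    points = np.vstack((inliers, outliers)).astype(np.float32)
    ref_pt, normal, mask = RANSAC(points, NB_RANDOM_DRAWS=100, threshold_in=0.1)
    # The recovered normal should be close to +/-z and the mask should keep the inliers
    print('normal:', normal, 'inlier ratio:', np.mean(mask[:1000]))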
def extract_ground(points, normals, out_folder,
vertical_thresh=10.0,
dist_thresh=0.15,
remove_dist=0.15,
saving=True):
# Get points with vertical normal
vertical_angle = np.arccos(np.abs(np.clip(normals[:, 2], -1.0, 1.0)))
    # Use the threshold on the vertical angle in degrees
plane_mask = vertical_angle < vertical_thresh * np.pi / 180
# Get the ground plane with RANSAC
plane_P, plane_N, _ = RANSAC(points[plane_mask], threshold_in=dist_thresh)
# Get mask on all the points
plane_mask = in_plane(points, plane_P, plane_N, dist_thresh)
mask0 = np.copy(plane_mask)
# Get better ground/objects boundary
candidates = points[plane_mask]
others = points[np.logical_not(plane_mask)]
dists, inds = KDTree(others).query(candidates, 1)
plane_mask[plane_mask] = np.squeeze(dists) > remove_dist
if saving:
ground_points = points[plane_mask]
ground_normals = normals[plane_mask]
write_ply(join(out_folder, 'ground.ply'),
[ground_points, ground_normals],
['x', 'y', 'z', 'nx', 'ny', 'nz'])
return plane_mask
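# Hedged usage sketch for extract_ground (added for illustration, names invented here):
# a synthetic floor and a vertical wall with hand-made normals should yield a mask that
# selects the floor points. With saving=False no ply file is written, so out_folder is a
# dummy string.
def _extract_ground_sketch():
    rng = np.random.RandomState(1)
    floor = np.hstack((rng.uniform(-5, 5, (2000, 2)), rng.normal(0, 0.01, (2000, 1))))
    wall = np.hstack((rng.uniform(-5, 5, (500, 1)), np.full((500, 1), 5.0), rng.uniform(0, 2, (500, 1))))
    points = np.vstack((floor, wall)).astype(np.float32)
    normals = np.vstack((np.tile([0.0, 0.0, 1.0], (2000, 1)), np.tile([1.0, 0.0, 0.0], (500, 1)))).astype(np.float32)
    ground_mask = extract_ground(points, normals, out_folder='', saving=False)
    print('ground points:', np.sum(ground_mask), '/', points.shape[0])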
def read_pgm(filename, byteorder='>'):
"""Return image data from a raw PGM file as numpy array.
Format specification: http://netpbm.sourceforge.net/doc/pgm.html
"""
with open(filename, 'rb') as f:
buffer = f.read()
try:
header, width, height, maxval = re.search(
b"(^P5\s(?:\s*#.*[\r\n])*"
b"(\d+)\s(?:\s*#.*[\r\n])*"
b"(\d+)\s(?:\s*#.*[\r\n])*"
b"(\d+)\s(?:\s*#.*[\r\n]\s)*)", buffer).groups()
except AttributeError:
raise ValueError("Not a raw PGM file: '%s'" % filename)
return np.frombuffer(buffer,
dtype='u1' if int(maxval) < 256 else byteorder+'u2',
count=int(width)*int(height),
offset=len(header)
).reshape((int(height), int(width)))
def write_pgm(filename, image):
# open in text mode to write the header
with open(filename, 'w') as pgm_file:
# First magical word
header = ['P5']
header.append('{:d} {:d}'.format(image.shape[1], image.shape[0]))
header.append('255')
for line in header:
pgm_file.write("%s\n" % line)
# open in binary/append to use tofile
with open(filename, 'ab') as pgm_file:
image.tofile(pgm_file)
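# Added round-trip sketch for the two PGM helpers above (illustrative only; the temporary
# path below is an assumption to adapt to your setup): a small uint8 image written with
# write_pgm should be read back unchanged by read_pgm.
def _pgm_roundtrip_check(tmp_file='/tmp/pgm_roundtrip_check.pgm'):
    image = np.arange(24, dtype=np.uint8).reshape(4, 6)
    write_pgm(tmp_file, image)
    image_back = read_pgm(tmp_file, byteorder='<')
    print('round trip ok:', np.array_equal(image, image_back))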
def pointmap_for_AMCL():
# -----------------------------------------------------------------------------------------
# Load original map for comparison
path = '../../Myhal_Simulation/Simulator/JackalTourGuide/src/jackal_velodyne/maps'
pgm_file = 'myhal_map_V3.pgm'
yml_file = 'myhal_map_V3.yaml'
with open(join(path, yml_file), 'r') as stream:
doc = yaml.safe_load(stream)
print('-----------------------------')
print('image:', doc['image'])
print('resolution:', doc['resolution'])
print('origin:', doc['origin'])
print('negate:', doc['negate'])
print('occupied_thresh:', doc['occupied_thresh'])
print('free_thresh:', doc['free_thresh'])
print('-----------------------------')
image = read_pgm(join(path, pgm_file), byteorder='<')
# plt.imshow(image)
# plt.show()
# -----------------------------------------------------------------------------------------
# Load point map
path = '../../Myhal_Simulation/slam_offline/2020-10-02-13-39-05'
ply_file = 'map_update_0002.ply'
data = read_ply(join(path, ply_file))
points = np.vstack((data['x'], data['y'], data['z'])).T
heights = points[:, 2]
min_z = np.min(heights)
heights = heights[heights < min_z + 0.09]
ground_z = np.median(heights)
z1 = ground_z + 0.3
z2 = ground_z + 1.0
mask_2D = np.logical_and(points[:, 2] < z2, points[:, 2] > z1)
points_2D = points[mask_2D, :2]
# -----------------------------------------------------------------------------------------
# Fill map_image
origin_2D = np.array(doc['origin'][:2], dtype=np.float32)
# Compute voxel indice for each frame point
grid_indices = (np.floor((points_2D - origin_2D) / doc['resolution'])).astype(int)
# Flip first axis it is an image
grid_indices[:, 1] = image.shape[0] - grid_indices[:, 1]
# Scalar equivalent to grid indices
scalar_indices = grid_indices[:, 0] + grid_indices[:, 1] * image.shape[0]
vec_img = np.reshape(image * 0 + 255, (-1,))
vec_img[np.unique(scalar_indices)] = 0
image2 = np.reshape(vec_img, image.shape)
f, axarr = plt.subplots(1, 2)
axarr[0].imshow(image)
axarr[1].imshow(image2)
plt.show()
# -----------------------------------------------------------------------------------------
# Save and check saved
path = '../../Myhal_Simulation/Simulator/JackalTourGuide/src/jackal_velodyne/maps'
pgm_file = 'myhal_map_V4.pgm'
yml_file = 'myhal_map_V4.yaml'
doc['image'] = pgm_file
if False and exists(join(path, pgm_file)):
imagetest = read_pgm(join(path, pgm_file), byteorder='<')
plt.imshow(imagetest)
plt.show()
else:
with open(join(path, yml_file), 'w') as outfile:
yaml.dump(doc, outfile)
write_pgm(join(path, pgm_file), image2)
# -----------------------------------------------------------------------------------------
# change map parameters
doc['image'] = pgm_file
doc['resolution'] = 0.05
doc['origin'] = [-22, -22, 0]
limit_2D = np.array([22, 22], dtype=np.float32)
origin_2D = np.array(doc['origin'][:2], dtype=np.float32)
image_w, image_h = (np.ceil((limit_2D - origin_2D) / doc['resolution'])).astype(int)
# Compute voxel indice for each frame point
grid_indices = (np.floor((points_2D - origin_2D) / doc['resolution'])).astype(int)
# Flip first axis it is an image
grid_indices[:, 1] = image_h - grid_indices[:, 1]
# Scalar equivalent to grid indices
scalar_indices = grid_indices[:, 0] + grid_indices[:, 1] * image_w
vec_img = np.zeros(image_w * image_h, dtype='u1') + 255
vec_img[np.unique(scalar_indices)] = 0
image3 = np.reshape(vec_img, (image_w, image_h))
f, axarr = plt.subplots(1, 2)
axarr[0].imshow(image2)
axarr[1].imshow(image3)
plt.show()
path = '../../Myhal_Simulation/Simulator/JackalTourGuide/src/jackal_velodyne/maps'
pgm_file = 'myhal_map_V5.pgm'
yml_file = 'myhal_map_V5.yaml'
doc['image'] = pgm_file
if False and exists(join(path, pgm_file)):
imagetest = read_pgm(join(path, pgm_file), byteorder='<')
plt.imshow(imagetest)
plt.show()
else:
with open(join(path, yml_file), 'w') as outfile:
yaml.dump(doc, outfile)
write_pgm(join(path, pgm_file), image3)
return
def normals_orientation(normals):
    # Discretise the sphere in cartesian coordinates to avoid the resolution problem at poles
voxel_size = 0.05
# Compute voxel indice for each point
grid_indices = (np.floor(normals / voxel_size)).astype(int)
# Limits of the grid
min_grid_indices = np.amin(grid_indices, axis=0)
max_grid_indices = np.amax(grid_indices, axis=0)
# Number of cells in each direction
deltaX, deltaY, deltaZ = max_grid_indices - min_grid_indices + 1
# Relocate indices
grid_indices -= min_grid_indices
# Scalar equivalent to grid indices
scalar_indices = grid_indices[:, 0] + grid_indices[:, 1] * deltaX + grid_indices[:, 2] * deltaX * deltaY
unique_inds, inverse, counts = np.unique(scalar_indices, return_counts=True, return_inverse=True)
# Get counts in a 3D matrix
unique_z = unique_inds // (deltaX * deltaY)
unique_inds -= unique_z * deltaX * deltaY
unique_y = unique_inds // deltaX
unique_x = unique_inds - unique_y * deltaX
count_matrix = np.zeros((deltaX, deltaY, deltaZ), dtype=np.float32)
count_matrix[unique_x, unique_y, unique_z] += counts
# Smooth them with a gaussian filter convolution
torch_conv = torch.nn.Conv3d(1, 1, kernel_size=5, stride=1, bias=False)
torch_conv.weight.requires_grad_(False)
torch_conv.weight *= 0
torch_conv.weight += gaussian_conv_filter(3, 5)
torch_conv.weight *= torch.sum(torch_conv.weight) ** -1
count_matrix = np.expand_dims(count_matrix, 0)
count_matrix = np.expand_dims(count_matrix, 0)
torch_count = torch.from_numpy(count_matrix)
torch_count = torch.nn.functional.pad(torch_count, [2, 2, 2, 2, 2, 2])
smooth_counts = torch.squeeze(torch_conv(torch_count))
smooth_counts = smooth_counts.numpy()[unique_x, unique_y, unique_z]
#################################################
# Create weight according to the normal direction
#################################################
# Show histogram in a spherical point cloud
n_cloud = np.vstack((unique_x, unique_y, unique_z)).astype(np.float32).T
n_cloud = (n_cloud + min_grid_indices.astype(np.float32) + 0.5) * voxel_size
    # Only 20% of the normal bins are kept. For the rest, we use weights based on distances
mask = (smooth_counts > np.percentile(smooth_counts, 80))
# Align with weighted PCA
# weighted_cloud = n_cloud[mask] * np.expand_dims(smooth_counts[mask], 1)
weighted_cloud = n_cloud[mask]
# mean_P = np.sum(weighted_cloud, axis=0) / np.sum(smooth_counts)
# print(mean_P.shape)
# cloud_0 = n_cloud - mean_P
    # TODO: Covariance not robust, do an ICP???
cov_mat = np.matmul(weighted_cloud.T, weighted_cloud) / n_cloud[mask].shape[0] #np.sum(smooth_counts[mask])
eigen_values, eigen_vectors = np.linalg.eigh(cov_mat)
# Correct eigenvectors orientation with centroid
# mean_P = np.sum(weighted_cloud, axis=0) / np.sum(smooth_counts)
# rotated_centroids = np.matmul(mean_P, eigen_vectors.T)
# corrections = (rotated_centroids > 0).astype(eigen_vectors.dtype) * 2 - 1
return n_cloud, smooth_counts, eigen_vectors
def rot_trans_diffs(all_H):
all_R = all_H[:, :3, :3]
all_R_T = np.transpose(all_R, (0, 2, 1))
dR = np.matmul(all_R[1:], all_R_T[:-1])
dR = np.arccos(np.clip((np.trace(dR, axis1=1, axis2=2) - 1) / 2, -1.0, 1.0))
dT = all_H[1:, :3, 3] - all_H[:-1, :3, 3]
dT = np.linalg.norm(dT, axis=1)
return dT, dR
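# Added illustrative check (not in the original pipeline): for two poses differing by a
# 30 degree rotation around z and a 1 m translation along x, rot_trans_diffs should
# report dR close to pi/6 and dT close to 1.
def _rot_trans_diffs_example():
    H0 = np.eye(4)
    H1 = np.eye(4)
    H1[:3, :3] = scipyR.from_euler('z', 30, degrees=True).as_matrix()
    H1[0, 3] = 1.0
    dT, dR = rot_trans_diffs(np.stack((H0, H1), axis=0))
    print('dT:', dT, 'dR (deg):', np.degrees(dR))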
def interp_pose(t, H0, H1):
"""
Interpolate pose at time t (between 0 and 1) between the pose H(t=0) and H(t=1)
:param t: interpolation time
:param H0: first pose
:param H1: second pose
:return: interpolated pose
"""
# Create a slerp interpolation function for the rotation part of the transform
R1 = H0[:3, :3]
R2 = H1[:3, :3]
key_rots = scipyR.from_matrix(np.stack((R1, R2), axis=0))
slerp = Slerp([0, 1], key_rots)
interp_R = slerp(t).as_matrix()
# Create linear interpolation for translation
interp_H = (1 - t) * H0 + t * H1
# Update rotation part of the transform
interp_H[:3, :3] = interp_R
return interp_H
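# Hedged usage sketch for interp_pose (added for illustration): halfway between the
# identity and a pose rotated 90 degrees around z and translated by (2, 0, 0), the
# interpolated pose should be a 45 degree rotation with a (1, 0, 0) translation.
def _interp_pose_example():
    H0 = np.eye(4)
    H1 = np.eye(4)
    H1[:3, :3] = scipyR.from_euler('z', 90, degrees=True).as_matrix()
    H1[:3, 3] = [2.0, 0.0, 0.0]
    H_mid = interp_pose(0.5, H0, H1)
    print('mid translation:', H_mid[:3, 3])
    print('mid rotation (deg):', scipyR.from_matrix(H_mid[:3, :3]).as_euler('zyx', degrees=True))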
def frame_H_to_points(H_f, size=1.0):
# Create artificial frames
x = np.linspace(0, size, 50, dtype=np.float32)
points = np.hstack((np.vstack((x, x * 0, x * 0)), np.vstack((x * 0, x, x * 0)), np.vstack((x * 0, x * 0, x)))).T
colors = ((points > 0.1 * size).astype(np.float32) * 255).astype(np.uint8)
hpoints = np.hstack((points, np.ones_like(points[:, :1])))
hpoints = np.matmul(hpoints, H_f.T)
return hpoints[:, :3], colors
def save_trajectory(filename, all_traj_H):
# Save full trajectory
all_traj_pts = []
all_traj_clrs = []
for save_i, save_H in enumerate(all_traj_H):
# Save trajectory
traj_pts, traj_clrs = frame_H_to_points(save_H, size=0.1)
traj_pts = np.hstack((traj_pts, np.ones_like(traj_pts[:, :1]) * save_i))
all_traj_pts.append(traj_pts.astype(np.float32))
all_traj_clrs.append(traj_clrs)
write_ply(filename,
[np.vstack(all_traj_pts), np.vstack(all_traj_clrs)],
['x', 'y', 'z', 't', 'red', 'green', 'blue'])
def cart2pol(xyz):
"""
    Conversion from 3D cartesian coordinates xyz to 3D polar coordinates rtp
:param xyz: [N,3] matrix of x, y, z coordinates
:return: [N,3] matrix of rho, theta, phi coordinates
"""
rho = np.linalg.norm(xyz, axis=1)
phi = (3 * np.pi / 2 - np.arctan2(xyz[:, 1], xyz[:, 0])) % (2 * np.pi)
theta = np.arctan2(np.linalg.norm(xyz[:, :2], axis=1), xyz[:, 2])
return np.vstack((rho, theta, phi)).T
def pol2cart(rtp):
"""
    Conversion from 3D polar coordinates rtp to 3D cartesian coordinates xyz
:param rtp: [N,3] matrix of rho, theta, phi coordinates
:return: [N,3] matrix of x, y, z coordinates
"""
x = rtp[:, 0] * np.sin(rtp[:, 1]) * np.cos(rtp[:, 2])
y = rtp[:, 0] * np.sin(rtp[:, 1]) * np.sin(rtp[:, 2])
z = rtp[:, 0] * np.cos(rtp[:, 1])
return np.vstack((x, y, z)).T
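# Added sanity-check sketch for the two conversions above. Note that cart2pol uses a
# shifted/mirrored azimuth convention (3*pi/2 - atan2(y, x)), so pol2cart(cart2pol(p))
# is not the identity in general; rho and theta are however directly interpretable.
def _polar_convention_check():
    xyz = np.array([[1.0, 0.0, 0.0],
                    [0.0, 0.0, 2.0]], dtype=np.float32)
    rtp = cart2pol(xyz)
    # First point: rho = 1, theta = pi/2 (horizontal); second point: rho = 2, theta = 0 (straight up)
    print('rho:', rtp[:, 0], 'theta:', rtp[:, 1])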
def ssc_to_homo(ssc, ssc_in_radians=True):
# Convert 6-DOF ssc coordinate transformation to 4x4 homogeneous matrix
# transformation
if ssc.ndim == 1:
reduce = True
ssc = np.expand_dims(ssc, 0)
else:
reduce = False
if not ssc_in_radians:
ssc[:, 3:] = np.pi / 180.0 * ssc[:, 3:]
sr = np.sin(ssc[:, 3])
cr = np.cos(ssc[:, 3])
sp = np.sin(ssc[:, 4])
cp = np.cos(ssc[:, 4])
sh = np.sin(ssc[:, 5])
ch = np.cos(ssc[:, 5])
H = np.zeros((ssc.shape[0], 4, 4))
H[:, 0, 0] = ch*cp
H[:, 0, 1] = -sh*cr + ch*sp*sr
H[:, 0, 2] = sh*sr + ch*sp*cr
H[:, 1, 0] = sh*cp
H[:, 1, 1] = ch*cr + sh*sp*sr
H[:, 1, 2] = -ch*sr + sh*sp*cr
H[:, 2, 0] = -sp
H[:, 2, 1] = cp*sr
H[:, 2, 2] = cp*cr
H[:, 0, 3] = ssc[:, 0]
H[:, 1, 3] = ssc[:, 1]
H[:, 2, 3] = ssc[:, 2]
H[:, 3, 3] = 1
if reduce:
H = np.squeeze(H)
return H
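# Added illustrative sketch: ssc_to_homo should return a rigid transform, i.e. an
# orthonormal rotation block with unit determinant and the requested translation.
# The pose values below are arbitrary.
def _ssc_to_homo_example():
    ssc = np.array([1.0, -2.0, 0.5, 10.0, 5.0, -30.0])
    H = ssc_to_homo(ssc, ssc_in_radians=False)
    R = H[:3, :3]
    print('translation:', H[:3, 3])
    print('R orthonormal:', np.allclose(np.matmul(R, R.T), np.eye(3), atol=1e-6))
    print('det(R):', np.linalg.det(R))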
def verify_magic(s):
magic = 44444
m = struct.unpack('<HHHH', s)
return len(m)>=4 and m[0] == magic and m[1] == magic and m[2] == magic and m[3] == magic
def test_read_hits():
data_path = '../../Data/NCLT'
velo_folder = 'velodyne_data'
day = '2012-01-08'
hits_path = join(data_path, velo_folder, day, 'velodyne_hits.bin')
all_utimes = []
all_hits = []
all_ints = []
num_bytes = getsize(hits_path)
current_bytes = 0
with open(hits_path, 'rb') as f_bin:
total_hits = 0
first_utime = -1
last_utime = -1
while True:
magic = f_bin.read(8)
if magic == b'':
break
if not verify_magic(magic):
print('Could not verify magic')
num_hits = struct.unpack('<I', f_bin.read(4))[0]
utime = struct.unpack('<Q', f_bin.read(8))[0]
# Do not convert padding (it is an int always equal to zero)
padding = f_bin.read(4)
total_hits += num_hits
if first_utime == -1:
first_utime = utime
last_utime = utime
hits = []
ints = []
for i in range(num_hits):
x = struct.unpack('<H', f_bin.read(2))[0]
y = struct.unpack('<H', f_bin.read(2))[0]
z = struct.unpack('<H', f_bin.read(2))[0]
i = struct.unpack('B', f_bin.read(1))[0]
l = struct.unpack('B', f_bin.read(1))[0]
hits += [[x, y, z]]
ints += [i]
utimes = np.full((num_hits,), utime - first_utime, dtype=np.int32)
ints = np.array(ints, dtype=np.uint8)
hits = np.array(hits, dtype=np.float32)
hits *= 0.005
hits += -100.0
all_utimes.append(utimes)
all_hits.append(hits)
all_ints.append(ints)
if 100 * current_bytes / num_bytes > 0.1:
break
current_bytes += 24 + 8 * num_hits
print('{:d}/{:d} => {:.1f}%'.format(current_bytes, num_bytes, 100 * current_bytes / num_bytes))
all_utimes = np.hstack(all_utimes)
all_hits = np.vstack(all_hits)
all_ints = np.hstack(all_ints)
write_ply('test_hits',
[all_hits, all_ints, all_utimes],
['x', 'y', 'z', 'intensity', 'utime'])
print("Read %d total hits from %ld to %ld" % (total_hits, first_utime, last_utime))
return 0
def raw_frames_ply():
# In files
data_path = '../../Data/NCLT'
velo_folder = 'velodyne_data'
# Out folder
out_folder = join(data_path, 'raw_ply')
if not exists(out_folder):
makedirs(out_folder)
# Transformation from body to velodyne frame (from NCLT paper)
x_body_velo = np.array([0.002, -0.004, -0.957, 0.807, 0.166, -90.703])
H_body_velo = ssc_to_homo(x_body_velo, ssc_in_radians=False)
H_velo_body = np.linalg.inv(H_body_velo)
x_body_lb3 = np.array([0.035, 0.002, -1.23, -179.93, -0.23, 0.50])
H_body_lb3 = ssc_to_homo(x_body_lb3, ssc_in_radians=False)
H_lb3_body = np.linalg.inv(H_body_lb3)
# properties list for binary file reading
properties = [('x', '<u2'),
('y', '<u2'),
('z', '<u2'),
('i', '<u1'),
('l', '<u1')]
# Get gt files and days
days = np.sort([v_f for v_f in listdir(join(data_path, velo_folder))])
for d, day in enumerate(days):
# Out folder
day_out_folder = join(out_folder, day)
if not exists(day_out_folder):
makedirs(day_out_folder)
# Day binary file
hits_path = join(data_path, velo_folder, day, 'velodyne_hits.bin')
# Init variables
all_hits = []
num_bytes = getsize(hits_path)
current_bytes = 0
frame_i = 0
last_phi = -1
t0 = time.time()
last_display = t0
with open(hits_path, 'rb') as f_bin:
while True:
####################
# Read packet header
####################
# Verify packet
magic = f_bin.read(8)
if magic == b'':
break
if not verify_magic(magic):
print('Could not verify magic')
# Get header info
num_hits = struct.unpack('<I', f_bin.read(4))[0]
utime = struct.unpack('<Q', f_bin.read(8))[0]
padding = f_bin.read(4) # Do not convert padding (it is an int always equal to zero)
##################
# Read binary hits
##################
# Get face data
packet_data = np.fromfile(f_bin, dtype=properties, count=num_hits)
# Rescale point coordinates
hits = np.vstack((packet_data['x'], packet_data['y'], packet_data['z'])).astype(np.float32).T
hits *= 0.005
hits += -100.0
##########################
# Gather frame if complete
##########################
phi = (np.arctan2(- hits[-1, 1], hits[-1, 0]) - np.pi / 2) % (2 * np.pi)
if phi < last_phi:
# Stack all frame points
f_hits = np.vstack(all_hits)
# Save frame
frame_name = join(day_out_folder, '{:.0f}.ply'.format(last_utime))
write_ply(frame_name,
[f_hits],
['x', 'y', 'z'])
# Display
t = time.time()
if (t - last_display) > 1.0:
last_display = t
message = '{:s}: frame {:7d} ({:6d} points)'
message += ' => {:5.1f}% and {:02d}:{:02d}:{:02d} remaining)'
# Predict remaining time
elapsed = t - t0
remaining = int(elapsed * num_bytes / current_bytes - elapsed)
hours = remaining // 3600
remaining = remaining - 3600 * hours
minutes = remaining // 60
seconds = remaining - 60 * minutes
print(message.format(day,
frame_i,
f_hits.shape[0],
100 * current_bytes / num_bytes,
hours, minutes, seconds))
# Update variables
frame_i += 1
all_hits = []
##############################
# Append hits to current frame
##############################
# Update last phi
last_phi = phi
last_utime = utime
# Append new data
all_hits.append(hits)
# Count bytes already read
current_bytes += 24 + 8 * num_hits
return 0
def frames_to_ply(show_frames=False):
# In files
data_path = '../../Data/NCLT'
velo_folder = 'velodyne_data'
days = np.sort([d for d in listdir(join(data_path, velo_folder))])
for day in days:
# Out files
ply_folder = join(data_path, 'frames_ply', day)
if not exists(ply_folder):
makedirs(ply_folder)
day_path = join(data_path, velo_folder, day, 'velodyne_sync')
f_names = np.sort([f for f in listdir(day_path) if f[-4:] == '.bin'])
N = len(f_names)
print('Reading', N, 'files')
for f_i, f_name in enumerate(f_names):
ply_name = join(ply_folder, f_name[:-4] + '.ply')
if exists(ply_name):
continue
t1 = time.time()
hits = []
ints = []
with open(join(day_path, f_name), 'rb') as f_bin:
while True:
x_str = f_bin.read(2)
# End of file
if x_str == b'':
break
x = struct.unpack('<H', x_str)[0]
y = struct.unpack('<H', f_bin.read(2))[0]
z = struct.unpack('<H', f_bin.read(2))[0]
intensity = struct.unpack('B', f_bin.read(1))[0]
l = struct.unpack('B', f_bin.read(1))[0]
hits += [[x, y, z]]
ints += [intensity]
ints = np.array(ints, dtype=np.uint8)
hits = np.array(hits, dtype=np.float32)
hits *= 0.005
hits += -100.0
write_ply(ply_name,
[hits, ints],
['x', 'y', 'z', 'intensity'])
t2 = time.time()
print('File {:s} {:d}/{:d} Done in {:.1f}s'.format(f_name, f_i, N, t2 - t1))
if show_frames:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(hits[:, 0], hits[:, 1], -hits[:, 2], c=-hits[:, 2], s=5, linewidths=0)
plt.show()
return 0
def merge_day_pointclouds(show_day_trajectory=False, only_SLAM_nodes=False):
"""
    Recreate the whole day point cloud thanks to gt poses
Generate gt_annotation of mobile objects
"""
# In files
data_path = '../../Data/NCLT'
gt_folder = 'ground_truth'
cov_folder = 'ground_truth_cov'
# Transformation from body to velodyne frame (from NCLT paper)
x_body_velo = np.array([0.002, -0.004, -0.957, 0.807, 0.166, -90.703])
H_body_velo = ssc_to_homo(x_body_velo, ssc_in_radians=False)
H_velo_body = np.linalg.inv(H_body_velo)
x_body_lb3 = np.array([0.035, 0.002, -1.23, -179.93, -0.23, 0.50])
H_body_lb3 = ssc_to_homo(x_body_lb3, ssc_in_radians=False)
H_lb3_body = np.linalg.inv(H_body_lb3)
# Get gt files and days
gt_files = np.sort([gt_f for gt_f in listdir(join(data_path, gt_folder)) if gt_f[-4:] == '.csv'])
cov_files = np.sort([cov_f for cov_f in listdir(join(data_path, cov_folder)) if cov_f[-4:] == '.csv'])
days = [d[:-4].split('_')[1] for d in gt_files]
# Load all gt poses
print('\nLoading days groundtruth poses...')
t0 = time.time()
gt_H = []
gt_t = []
for d, gt_f in enumerate(gt_files):
t1 = time.time()
gt_pkl_file = join(data_path, gt_folder, gt_f[:-4] + '.pkl')
if exists(gt_pkl_file):
# Read pkl
with open(gt_pkl_file, 'rb') as f:
day_gt_t, day_gt_H = pickle.load(f)
else:
# File paths
gt_csv = join(data_path, gt_folder, gt_f)
# Load gt
gt = np.loadtxt(gt_csv, delimiter=',')
            # Convert gt to homogeneous rotation/translation matrices
day_gt_t = gt[:, 0]
day_gt_H = ssc_to_homo(gt[:, 1:])
# Save pickle
with open(gt_pkl_file, 'wb') as f:
pickle.dump([day_gt_t, day_gt_H], f)
t2 = time.time()
print('{:s} {:d}/{:d} Done in {:.1f}s'.format(gt_f, d, gt_files.shape[0], t2 - t1))
gt_t += [day_gt_t]
gt_H += [day_gt_H]
if show_day_trajectory:
cov_csv = join(data_path, cov_folder, cov_files[d])
cov = np.loadtxt(cov_csv, delimiter=',')
t_cov = cov[:, 0]
t_cov_bool = np.logical_and(t_cov > np.min(day_gt_t), t_cov < np.max(day_gt_t))
t_cov = t_cov[t_cov_bool]
            # Note: Interpolation is not needed, this is done as a convenience
interp = scipy.interpolate.interp1d(day_gt_t, day_gt_H[:, :3, 3], kind='nearest', axis=0)
node_poses = interp(t_cov)
plt.figure()
plt.scatter(day_gt_H[:, 1, 3], day_gt_H[:, 0, 3], 1, c=-day_gt_H[:, 2, 3], linewidth=0)
plt.scatter(node_poses[:, 1], node_poses[:, 0], 1, c=-node_poses[:, 2], linewidth=5)
plt.axis('equal')
plt.title('Ground Truth Position of Nodes in SLAM Graph')
plt.xlabel('East (m)')
plt.ylabel('North (m)')
plt.colorbar()
plt.show()
t2 = time.time()
print('Done in {:.1f}s\n'.format(t2 - t0))
# Out files
out_folder = join(data_path, 'day_ply')
if not exists(out_folder):
makedirs(out_folder)
# Focus on a particular point
p0 = np.array([-220, -527, 12])
center_radius = 10.0
point_radius = 50.0
# Loop on days
for d, day in enumerate(days):
#if day != '2012-02-05':
# continue
day_min_t = gt_t[d][0]
day_max_t = gt_t[d][-1]
frames_folder = join(data_path, 'frames_ply', day)
f_times = np.sort([float(f[:-4]) for f in listdir(frames_folder) if f[-4:] == '.ply'])
# If we want, load only SLAM nodes
if only_SLAM_nodes:
# Load node timestamps
cov_csv = join(data_path, cov_folder, cov_files[d])
cov = np.loadtxt(cov_csv, delimiter=',')
t_cov = cov[:, 0]
t_cov_bool = np.logical_and(t_cov > day_min_t, t_cov < day_max_t)
t_cov = t_cov[t_cov_bool]
# Find closest lidar frames
t_cov = np.expand_dims(t_cov, 1)
diffs = np.abs(t_cov - f_times)
inds = np.argmin(diffs, axis=1)
f_times = f_times[inds]
# Is this frame in gt
f_t_bool = np.logical_and(f_times > day_min_t, f_times < day_max_t)
f_times = f_times[f_t_bool]
        # Interpolate gt poses to frame timestamps
interp = scipy.interpolate.interp1d(gt_t[d], gt_H[d], kind='nearest', axis=0)
frame_poses = interp(f_times)
N = len(f_times)
world_points = []
world_frames = []
world_frames_c = []
print('Reading', day, ' => ', N, 'files')
for f_i, f_t in enumerate(f_times):
t1 = time.time()
#########
# GT pose
#########
H = frame_poses[f_i].astype(np.float32)
# s = '\n'
# for cc in H:
# for c in cc:
# s += '{:5.2f} '.format(c)
# s += '\n'
# print(s)
#############
# Focus check
#############
if np.linalg.norm(H[:3, 3] - p0) > center_radius:
continue
###################################
# Local frame coordinates for debug
###################################
# Create artificial frames
x = np.linspace(0, 1, 50, dtype=np.float32)
points = np.hstack((np.vstack((x, x*0, x*0)), np.vstack((x*0, x, x*0)), np.vstack((x*0, x*0, x)))).T
colors = ((points > 0.1).astype(np.float32) * 255).astype(np.uint8)
hpoints = np.hstack((points, np.ones_like(points[:, :1])))
hpoints = np.matmul(hpoints, H.T)
hpoints[:, 3] *= 0
world_frames += [hpoints[:, :3]]
world_frames_c += [colors]
#######################
# Load velo point cloud
#######################
# Load frame ply file
f_name = '{:.0f}.ply'.format(f_t)
data = read_ply(join(frames_folder, f_name))
points = np.vstack((data['x'], data['y'], data['z'])).T
#intensity = data['intensity']
hpoints = np.hstack((points, np.ones_like(points[:, :1])))
hpoints = np.matmul(hpoints, H.T)
hpoints[:, 3] *= 0
hpoints[:, 3] += np.sqrt(f_t - f_times[0])
# focus check
focus_bool = np.linalg.norm(hpoints[:, :3] - p0, axis=1) < point_radius
hpoints = hpoints[focus_bool, :]
world_points += [hpoints]
t2 = time.time()
print('File {:s} {:d}/{:d} Done in {:.1f}s'.format(f_name, f_i, N, t2 - t1))
if len(world_points) < 2:
continue
world_points = np.vstack(world_points)
###### DEBUG
world_frames = np.vstack(world_frames)
world_frames_c = np.vstack(world_frames_c)
write_ply('testf.ply',
[world_frames, world_frames_c],
['x', 'y', 'z', 'red', 'green', 'blue'])
###### DEBUG
print(world_points.shape, world_points.dtype)
# Subsample merged frames
# world_points, features = grid_subsampling(world_points[:, :3],
# features=world_points[:, 3:],
# sampleDl=0.1)
features = world_points[:, 3:]
world_points = world_points[:, :3]
print(world_points.shape, world_points.dtype)
write_ply('test' + day + '.ply',
[world_points, features],
['x', 'y', 'z', 't'])
# Generate gt annotations
# Subsample day ply (for visualization)
# Save day ply
# a = 1/0
def local_PCA(points):
# Compute the barycenter
center = np.mean(points, axis=0)
# Centered clouds
points_c = points - center
# Covariance matrix
C = (points_c.T).dot(points_c) / points.shape[0]
# Eigenvalues
return np.linalg.eigh(C)
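# Added example of how the eigen-decomposition from local_PCA is typically turned into a
# normal and a planarity score (np.linalg.eigh returns eigenvalues in ascending order, so
# the first eigenvector is the normal). Synthetic data only; the name is an illustrative
# addition.
def _local_pca_example():
    rng = np.random.RandomState(0)
    # Thin noisy patch lying in the x-y plane
    patch = np.hstack((rng.uniform(-1, 1, (200, 2)), rng.normal(0, 0.01, (200, 1))))
    eigenvalues, eigenvectors = local_PCA(patch)
    normal = eigenvectors[:, 0]
    planarity = (eigenvalues[1] - eigenvalues[0]) / (eigenvalues[2] + 1e-9)
    print('normal:', normal, 'planarity:', planarity)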
def estimate_normals_planarity_debug(cloud):
"""
Custom function that estimates normals and planarity of lidar frames, using polar coordinates neighborhoods.
    :param cloud: [N, 3] numpy array of frame points.
    :return: sphericity scores (one value per point)
"""
# Rescale for numerical stability
#
t = [time.time()]
# Get point cloud
points = cloud.astype(np.float32)
normals0, planarity, linearity = polar_normals(points.astype(np.float32), verbose=1)
scores0 = planarity + linearity
t += [time.time()]
print(normals0.dtype, normals0.shape)
print(scores0.dtype, scores0.shape)
# Transform to polar coordinates
polar_points = cart2pol(points)
t += [time.time()]
    # Define search radius (l2 metric on the rescaled polar coordinates). Vertical angular resolution of HDL32 is 1.29 deg
angular_res = 1.29 * np.pi / 180
polar_r = 1.5 * angular_res
# Define horizontal scale (smaller distance for the neighbor in horizontal direction)
horizontal_scale = 0.5
# Use log of range so that neighbor radius is proportional to the range.
range_scale = 4.0
polar_points[:, 0] = np.log(polar_points[:, 0]) * polar_r / (np.log((1 + polar_r) / (1 - polar_r)) * range_scale)
# Apply horizontal scale
polar_points[:, 2] *= 1 / horizontal_scale
t += [time.time()]
# Create 2d KDTree to search lidar neighborhoods
polar_tree = KDTree(polar_points, metric='l2')
t += [time.time()]
# Find neighbors
all_neighb_inds = polar_tree.query_radius(polar_points, polar_r)
t += [time.time()]
# Rescale everything
# polar_points[:, 2] *= horizontal_scale
# polar_points[:, 0] = np.exp(polar_points[:, 0] * np.log((1 + polar_r) / (1 - polar_r)) * range_scale / polar_r)
# Compute covariance matrices
all_eigenvalues = np.empty(polar_points.shape, dtype=np.float32)
all_eigenvectors = np.empty((polar_points.shape[0], 3, 3), dtype=np.float32)
for i, neighb_inds in enumerate(all_neighb_inds):
all_eigenvalues[i, :], all_eigenvectors[i, :, :] = local_PCA(points[neighb_inds, :])
t += [time.time()]
# Compute normals and planarity
normals = all_eigenvectors[:, :, 0]
    sphericity = 1 - all_eigenvalues[:, 0] / (all_eigenvalues[:, 2] + 1e-9)
t += [time.time()]
# Choose random point for showing
rand_inds = np.random.randint(polar_points.shape[0], size=100)
features = np.zeros_like(polar_points[:, 2])
for ri, rand_id in enumerate(rand_inds):
features[all_neighb_inds[rand_id]] = ri
features[rand_id] = 2 * len(rand_inds)
write_ply('ttt_xyz.ply',
[points, normals, features, sphericity, scores0],
['x', 'y', 'z', 'nx', 'ny', 'nz', 'f', 'score', 'cpp_score'])
# polar_points[:, 1] *= 180 / np.pi
# polar_points[:, 2] *= 180 / np.pi
#polar_points[:, 0] = np.exp(polar_points[:, 0] * np.log((1 + polar_r) / (1 - polar_r)) * range_scale / polar_r)
polar_points = polar_points[:, [2, 1, 0]]
write_ply('ttt_rtp.ply',
[polar_points, polar_points[:, 1] * 0, features],
['x', 'y', 'z', 'i', 'f'])
# Filter outlier from ray/edges
# Assign normals to pointcloud structure
#cloud.normals = o3d.utility.Vector3dVector(normals)
t += [time.time()]
# Display timings
print('\n*****************\n')
print('Validation timings:')
i = 0
    print('C++ ....... {:.1f}ms'.format(1000 * (t[i + 1] - t[i])))
    i += 1
    print('polar ..... {:.1f}ms'.format(1000 * (t[i + 1] - t[i])))
    i += 1
    print('scale ..... {:.1f}ms'.format(1000 * (t[i + 1] - t[i])))
    i += 1
    print('Tree ...... {:.1f}ms'.format(1000 * (t[i + 1] - t[i])))
    i += 1
    print('neighb .... {:.1f}ms'.format(1000 * (t[i + 1] - t[i])))
    i += 1
    print('PCA ....... {:.1f}ms'.format(1000 * (t[i + 1] - t[i])))
    i += 1
    print('features .. {:.1f}ms'.format(1000 * (t[i + 1] - t[i])))
    i += 1
    print('Assign .... {:.1f}ms'.format(1000 * (t[i + 1] - t[i])))
print('\n*****************\n')
return sphericity
def estimate_normals_planarity(cloud):
"""
Custom function that estimates normals and planarity of lidar frames, using polar coordinates neighborhoods.
:param cloud: Open3D PointCloud.
:return: planarities (Normals are modified in place)
"""
# Get point cloud
points = np.asarray(cloud.points)
    normals, planarity, linearity = polar_normals(points.astype(np.float32), verbose=1)
    scores = planarity + linearity
    # Assign normals to pointcloud structure
    cloud.normals = o3d.utility.Vector3dVector(normals)
    return scores
def gaussian_conv_filter(dimension=3, size=5):
# Sigma according to size
sig = (size/2 - 0.5) / 2
eps = 1e-6
# Get coordinates
coords = np.arange(-size/2 + 0.5, size/2, 1.0)
if dimension == 2:
x, y = np.meshgrid(coords, coords)
sq_r = x ** 2 + y ** 2
elif dimension == 3:
x, y, z = np.meshgrid(coords, coords, coords)
sq_r = x ** 2 + y ** 2 + z ** 2
elif dimension == 4:
x, y, z, t = np.meshgrid(coords, coords, coords, coords)
sq_r = x ** 2 + y ** 2 + z ** 2 + t ** 2
else:
raise ValueError('Unsupported dimension (max is 4)')
return torch.exp(-torch.from_numpy(sq_r.astype(np.float32)) / (2 * sig ** 2 + eps))
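# Added usage sketch: this is how the filter above is consumed in normal_filtering and
# normals_orientation, i.e. as the normalized weight of a fixed 3D convolution used to
# smooth a histogram of normal directions.
def _gaussian_filter_example():
    conv = torch.nn.Conv3d(1, 1, kernel_size=5, stride=1, bias=False)
    conv.weight.requires_grad_(False)
    conv.weight *= 0
    conv.weight += gaussian_conv_filter(dimension=3, size=5)
    conv.weight *= torch.sum(conv.weight) ** -1
    dummy = torch.rand(1, 1, 16, 16, 16)
    smoothed = conv(torch.nn.functional.pad(dummy, [2, 2, 2, 2, 2, 2]))
    print(smoothed.shape)  # torch.Size([1, 1, 16, 16, 16])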
def normal_filtering(normals, debug=False):
    # Discretise the sphere in cartesian coordinates to avoid the pole resolution problem
voxel_size = 0.05
# Compute voxel indice for each point
grid_indices = (np.floor(normals / voxel_size)).astype(int)
# Limits of the grid
min_grid_indices = np.amin(grid_indices, axis=0)
max_grid_indices = np.amax(grid_indices, axis=0)
# Number of cells in each direction
deltaX, deltaY, deltaZ = max_grid_indices - min_grid_indices + 1
# Relocate indices
grid_indices -= min_grid_indices
# Scalar equivalent to grid indices
scalar_indices = grid_indices[:, 0] + grid_indices[:, 1] * deltaX + grid_indices[:, 2] * deltaX * deltaY
unique_inds, inverse, counts = np.unique(scalar_indices, return_counts=True, return_inverse=True)
# Get counts in a 3D matrix
unique_z = unique_inds // (deltaX * deltaY)
unique_inds -= unique_z * deltaX * deltaY
unique_y = unique_inds // deltaX
unique_x = unique_inds - unique_y * deltaX
count_matrix = np.zeros((deltaX, deltaY, deltaZ), dtype=np.float32)
count_matrix[unique_x, unique_y, unique_z] += counts
# Smooth them with a gaussian filter convolution
torch_conv = torch.nn.Conv3d(1, 1, kernel_size=5, stride=1, bias=False)
torch_conv.weight.requires_grad_(False)
torch_conv.weight *= 0
torch_conv.weight += gaussian_conv_filter(3, 5)
torch_conv.weight *= torch.sum(torch_conv.weight) ** -1
count_matrix = np.expand_dims(count_matrix, 0)
count_matrix = np.expand_dims(count_matrix, 0)
torch_count = torch.from_numpy(count_matrix)
torch_count = torch.nn.functional.pad(torch_count, [2, 2, 2, 2, 2, 2])
smooth_counts = torch.squeeze(torch_conv(torch_count))
smooth_counts = smooth_counts.numpy()[unique_x, unique_y, unique_z]
#################################################
# Create weight according to the normal direction
#################################################
    # Only 20% of the normal bins are kept. For the rest, we use weights based on distances
weights = (smooth_counts > np.percentile(smooth_counts, 80)).astype(np.float32)
# Show histogram in a spherical point cloud
if debug:
n_cloud = np.vstack((unique_x, unique_y, unique_z)).astype(np.float32).T
n_cloud = (n_cloud + min_grid_indices.astype(np.float32) + 0.5) * voxel_size
#n_cloud = n_cloud / np.linalg.norm(n_cloud, axis=1, keepdims=True)
write_ply('nnn_NORMAL_HIST.ply',
[n_cloud, smooth_counts],
['x', 'y', 'z', 'counts'])
a = 1/0
return weights[inverse]
def load_gt_poses(gt_path, only_day_1=False):
gt_files = np.sort([gt_f for gt_f in listdir(gt_path) if gt_f[-4:] == '.csv'])
gt_H = []
gt_t = []
for d, gt_f in enumerate(gt_files):
t1 = time.time()
gt_pkl_file = join(gt_path, gt_f[:-4] + '.pkl')
if exists(gt_pkl_file):
# Read pkl
with open(gt_pkl_file, 'rb') as f:
day_gt_t, day_gt_H = pickle.load(f)
else:
# File paths
gt_csv = join(gt_path, gt_f)
# Load gt
gt = np.loadtxt(gt_csv, delimiter=',')
            # Convert gt to homogeneous rotation/translation matrices
day_gt_t = gt[:, 0]
day_gt_H = ssc_to_homo(gt[:, 1:])
# Save pickle
with open(gt_pkl_file, 'wb') as f:
pickle.dump([day_gt_t, day_gt_H], f)
t2 = time.time()
print('{:s} {:d}/{:d} Done in {:.1f}s'.format(gt_f, d, gt_files.shape[0], t2 - t1))
gt_t += [day_gt_t]
gt_H += [day_gt_H]
if only_day_1 and d > -1:
break
return gt_t, gt_H
def get_area_frames(days, gt_t, gt_H, raw_path, area_center, area_radius, only_day_1=False):
# Loop on days
day_f_times = []
for d, day in enumerate(days):
# Get frame timestamps
frames_folder = join(raw_path, day)
f_times = np.sort([float(f[:-4]) for f in listdir(frames_folder) if f[-4:] == '.ply'])
# Ground truth does not cover all frames
day_min_t = gt_t[d][0]
day_max_t = gt_t[d][-1]
f_t_bool = np.logical_and(f_times > day_min_t, f_times < day_max_t)
f_times = f_times[f_t_bool]
        # Interpolate gt poses to frame timestamps
interp = scipy.interpolate.interp1d(gt_t[d], gt_H[d], kind='nearest', axis=0)
frame_poses = interp(f_times)
# Closest frame to picked point
closest_i = 0
closest_d = 1e6
new_f_times = []
for f_i, f_t in enumerate(f_times):
# GT pose
H = frame_poses[f_i].astype(np.float32)
# Focus check
f_dist = np.linalg.norm(H[:3, 3] - area_center)
if f_dist > area_radius:
continue
# Save closest frame
if (f_dist < closest_d):
closest_d = f_dist
closest_i = len(new_f_times)
# Append frame to candidates
new_f_times.append(f_t)
        # Keep only the contiguous run of frames around the closest one (remove time jumps)
new_f_times = np.array(new_f_times, dtype=np.float64)
gaps = new_f_times[1:] - new_f_times[:-1]
med_gap = np.median(gaps[:50])
jumps = np.sort(np.where(gaps > 5 * med_gap)[0])
i0 = 0
i1 = len(new_f_times)
for j in jumps:
if j + 1 < closest_i:
i0 = j + 1
for j in jumps[::-1]:
if j + 1 > closest_i:
i1 = j + 1
day_f_times.append(new_f_times[i0:i1])
if only_day_1 and d > -1:
break
return day_f_times
def test_icp_registration():
"""
    Test ICP registration. Use GT to extract a small interesting region.
"""
############
# Parameters
############
# In files
data_path = '../../Data/NCLT'
gt_folder = 'ground_truth'
cov_folder = 'ground_truth_cov'
# Transformation from body to velodyne frame (from NCLT paper)
x_body_velo = np.array([0.002, -0.004, -0.957, 0.807, 0.166, -90.703])
H_body_velo = ssc_to_homo(x_body_velo, ssc_in_radians=False)
H_velo_body = np.linalg.inv(H_body_velo)
x_body_lb3 = np.array([0.035, 0.002, -1.23, -179.93, -0.23, 0.50])
H_body_lb3 = ssc_to_homo(x_body_lb3, ssc_in_radians=False)
H_lb3_body = np.linalg.inv(H_body_lb3)
# Out files
out_folder = join(data_path, 'day_ply')
if not exists(out_folder):
makedirs(out_folder)
# Get gt files and days
gt_files = np.sort([gt_f for gt_f in listdir(join(data_path, gt_folder)) if gt_f[-4:] == '.csv'])
cov_files = np.sort([cov_f for cov_f in listdir(join(data_path, cov_folder)) if cov_f[-4:] == '.csv'])
days = [d[:-4].split('_')[1] for d in gt_files]
###############
# Load GT poses
###############
print('\nLoading days groundtruth poses...')
t0 = time.time()
gt_H = []
gt_t = []
for d, gt_f in enumerate(gt_files):
t1 = time.time()
gt_pkl_file = join(data_path, gt_folder, gt_f[:-4] + '.pkl')
if exists(gt_pkl_file):
# Read pkl
with open(gt_pkl_file, 'rb') as f:
day_gt_t, day_gt_H = pickle.load(f)
else:
# File paths
gt_csv = join(data_path, gt_folder, gt_f)
# Load gt
gt = np.loadtxt(gt_csv, delimiter=',')
            # Convert gt to homogeneous rotation/translation matrices
day_gt_t = gt[:, 0]
day_gt_H = ssc_to_homo(gt[:, 1:])
# Save pickle
with open(gt_pkl_file, 'wb') as f:
pickle.dump([day_gt_t, day_gt_H], f)
t2 = time.time()
print('{:s} {:d}/{:d} Done in {:.1f}s'.format(gt_f, d, gt_files.shape[0], t2 - t1))
gt_t += [day_gt_t]
gt_H += [day_gt_H]
if d > -1:
break
t2 = time.time()
print('Done in {:.1f}s\n'.format(t2 - t0))
########################
# Get lidar frames times
########################
# Focus on a particular point
p0 = np.array([-220, -527, 12])
center_radius = 10.0
point_radius = 50.0
print('\nGet timestamps in focused area...')
t0 = time.time()
# Loop on days
day_f_times = []
for d, day in enumerate(days):
day_min_t = gt_t[d][0]
day_max_t = gt_t[d][-1]
frames_folder = join(data_path, 'raw_ply', day)
f_times = np.sort([float(f[:-4]) for f in listdir(frames_folder) if f[-4:] == '.ply'])
# Is this frame in gt
f_t_bool = np.logical_and(f_times > day_min_t, f_times < day_max_t)
f_times = f_times[f_t_bool]
        # Interpolate gt poses to frame timestamps
interp = scipy.interpolate.interp1d(gt_t[d], gt_H[d], kind='nearest', axis=0)
frame_poses = interp(f_times)
N = len(f_times)
new_f_times = []
for f_i, f_t in enumerate(f_times):
t1 = time.time()
# GT pose
H = frame_poses[f_i].astype(np.float32)
# Focus check
if np.linalg.norm(H[:3, 3] - p0) > center_radius:
continue
new_f_times.append(f_t)
# DEBUGGGGGG
new_f_times = new_f_times[5:-5]
day_f_times.append(np.array(new_f_times, dtype=np.float64))
if d > -1:
break
t2 = time.time()
print('Done in {:.1f}s\n'.format(t2 - t0))
###########################
    # coarse map with pt2pl icp
###########################
for d, day in enumerate(days):
frames_folder = join(data_path, 'raw_ply', day)
N = len(day_f_times[d])
print('Reading', day, ' => ', N, 'files')
# Load first frame as map
last_transform = np.eye(4)
last_cloud = None
threshold = 0.3
score_thresh = 0.99
voxel_size = 0.1
transform_list = []
cloud_list = []
cloud_map = None
full_map = None
full_map_t = None
verbose = 1
t = [time.time()]
for f_i, f_t in enumerate(day_f_times[d]):
#######################
# Load velo point cloud
#######################
t = [time.time()]
# Load frame ply file
f_name = '{:.0f}.ply'.format(f_t)
cloud = o3d.io.read_point_cloud(join(frames_folder, f_name))
t += [time.time()]
# Cloud normals and planarity
scores = estimate_normals_planarity(cloud)
if f_i < 1:
last_cloud = cloud
cloud_map = cloud
continue
t += [time.time()]
# Remove low score for fitting
cloud_down = o3d.geometry.PointCloud()
cloud_down.points = o3d.utility.Vector3dVector(np.asarray(cloud.points)[scores > score_thresh, :])
cloud_down.normals = o3d.utility.Vector3dVector(np.asarray(cloud.normals)[scores > score_thresh, :])
# Downsample target
cloud_down = cloud_down.voxel_down_sample(voxel_size)
# if f_i > 2:
#
# np.asarray(last_cloud.normals).astype(np.float32)
# new_scores = np.ones_like(np.asarray(cloud_down.points).astype(np.float32))[:, 0]
# H, rms = pt2pl_icp(np.asarray(cloud_down.points).astype(np.float32),
# np.asarray(last_cloud.points).astype(np.float32),
# np.asarray(last_cloud.normals).astype(np.float32),
# new_scores,
# n_samples=1000,
# max_pairing_dist=0.2,
# max_iter=10,
# minDiffRMS=0.001)
#
# print(H)
# print(rms)
# a = 1 / 0
t += [time.time()]
# Measure initial ICP metrics
if verbose == 2:
reg_init = o3d.registration.evaluate_registration(cloud_down, last_cloud,
threshold, last_transform)
t += [time.time()]
else:
reg_init = None
# Apply ICP
reg_pt2pl = o3d.registration.registration_icp(
cloud_down, last_cloud, threshold, last_transform,
o3d.registration.TransformationEstimationPointToPlane())
t += [time.time()]
# Print results
if verbose == 2:
print('ICP convergence:')
print('fitness ...... {:7.4f} => {:7.4f}'.format(reg_init.fitness,
reg_pt2pl.fitness))
print('inlier_rmse .. {:7.4f} => {:7.4f}'.format(reg_init.inlier_rmse,
reg_pt2pl.inlier_rmse))
print('corresp_n .... {:7d} => {:7d}'.format(np.asarray(reg_init.correspondence_set).shape[0],
np.asarray(reg_pt2pl.correspondence_set).shape[0]))
# Apply transformation for the init of next step
cloud_down.transform(reg_pt2pl.transformation)
if verbose == 2:
# Save init cloud
# cloud_init = copy.deepcopy(cloud)
# cloud_init.transform(last_transform)
# write_ply('ttt_{:d}_init.ply'.format(f_i),
# [np.asarray(cloud_init.points)],
# ['x', 'y', 'z'])
# Save result cloud
cloud.transform(reg_pt2pl.transformation)
write_ply('ttt_{:d}_reg.ply'.format(f_i),
[np.asarray(cloud.points)],
['x', 'y', 'z'])
t += [time.time()]
# Update sub map
cloud_map.points.extend(cloud.points)
cloud_map = cloud_map.voxel_down_sample(voxel_size=voxel_size)
write_ply('tt_sub_map.ply'.format(f_i),
[np.asarray(cloud_map.points)],
['x', 'y', 'z'])
# Update full map
if full_map is None:
full_map = copy.deepcopy(cloud_down)
full_map_t = np.full(shape=(np.asarray(cloud_down.points).shape[0],),
fill_value=f_t - day_f_times[d][0],
dtype=np.float64)
else:
full_map.points.extend(cloud_down.points)
full_map_t = np.hstack((full_map_t, np.full(shape=(np.asarray(cloud_down.points).shape[0],),
fill_value=f_t - day_f_times[d][0],
dtype=np.float64)))
write_ply('tt_full_map.ply'.format(f_i),
[np.asarray(full_map.points), full_map_t],
['x', 'y', 'z', 't'])
t += [time.time()]
# Update variables
last_cloud = cloud_down
last_transform = reg_pt2pl.transformation
transform_list += [reg_pt2pl.transformation]
cloud_list += [np.asarray(cloud_down.points).astype(np.float32)]
t += [time.time()]
if verbose > 0:
print('{:.0f} registered on {:.0f} in {:.1f}ms ({:d}/{:d})'.format(f_t,
day_f_times[d][f_i - 1],
1000 * (t[-1] - t[0]),
f_i,
N))
# Display timings
if verbose == 2:
print('\n*********************')
i = 0
print('Load ...... {:.1f}ms'.format(1000 * (t[i + 1] - t[i])))
i += 1
print('Normals ... {:.1f}ms'.format(1000 * (t[i + 1] - t[i])))
i += 1
print('Filter .... {:.1f}ms'.format(1000 * (t[i + 1] - t[i])))
i += 1
print('Eval ...... {:.1f}ms'.format(1000 * (t[i + 1] - t[i])))
i += 1
print('ICP ....... {:.1f}ms'.format(1000 * (t[i + 1] - t[i])))
i += 1
print('Transform . {:.1f}ms'.format(1000 * (t[i + 1] - t[i])))
i += 1
print('Save ...... {:.1f}ms'.format(1000 * (t[i + 1] - t[i])))
i += 1
print('Update .... {:.1f}ms'.format(1000 * (t[i + 1] - t[i])))
print('*********************\n')
print('\n********************************************\n')
# Save results
full_map = np.vstack(cloud_list)
times_list = [f_t - day_f_times[d][0] for f_t in day_f_times[d][1:]]
full_map_t = np.vstack([np.full((cld.shape[0], 1), f_t, dtype=np.float64)
for f_t, cld in zip(times_list, cloud_list)])
write_ply('tt_full_map.ply',
[full_map, full_map_t],
['x', 'y', 'z', 't'])
# TODO:
    # > Multithread this first pass at a python level (use Pytorch?). No need for a multithread cpp wrapper
    # > Second pass (refinement) with normals re-estimated on the map
    # > Take motion distortion into account (in the second pass).
    # > Use graph optimization for loop closure and day merging
a = 1 / 0
def bundle_icp_debug(verbose=2):
"""
    Test bundle ICP registration. Use GT to extract a small interesting region.
"""
############
# Parameters
############
# Path to data
data_path = '../../Data/NCLT'
gt_folder = 'ground_truth'
raw_folder = 'raw_ply'
days = np.sort([d for d in listdir(join(data_path, raw_folder))])
# Out files
out_folder = join(data_path, 'day_ply')
if not exists(out_folder):
makedirs(out_folder)
# Stride (nb of frames skipped for transformations)
frame_stride = 2
# Bundle size (number of frames jointly optimized) and stride (nb of frames between each bundle start)
bundle_size = 7
bundle_stride = bundle_size - 1
# Normal estimation parameters
score_thresh = 0.99
# Pointcloud filtering parameters
map_voxel_size = 0.05
frame_voxel_size = -0.05
# Group of frames saved together
save_group = 100
###############
# Load GT poses
###############
print('\nLoading days groundtruth poses...')
t0 = time.time()
gt_t, gt_H = load_gt_poses(join(data_path, gt_folder), only_day_1=True)
t2 = time.time()
print('Done in {:.1f}s\n'.format(t2 - t0))
#######################
# Get lidar frame times
#######################
# Focus on a particular point
p0 = np.array([-220, -527, 12])
R0 = 10.0
print('\nGet timestamps in focused area...')
t0 = time.time()
day_f_times = get_area_frames(days, gt_t, gt_H, join(data_path, raw_folder), p0, R0, only_day_1=True)
t2 = time.time()
print('Done in {:.1f}s\n'.format(t2 - t0))
###########################
# coarse map with pt2pl icp
###########################
for d, day in enumerate(days):
# List of transformation we are trying to optimize
frames_folder = join(data_path, 'raw_ply', day)
f_times = [f_t for f_t in day_f_times[d][::frame_stride]]
transform_list = [np.eye(4) for _ in f_times]
last_saved_frames = 0
FPS = 0
N = len(f_times)
for b_i, bundle_i0 in enumerate(np.arange(0, len(f_times), bundle_stride)):
####################
# Load bundle frames
####################
t = [time.time()]
if (bundle_i0 + bundle_size > len(f_times)):
bundle_i0 = len(f_times) - bundle_size
frame_pts = []
frame_norms = []
frame_w = []
for f_t in f_times[bundle_i0:bundle_i0+bundle_size]:
# Load ply format points
f_name = '{:.0f}.ply'.format(f_t)
data = read_ply(join(frames_folder, f_name))
points = np.vstack((data['x'], data['y'], data['z'])).T
t += [time.time()]
# Get normals
                normals, planarity, linearity = polar_normals(points, radius=1.5, h_scale=0.5)
                norm_scores = planarity + linearity
                # Remove low quality normals for fitting
points = points[norm_scores > score_thresh]
normals = normals[norm_scores > score_thresh]
norm_scores = (norm_scores[norm_scores > score_thresh] - score_thresh) / (1 - score_thresh)
t += [time.time()]
# Subsample to reduce number of points
if frame_voxel_size > 0:
                    # grid subsampling
                    points, normals = grid_subsampling(points, features=normals, sampleDl=frame_voxel_size)
# Renormalize normals
normals = normals / np.linalg.norm(normals, axis=1, keepdims=True)
                # Filter out points according to main normal directions (Not necessary if normals are better computed)
bool_filter = normal_filtering(normals) > 0.5
points = points[bool_filter]
normals = normals[bool_filter]
norm_scores = norm_scores[bool_filter]
t += [time.time()]
# Compute score for each component of rotations / translation
                # Weights according to distance: the further, the higher (square rule because points lie on surfaces)
#rot_scores = np.expand_dims(norm_scores, 1) * np.cross(points, normals, axis=1)
#weights = np.hstack((rot_scores, -rot_scores))
weights = np.expand_dims(norm_scores, 1)
# Gather frames data
frame_pts.append(points)
frame_norms.append(normals)
frame_w.append(weights)
t += [time.time()]
if verbose == 3:
dt = np.array(t[1:]) - np.array(t[:-1])
dt = dt.reshape(bundle_size, -1)
timing_names = ['Load', 'Normals', 'Filter', 'Append']
s = ''
for t_name in timing_names:
s += '{:^10s} '.format(t_name)
s += '\n'
for b in range(bundle_size):
for t_i in range(len(timing_names)):
s += '{:^10.1f} '.format(1000 * dt[b, t_i])
s += '\n'
print(s)
t = t[:1]
t += [time.time()]
##################
# Apply bundle ICP
##################
# for b in range(bundle_size):
# w_names = ['w{:d}'.format(i) for i in range(frame_w[b].shape[1])]
# write_ply('bb_init_{:02d}.ply'.format(b),
# [frame_pts[b], frame_w[b]],
# ['x', 'y', 'z'] + w_names)
bundle_H, bundle_rms, all_H = bundle_pt2pl_icp(frame_pts,
frame_norms,
frame_w,
n_samples=1000,
max_pairing_dist=0.2,
max_iter=200,
avg_steps=5)
t += [time.time()]
save_debug_frames = False
if save_debug_frames:
print(all_H.shape)
bundle_inds = []
steps = []
all_pts = []
for s, HH in enumerate(all_H):
for b, H in enumerate(HH):
if b == 0:
world_H = np.linalg.inv(H)
else:
world_H = np.eye(4)
for bb in range(b, 0, -1):
world_H = np.matmul(HH[bb], world_H)
pts, clrs = frame_H_to_points(world_H, size=0.1)
pts = pts.astype(np.float32)
all_pts.append(pts)
bundle_inds.append(pts[:, 0]*0+b)
steps.append(pts[:, 0]*0+s)
write_ply('bb_frames.ply',
[np.vstack(all_pts), np.hstack(steps), np.hstack(bundle_inds)],
['x', 'y', 'z', 's', 'b'])
debug_rms = False
if debug_rms:
fig = plt.figure('RMS')
for b, b_rms in enumerate(bundle_rms):
if b == 0:
plt.plot(b_rms, '-', linewidth=2, label='{:d}-0'.format(bundle_size - 1))
else:
plt.plot(b_rms, '-', linewidth=1, label='{:d}-{:d}'.format(b, b - 1))
plt.xlabel('steps')
plt.ylabel('rms')
#plt.legend(loc=1)
plt.ylim(0, 0.3)
all_H_inv = np.copy(all_H.transpose((0, 1, 3, 2)))
all_H_inv[:, :, 3, :3] = 0
all_H_inv[:, :, :3, 3:] = -np.matmul(all_H_inv[:, :, :3, :3], all_H[:, :, :3, 3:])
dH = np.matmul(all_H[1:], all_H_inv[:-1])
dH = dH.transpose((1, 0, 2, 3))
plt.figure('dT')
for b, b_dH in enumerate(dH):
b_dT = np.linalg.norm(b_dH[:, :3, 3], axis=1)
b_dT = running_mean(b_dT, 4)
if b == 0:
plt.plot(b_dT, '-', linewidth=2, label='{:d}-0'.format(bundle_size - 1))
else:
plt.plot(b_dT, '-', linewidth=1, label='{:d}-{:d}'.format(b, b - 1))
plt.xlabel('steps')
plt.ylabel('rms')
#plt.legend(loc=1)
plt.ylim(0, 0.05)
plt.figure('dR')
for b, b_dH in enumerate(dH):
b_dR = np.arccos((np.trace(b_dH[:, :3, :3], axis1=1, axis2=2) - 1) / 2)
b_dR = running_mean(b_dR, 4)
if b == 0:
plt.plot(b_dR, '-', linewidth=2, label='{:d}-0'.format(bundle_size - 1))
else:
plt.plot(b_dR, '-', linewidth=1, label='{:d}-{:d}'.format(b, b - 1))
plt.xlabel('steps')
plt.ylabel('rms')
#plt.legend(loc=1)
plt.ylim(0, 0.01)
plt.show()
a = 1/0
# Update transformations to world coordinates
for b in range(bundle_size):
world_H = np.eye(4)
for bb in range(b, 0, -1):
world_H = np.matmul(bundle_H[bb], world_H)
world_H = np.matmul(transform_list[bundle_i0], world_H)
transform_list[bundle_i0 + b] = world_H
t += [time.time()]
if verbose == 2:
print('Bundle {:9.1f}ms / ICP {:.1f}ms => {:.1f} FPS'.format(1000 * (t[1] - t[0]),
1000 * (t[2] - t[1]),
bundle_size / (t[2] - t[1])))
if verbose == 1:
fmt_str = 'Bundle [{:3d},{:3d}] --- {:5.1f}% or {:02d}:{:02d}:{:02d} remaining at {:.1f}fps'
if bundle_i0 == 0:
FPS = bundle_size / (t[-1] - t[0])
else:
FPS += (bundle_size / (t[-1] - t[0]) - FPS) / 10
remaining = int((N - (bundle_i0 + bundle_size)) / FPS)
hours = remaining // 3600
remaining = remaining - 3600 * hours
minutes = remaining // 60
seconds = remaining - 60 * minutes
print(fmt_str.format(bundle_i0,
bundle_i0 + bundle_size - 1,
100 * (bundle_i0 + bundle_size) / N,
hours, minutes, seconds,
FPS))
# Save groups of 100 frames together
if (bundle_i0 > last_saved_frames + save_group + 1):
all_points = []
all_traj_pts = []
all_traj_clrs = []
i0 = last_saved_frames
i1 = i0 + save_group
for i, world_H in enumerate(transform_list[i0: i1]):
# Load ply format points
f_name = '{:.0f}.ply'.format(f_times[i0 + i])
data = read_ply(join(frames_folder, f_name))
points = np.vstack((data['x'], data['y'], data['z'])).T
# Apply transf
world_pts = np.hstack((points, np.ones_like(points[:, :1])))
world_pts = np.matmul(world_pts, world_H.T)
# Save frame
world_pts[:, 3] = i0 + i
all_points.append(world_pts)
# also save trajectory
traj_pts, traj_clrs = frame_H_to_points(world_H, size=0.1)
traj_pts = np.hstack((traj_pts, np.ones_like(traj_pts[:, :1]) * (i0 + i)))
all_traj_pts.append(traj_pts.astype(np.float32))
all_traj_clrs.append(traj_clrs)
last_saved_frames += save_group
filename = join(out_folder, 'd_{:s}_{:05d}.ply'.format(day, i0))
write_ply(filename,
[np.vstack(all_points)],
['x', 'y', 'z', 't'])
filename = join(out_folder, 'd_{:s}_{:05d}_traj.ply'.format(day, i0))
write_ply(filename,
[np.vstack(all_traj_pts), np.vstack(all_traj_clrs)],
['x', 'y', 'z', 't', 'red', 'green', 'blue'])
#################
# Post processing
#################
all_points = []
all_traj_pts = []
all_traj_clrs = []
i0 = last_saved_frames
for i, world_H in enumerate(transform_list[i0:]):
# Load ply format points
f_name = '{:.0f}.ply'.format(f_times[i0 + i])
data = read_ply(join(frames_folder, f_name))
points = np.vstack((data['x'], data['y'], data['z'])).T
# Apply transf
world_pts = np.hstack((points, np.ones_like(points[:, :1])))
world_pts = np.matmul(world_pts, world_H.T)
# Save frame
world_pts[:, 3] = i0 + i
all_points.append(world_pts)
# also save trajectory
traj_pts, traj_clrs = frame_H_to_points(world_H, size=0.1)
traj_pts = np.hstack((traj_pts, np.ones_like(traj_pts[:, :1]) * (i0 + i)))
all_traj_pts.append(traj_pts.astype(np.float32))
all_traj_clrs.append(traj_clrs)
last_saved_frames += save_group
filename = join(out_folder, 'd_{:s}_{:05d}.ply'.format(day, i0))
write_ply(filename,
[np.vstack(all_points)],
['x', 'y', 'z', 't'])
filename = join(out_folder, 'd_{:s}_{:05d}_traj.ply'.format(day, i0))
write_ply(filename,
[np.vstack(all_traj_pts), np.vstack(all_traj_clrs)],
['x', 'y', 'z', 't', 'red', 'green', 'blue'])
def bundle_icp(frame_names,
bundle_size=5,
score_thresh=0.99,
frame_voxel_size=-1,
verbose=2):
"""
    Align a sequence of lidar frames with a sliding bundle of point-to-plane ICP and return the estimated world pose of each frame.
"""
############
# Parameters
############
# Bundle stride (nb of frames between each bundle start)
bundle_stride = bundle_size - 1
# Group of frames saved together
save_group = 100
# List of transformation we are trying to optimize
transform_list = [np.eye(4) for _ in frame_names]
last_saved_frames = 0
FPS = 0
N = len(frame_names)
for b_i, bundle_i0 in enumerate(np.arange(0, len(frame_names), bundle_stride)):
####################
# Load bundle frames
####################
t = [time.time()]
if (bundle_i0 + bundle_size > N):
bundle_i0 = N - bundle_size
frame_pts = []
frame_norms = []
frame_w = []
for f_name in frame_names[bundle_i0:bundle_i0+bundle_size]:
# Load ply format points
data = read_ply(f_name)
points = np.vstack((data['x'], data['y'], data['z'])).T
# Get normals
normals, planarity, linearity = polar_normals(points, radius=1.5, h_scale=0.5)
norm_scores = planarity + linearity
# Remove low quality normals for fitting
points = points[norm_scores > score_thresh]
normals = normals[norm_scores > score_thresh]
norm_scores = (norm_scores[norm_scores > score_thresh] - score_thresh) / (1 - score_thresh)
# Subsample to reduce number of points
if frame_voxel_size > 0:
                # grid subsampling
points, normals = grid_subsampling(points, features=normals, sampleDl=frame_voxel_size)
# Renormalize normals
normals = normals / np.linalg.norm(normals, axis=1, keepdims=True)
            # Filter out points according to main normal directions (Not necessary if normals are better computed)
bool_filter = normal_filtering(normals) > 0.5
points = points[bool_filter]
normals = normals[bool_filter]
norm_scores = norm_scores[bool_filter]
# Compute score for each component of rotations / translation
            # Weights according to distance: the further, the higher (square rule because points lie on surfaces)
#rot_scores = np.expand_dims(norm_scores, 1) * np.cross(points, normals, axis=1)
#weights = np.hstack((rot_scores, -rot_scores))
weights = np.expand_dims(norm_scores, 1)
# Gather frames data
frame_pts.append(points)
frame_norms.append(normals)
frame_w.append(weights)
t += [time.time()]
##################
# Apply bundle ICP
##################
bundle_H, bundle_rms, all_H = bundle_pt2pl_icp(frame_pts,
frame_norms,
frame_w,
n_samples=1000,
max_pairing_dist=0.2,
max_iter=200,
avg_steps=5)
t += [time.time()]
# Update transformations to world coordinates
for b in range(bundle_size):
world_H = np.eye(4)
for bb in range(b, 0, -1):
world_H = np.matmul(bundle_H[bb], world_H)
world_H = np.matmul(transform_list[bundle_i0], world_H)
transform_list[bundle_i0 + b] = world_H
t += [time.time()]
if verbose > 0:
fmt_str = 'Bundle [{:3d},{:3d}] --- {:5.1f}% or {:02d}:{:02d}:{:02d} remaining at {:.1f}fps'
if bundle_i0 == 0:
FPS = bundle_size / (t[-1] - t[0])
else:
FPS += (bundle_size / (t[-1] - t[0]) - FPS) / 10
remaining = int((N - (bundle_i0 + bundle_size)) / FPS)
hours = remaining // 3600
remaining = remaining - 3600 * hours
minutes = remaining // 60
seconds = remaining - 60 * minutes
print(fmt_str.format(bundle_i0,
bundle_i0 + bundle_size - 1,
100 * (bundle_i0 + bundle_size) / N,
hours, minutes, seconds,
FPS))
# Save groups of 100 frames together
if (bundle_i0 > last_saved_frames + save_group + 1):
all_points = []
all_traj_pts = []
all_traj_clrs = []
i0 = last_saved_frames
i1 = i0 + save_group
for i, world_H in enumerate(transform_list[i0: i1]):
# Load ply format points
data = read_ply(frame_names[i0 + i])
points = np.vstack((data['x'], data['y'], data['z'])).T
# Apply transf
world_pts = np.hstack((points, np.ones_like(points[:, :1])))
world_pts = np.matmul(world_pts, world_H.T)
# Save frame
world_pts[:, 3] = i0 + i
all_points.append(world_pts)
# also save trajectory
traj_pts, traj_clrs = frame_H_to_points(world_H, size=0.1)
traj_pts = np.hstack((traj_pts, np.ones_like(traj_pts[:, :1]) * (i0 + i)))
all_traj_pts.append(traj_pts.astype(np.float32))
all_traj_clrs.append(traj_clrs)
last_saved_frames += save_group
filename = 'debug_icp_{:05d}.ply'.format(i0)
write_ply(filename,
[np.vstack(all_points)],
['x', 'y', 'z', 't'])
filename = 'debug_icp_{:05d}_traj.ply'.format(i0)
write_ply(filename,
[np.vstack(all_traj_pts), np.vstack(all_traj_clrs)],
['x', 'y', 'z', 't', 'red', 'green', 'blue'])
#################
# Post processing
#################
all_points = []
all_traj_pts = []
all_traj_clrs = []
i0 = last_saved_frames
for i, world_H in enumerate(transform_list[i0:]):
# Load ply format points
data = read_ply(frame_names[i0 + i])
points = np.vstack((data['x'], data['y'], data['z'])).T
# Apply transf
world_pts = np.hstack((points, np.ones_like(points[:, :1])))
world_pts = np.matmul(world_pts, world_H.T)
# Save frame
world_pts[:, 3] = i0 + i
all_points.append(world_pts)
# also save trajectory
traj_pts, traj_clrs = frame_H_to_points(world_H, size=0.1)
traj_pts = np.hstack((traj_pts, np.ones_like(traj_pts[:, :1]) * (i0 + i)))
all_traj_pts.append(traj_pts.astype(np.float32))
all_traj_clrs.append(traj_clrs)
last_saved_frames += save_group
filename = 'debug_icp_{:05d}.ply'.format(i0)
write_ply(filename,
[np.vstack(all_points)],
['x', 'y', 'z', 't'])
filename = 'debug_icp_{:05d}_traj.ply'.format(i0)
write_ply(filename,
[np.vstack(all_traj_pts), np.vstack(all_traj_clrs)],
['x', 'y', 'z', 't', 'red', 'green', 'blue'])
return transform_list
def bundle_slam(verbose=1):
############
# Parameters
############
# Path to data
data_path = '../../Data/NCLT'
gt_folder = 'ground_truth'
raw_folder = 'raw_ply'
days = np.sort([d for d in listdir(join(data_path, raw_folder))])
# Out files
out_folder = join(data_path, 'day_ply')
if not exists(out_folder):
makedirs(out_folder)
# Stride (nb of frames skipped for transformations)
frame_stride = 2
# Bundle size (number of frames jointly optimized) and stride (nb of frames between each bundle start)
bundle_size = 7
bundle_stride = bundle_size - 1
# Normal estimation parameters
score_thresh = 0.99
# Pointcloud filtering parameters
map_voxel_size = 0.05
frame_voxel_size = 0.05
# Group of frames saved together
save_group = 100
###############
# Load GT poses
###############
print('\nLoading days groundtruth poses...')
t0 = time.time()
gt_t, gt_H = load_gt_poses(join(data_path, gt_folder), only_day_1=True)
t2 = time.time()
print('Done in {:.1f}s\n'.format(t2 - t0))
#######################
# Get lidar frame times
#######################
# Focus on a particular point
p0 = np.array([-220, -527, 12])
R0 = 20.0
print('\nGet timestamps in focused area...')
t0 = time.time()
day_f_times = get_area_frames(days, gt_t, gt_H, join(data_path, raw_folder), p0, R0, only_day_1=True)
t2 = time.time()
print('Done in {:.1f}s\n'.format(t2 - t0))
###########################
# coarse map with pt2pl icp
###########################
for d, day in enumerate(days):
# List of transformation we are trying to optimize
frames_folder = join(data_path, 'raw_ply', day)
f_times = [f_t for f_t in day_f_times[d][::frame_stride]]
transform_list = [np.eye(4) for _ in f_times]
last_saved_frames = 0
FPS = 0
N = len(f_times)
for b_i, bundle_i0 in enumerate(np.arange(0, len(f_times), bundle_stride)):
####################
# Load bundle frames
####################
t = [time.time()]
if (bundle_i0 + bundle_size > len(f_times)):
bundle_i0 = len(f_times) - bundle_size
frame_pts = []
frame_norms = []
frame_w = []
for f_t in f_times[bundle_i0:bundle_i0+bundle_size]:
# Load ply format points
f_name = '{:.0f}.ply'.format(f_t)
data = read_ply(join(frames_folder, f_name))
points = np.vstack((data['x'], data['y'], data['z'])).T
estimate_normals_planarity_debug(points)
a = 1/0
t += [time.time()]
# Get normals
                normals, planarity, linearity = polar_normals(points, radius=1.5, h_scale=0.5)
                norm_scores = planarity + linearity
                # Remove low quality normals for fitting
points = points[norm_scores > score_thresh]
normals = normals[norm_scores > score_thresh]
norm_scores = (norm_scores[norm_scores > score_thresh] - score_thresh) / (1 - score_thresh)
t += [time.time()]
# Subsample to reduce number of points
if frame_voxel_size > 0:
                    # grid subsampling
points, normals = grid_subsampling(points, features=normals, sampleDl=map_voxel_size)
# Renormalize normals
normals = normals / np.linalg.norm(normals, axis=1, keepdims=True)
                # Filter out points according to main normal directions (Not necessary if normals are better computed)
bool_filter = normal_filtering(normals) > 0.5
points = points[bool_filter]
normals = normals[bool_filter]
norm_scores = norm_scores[bool_filter]
t += [time.time()]
# Compute score for each component of rotations / translation
                # Weights according to distance: the further, the higher (square rule because points lie on surfaces)
#rot_scores = np.expand_dims(norm_scores, 1) * np.cross(points, normals, axis=1)
#weights = np.hstack((rot_scores, -rot_scores))
weights = np.expand_dims(norm_scores, 1)
# Gather frames data
frame_pts.append(points)
frame_norms.append(normals)
frame_w.append(weights)
t += [time.time()]
if verbose == 3:
dt = np.array(t[1:]) - np.array(t[:-1])
dt = dt.reshape(bundle_size, -1)
timing_names = ['Load', 'Normals', 'Filter', 'Append']
s = ''
for t_name in timing_names:
s += '{:^10s} '.format(t_name)
s += '\n'
for b in range(bundle_size):
for t_i in range(len(timing_names)):
s += '{:^10.1f} '.format(1000 * dt[b, t_i])
s += '\n'
print(s)
t = t[:1]
t += [time.time()]
##################
# Apply bundle ICP
##################
# for b in range(bundle_size):
# w_names = ['w{:d}'.format(i) for i in range(frame_w[b].shape[1])]
# write_ply('bb_init_{:02d}.ply'.format(b),
# [frame_pts[b], frame_w[b]],
# ['x', 'y', 'z'] + w_names)
bundle_H, bundle_rms, all_H = bundle_pt2pl_icp(frame_pts,
frame_norms,
frame_w,
n_samples=1000,
max_pairing_dist=0.2,
max_iter=200,
avg_steps=5)
            # TODO: Lidar scan cleanup. AFTER THE MAPPING
            # > In polar coordinates: retrieve each scan line. Like a 1D grid subsampling, index points in a 1D grid
            #   adjusted by the min/max and the number of scan lines. Use r = log(r) for the following steps.
            # > In each line, order points by phi and find jumps in the r direction. Get dr0 = r(j)-r(j-1) and dr1 = r(j+1)-r(j).
            #   IF dr0 = dr1 THEN we are probably on a plane, keep the point.
            #   IF abs(dr0-dr1) > thresh THEN outlier, remove the point.
            #
            #
            # TODO: Mapping
            # > Start with a frame-to-frame bundle adjustment (do 20 frames, between 5 and 10 meters).
            # > Create a map from these 20 frames (use our smart spherical grid subsampling).
            # > ICP on the map.
            # > Choose: update the map or compute it again from 20 new frames (better to update if possible).
            # TODO: Motion distortion. Use the phi angle to get point timestamps; remember the frame timestamp is the one of the
            #   last points => unperiodicize phi, create a linear interpolation function from the last points and their angles,
            #   and interpolate the pose based on each point's angle.
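# A minimal sketch of the scan-line cleanup described in the TODO above (not part of the original
# pipeline). It assumes a single scan line given in polar coordinates (r, phi); the name
# clean_scan_line and the thresh value are placeholders.
def clean_scan_line(r, phi, thresh=0.5):
    # Order the points along the scan line and work on log-ranges, as suggested above
    order = np.argsort(phi)
    log_r = np.log(r[order])
    # dr0 = r(j) - r(j-1) and dr1 = r(j+1) - r(j) for interior points
    dr0 = log_r[1:-1] - log_r[:-2]
    dr1 = log_r[2:] - log_r[1:-1]
    # Keep points whose range profile is locally consistent; both endpoints are kept by default
    keep = np.ones(r.shape[0], dtype=bool)
    keep[order[1:-1]] = np.abs(dr0 - dr1) < thresh
    return keep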
def get_odometry(sensor_path, day, t0, t1):
odom_name = join(sensor_path, day, 'odometry_mu_100hz.csv')
odom = np.loadtxt(odom_name, delimiter=",", dtype=np.float64)
mask = np.logical_and(odom[:, 0] > t0, odom[:, 0] < t1)
ssc = odom[mask, 1:]
t = odom[mask, 0]
H = ssc_to_homo(ssc, ssc_in_radians=True)
return t, H
| 35.141791 | 121 | 0.499156 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 22,030 | 0.246225 |
68998d2ad54cd9bef2834023dae463b6fbec5d90 | 1,128 | py | Python | src/3-all_together/parse_vectors.py | bt3gl/Tool-NetClean_Complex_Networks_Data_Cleanser | 312f4fe2a080b75550e353a55ebf5b03d4d9c9a1 | [
"MIT"
] | 5 | 2015-04-14T04:09:40.000Z | 2018-05-18T21:57:21.000Z | src/3-all_together/parse_vectors.py | aquario-crypto/NetClean-Complex_Networks_Data_Cleanser | 312f4fe2a080b75550e353a55ebf5b03d4d9c9a1 | [
"MIT"
] | null | null | null | src/3-all_together/parse_vectors.py | aquario-crypto/NetClean-Complex_Networks_Data_Cleanser | 312f4fe2a080b75550e353a55ebf5b03d4d9c9a1 | [
"MIT"
] | 4 | 2015-04-14T04:11:28.000Z | 2019-07-12T03:47:30.000Z | #!/usr/bin/env python
__author__ = "Mari Wahl"
__copyright__ = "Copyright 2014, The Cogent Project"
__credits__ = ["Mari Wahl"]
__license__ = "GPL"
__version__ = "2.0"
__maintainer__ = "Mari Wahl"
__email__ = "marina.w4hl@gmail.com"
import os
from constants import SUBFOLDERS, FEATURES
def create_input_files(subfolder):
return '../../output/vectors_proc/' + subfolder + '.data'
def create_output_files():
out = '../../output/'
if not os.path.exists(out):
os.makedirs(out)
out_v = out + 'vectors_together/'
if not os.path.exists(out_v):
os.makedirs(out_v)
return out_v + 'together.data'
if __name__ == '__main__':
output_file = create_output_files()
# Loop saving the values for each file
for subfolder in SUBFOLDERS:
input_file = create_input_files(subfolder)
print 'Processing ' + input_file + ' ...'
tempfile = open(input_file, 'r')
aux = tempfile.read()
outfile = open(output_file, 'a')
outfile.write(aux)
tempfile.close()
outfile.close()
print '\nDone!!!'
| 21.692308 | 61 | 0.625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 291 | 0.257979 |
6899ddd456696f5f8cbff28853c9ebc19f43f8ce | 18,361 | py | Python | monitoring/tests/unit/gapic/v3/test_service_monitoring_service_client_v3.py | q-logic/google-cloud-python | a65065c89c059bc564bbdd79288a48970907c399 | [
"Apache-2.0"
] | null | null | null | monitoring/tests/unit/gapic/v3/test_service_monitoring_service_client_v3.py | q-logic/google-cloud-python | a65065c89c059bc564bbdd79288a48970907c399 | [
"Apache-2.0"
] | 40 | 2019-07-16T10:04:48.000Z | 2020-01-20T09:04:59.000Z | monitoring/tests/unit/gapic/v3/test_service_monitoring_service_client_v3.py | q-logic/google-cloud-python | a65065c89c059bc564bbdd79288a48970907c399 | [
"Apache-2.0"
] | 2 | 2019-07-18T00:05:31.000Z | 2019-11-27T14:17:22.000Z | # -*- coding: utf-8 -*-
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests."""
import mock
import pytest
from google.cloud import monitoring_v3
from google.cloud.monitoring_v3.proto import service_pb2
from google.cloud.monitoring_v3.proto import service_service_pb2
from google.protobuf import empty_pb2
class MultiCallableStub(object):
"""Stub for the grpc.UnaryUnaryMultiCallable interface."""
def __init__(self, method, channel_stub):
self.method = method
self.channel_stub = channel_stub
def __call__(self, request, timeout=None, metadata=None, credentials=None):
self.channel_stub.requests.append((self.method, request))
response = None
if self.channel_stub.responses:
response = self.channel_stub.responses.pop()
if isinstance(response, Exception):
raise response
if response:
return response
class ChannelStub(object):
"""Stub for the grpc.Channel interface."""
def __init__(self, responses=[]):
self.responses = responses
self.requests = []
def unary_unary(self, method, request_serializer=None, response_deserializer=None):
return MultiCallableStub(method, self)
class CustomException(Exception):
pass
class TestServiceMonitoringServiceClient(object):
def test_create_service(self):
# Setup Expected Response
name = "name3373707"
display_name = "displayName1615086568"
expected_response = {"name": name, "display_name": display_name}
expected_response = service_pb2.Service(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup Request
parent = client.project_path("[PROJECT]")
service = {}
response = client.create_service(parent, service)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_service_pb2.CreateServiceRequest(
parent=parent, service=service
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_service_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup request
parent = client.project_path("[PROJECT]")
service = {}
with pytest.raises(CustomException):
client.create_service(parent, service)
def test_get_service(self):
# Setup Expected Response
name_2 = "name2-1052831874"
display_name = "displayName1615086568"
expected_response = {"name": name_2, "display_name": display_name}
expected_response = service_pb2.Service(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup Request
name = client.service_path("[PROJECT]", "[SERVICE]")
response = client.get_service(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_service_pb2.GetServiceRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_service_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup request
name = client.service_path("[PROJECT]", "[SERVICE]")
with pytest.raises(CustomException):
client.get_service(name)
def test_list_services(self):
# Setup Expected Response
next_page_token = ""
services_element = {}
services = [services_element]
expected_response = {"next_page_token": next_page_token, "services": services}
expected_response = service_service_pb2.ListServicesResponse(
**expected_response
)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup Request
parent = client.project_path("[PROJECT]")
paged_list_response = client.list_services(parent)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.services[0] == resources[0]
assert len(channel.requests) == 1
expected_request = service_service_pb2.ListServicesRequest(parent=parent)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_services_exception(self):
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup request
parent = client.project_path("[PROJECT]")
paged_list_response = client.list_services(parent)
with pytest.raises(CustomException):
list(paged_list_response)
def test_update_service(self):
# Setup Expected Response
name = "name3373707"
display_name = "displayName1615086568"
expected_response = {"name": name, "display_name": display_name}
expected_response = service_pb2.Service(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup Request
service = {}
response = client.update_service(service)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_service_pb2.UpdateServiceRequest(service=service)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_update_service_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup request
service = {}
with pytest.raises(CustomException):
client.update_service(service)
def test_delete_service(self):
channel = ChannelStub()
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup Request
name = client.service_path("[PROJECT]", "[SERVICE]")
client.delete_service(name)
assert len(channel.requests) == 1
expected_request = service_service_pb2.DeleteServiceRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_delete_service_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup request
name = client.service_path("[PROJECT]", "[SERVICE]")
with pytest.raises(CustomException):
client.delete_service(name)
def test_create_service_level_objective(self):
# Setup Expected Response
name = "name3373707"
display_name = "displayName1615086568"
goal = 317825.0
expected_response = {"name": name, "display_name": display_name, "goal": goal}
expected_response = service_pb2.ServiceLevelObjective(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup Request
parent = client.service_path("[PROJECT]", "[SERVICE]")
service_level_objective = {}
response = client.create_service_level_objective(
parent, service_level_objective
)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_service_pb2.CreateServiceLevelObjectiveRequest(
parent=parent, service_level_objective=service_level_objective
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_service_level_objective_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup request
parent = client.service_path("[PROJECT]", "[SERVICE]")
service_level_objective = {}
with pytest.raises(CustomException):
client.create_service_level_objective(parent, service_level_objective)
def test_get_service_level_objective(self):
# Setup Expected Response
name_2 = "name2-1052831874"
display_name = "displayName1615086568"
goal = 317825.0
expected_response = {"name": name_2, "display_name": display_name, "goal": goal}
expected_response = service_pb2.ServiceLevelObjective(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup Request
name = client.service_level_objective_path(
"[PROJECT]", "[SERVICE]", "[SERVICE_LEVEL_OBJECTIVE]"
)
response = client.get_service_level_objective(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_service_pb2.GetServiceLevelObjectiveRequest(
name=name
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_service_level_objective_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup request
name = client.service_level_objective_path(
"[PROJECT]", "[SERVICE]", "[SERVICE_LEVEL_OBJECTIVE]"
)
with pytest.raises(CustomException):
client.get_service_level_objective(name)
def test_list_service_level_objectives(self):
# Setup Expected Response
next_page_token = ""
service_level_objectives_element = {}
service_level_objectives = [service_level_objectives_element]
expected_response = {
"next_page_token": next_page_token,
"service_level_objectives": service_level_objectives,
}
expected_response = service_service_pb2.ListServiceLevelObjectivesResponse(
**expected_response
)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup Request
parent = client.service_path("[PROJECT]", "[SERVICE]")
paged_list_response = client.list_service_level_objectives(parent)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.service_level_objectives[0] == resources[0]
assert len(channel.requests) == 1
expected_request = service_service_pb2.ListServiceLevelObjectivesRequest(
parent=parent
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_list_service_level_objectives_exception(self):
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup request
parent = client.service_path("[PROJECT]", "[SERVICE]")
paged_list_response = client.list_service_level_objectives(parent)
with pytest.raises(CustomException):
list(paged_list_response)
def test_update_service_level_objective(self):
# Setup Expected Response
name = "name3373707"
display_name = "displayName1615086568"
goal = 317825.0
expected_response = {"name": name, "display_name": display_name, "goal": goal}
expected_response = service_pb2.ServiceLevelObjective(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup Request
service_level_objective = {}
response = client.update_service_level_objective(service_level_objective)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = service_service_pb2.UpdateServiceLevelObjectiveRequest(
service_level_objective=service_level_objective
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_update_service_level_objective_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup request
service_level_objective = {}
with pytest.raises(CustomException):
client.update_service_level_objective(service_level_objective)
def test_delete_service_level_objective(self):
channel = ChannelStub()
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup Request
name = client.service_level_objective_path(
"[PROJECT]", "[SERVICE]", "[SERVICE_LEVEL_OBJECTIVE]"
)
client.delete_service_level_objective(name)
assert len(channel.requests) == 1
expected_request = service_service_pb2.DeleteServiceLevelObjectiveRequest(
name=name
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_delete_service_level_objective_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = monitoring_v3.ServiceMonitoringServiceClient()
# Setup request
name = client.service_level_objective_path(
"[PROJECT]", "[SERVICE]", "[SERVICE_LEVEL_OBJECTIVE]"
)
with pytest.raises(CustomException):
client.delete_service_level_objective(name)
| 38.093361 | 88 | 0.680954 | 17,502 | 0.953216 | 0 | 0 | 0 | 0 | 0 | 0 | 3,325 | 0.18109 |
689b058710ba7e2e873901967ce4945aab966125 | 294 | py | Python | bin/ripple/util/Database.py | tdfischer/rippled | 399c43cae6e90a428e9ce6a988123972b0f03c99 | [
"BSL-1.0"
] | 89 | 2015-04-23T20:24:58.000Z | 2022-03-20T12:35:30.000Z | bin/ripple/util/Database.py | tdfischer/rippled | 399c43cae6e90a428e9ce6a988123972b0f03c99 | [
"BSL-1.0"
] | 14 | 2020-05-25T15:42:18.000Z | 2022-03-20T12:44:56.000Z | bin/ripple/util/Database.py | tdfischer/rippled | 399c43cae6e90a428e9ce6a988123972b0f03c99 | [
"BSL-1.0"
] | 41 | 2015-01-19T05:26:34.000Z | 2022-02-23T03:47:39.000Z | from __future__ import absolute_import, division, print_function, unicode_literals
import sqlite3
def fetchall(database, query, kwds):
conn = sqlite3.connect(database)
try:
cursor = conn.execute(query, kwds)
return cursor.fetchall()
finally:
conn.close()
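# A minimal usage sketch (not part of the original module): it builds a throwaway database to
# show the named-parameter style that fetchall expects; the file, table and column names below
# are placeholders.
if __name__ == '__main__':
    setup = sqlite3.connect('demo.db')
    setup.execute('CREATE TABLE IF NOT EXISTS ledgers (seq INTEGER)')
    setup.execute('INSERT INTO ledgers (seq) VALUES (1)')
    setup.commit()
    setup.close()
    print(fetchall('demo.db', 'SELECT seq FROM ledgers WHERE seq >= :seq', {'seq': 1}))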
| 22.615385 | 82 | 0.697279 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
689c35abc62fa1170e0db4a74ec9011f9d0a466c | 1,775 | py | Python | backend/utils/__init__.py | szkkteam/agrosys | a390332202f7200632d2ff3816e1b0f3cc76f586 | [
"MIT"
] | null | null | null | backend/utils/__init__.py | szkkteam/agrosys | a390332202f7200632d2ff3816e1b0f3cc76f586 | [
"MIT"
] | null | null | null | backend/utils/__init__.py | szkkteam/agrosys | a390332202f7200632d2ff3816e1b0f3cc76f586 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Common Python library imports
import re
import unicodedata
# Pip package imports
from flask_sqlalchemy.model import camel_to_snake_case
from flask import current_app
from itsdangerous import URLSafeSerializer, BadData
from loguru import logger
# Internal package imports
from .decorators import was_decorated_without_parenthesis, wrap_decorator
#from .mail import send_mail, prepare_mail, send_mail_sync
def slugify(string):
string = re.sub(r'[^\w\s-]', '',
unicodedata.normalize('NFKD', string.strip()))
return re.sub(r'[-\s]+', '-', string).lower()
def title_case(string):
return camel_to_snake_case(string).replace('_', ' ').title()
def pluralize(name):
if name.endswith('y'):
# right replace 'y' with 'ies'
return 'ies'.join(name.rsplit('y', 1))
elif name.endswith('s'):
return f'{name}es'
return f'{name}s'
def string_to_bool(s):
if isinstance(s, str):
if s.lower() in [ 'true', 'yes', 'y', '1', 'ye', 't' ]:
return True
elif s.lower() in [ 'false', 'no', 'n', '0', 'f' ]:
return False
return
def listify(obj):
if not isinstance(obj, (tuple, list)):
return [obj]
return obj
def decode_token(token):
""" Decode the token to retrive the encoded data """
s = URLSafeSerializer(current_app.secret_key, salt=current_app.config['SECURITY_PASSWORD_SALT'])
try:
return s.loads(token)
except BadData as e:
logger.error(e)
return None
def encode_token(data):
""" Encode a data and return with the encoded token """
s = URLSafeSerializer(current_app.secret_key, salt=current_app.config['SECURITY_PASSWORD_SALT'])
return s.dumps(data)
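# A minimal usage sketch (not part of the original module): the app, secret and salt below are
# placeholders, only meant to show how encode_token and decode_token pair up.
def _token_roundtrip_example():
    from flask import Flask
    app = Flask(__name__)
    app.secret_key = "example-secret"
    app.config['SECURITY_PASSWORD_SALT'] = "example-salt"
    with app.app_context():
        token = encode_token({"user_id": 42})
        # Returns the original payload, or None if the token was tampered with
        return decode_token(token)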
| 27.307692 | 100 | 0.654085 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 481 | 0.270986 |
689c565289e0ab67922c34bd175bd019c149cc51 | 3,028 | py | Python | aadi/roi_heads.py | WanXiaopei/aadi | 08a7399b3dcfab716cc7b80a88201fc47186ffd3 | [
"MIT"
] | 4 | 2021-06-01T02:46:21.000Z | 2022-01-11T03:02:36.000Z | aadi/roi_heads.py | WanXiaopei/aadi | 08a7399b3dcfab716cc7b80a88201fc47186ffd3 | [
"MIT"
] | null | null | null | aadi/roi_heads.py | WanXiaopei/aadi | 08a7399b3dcfab716cc7b80a88201fc47186ffd3 | [
"MIT"
] | null | null | null | from typing import List
import torch
from detectron2.structures import ImageList, Boxes, Instances, pairwise_iou
from detectron2.modeling.box_regression import Box2BoxTransform
from detectron2.modeling.roi_heads import ROI_HEADS_REGISTRY, StandardROIHeads
from detectron2.modeling.roi_heads.cascade_rcnn import CascadeROIHeads
from .utils import get_aligned_pooler, label_and_sample_proposals
from .lazy_fast_rcnn import LazyFastRCNNOutputLayers
@ROI_HEADS_REGISTRY.register()
class LazyRoIHeads(StandardROIHeads):
@torch.no_grad()
def label_and_sample_proposals(
self, proposals: List[Instances], targets: List[Instances]
) -> List[Instances]:
return label_and_sample_proposals(self, proposals, targets)
@classmethod
def _init_box_head(cls, cfg, input_shape):
ret = super()._init_box_head(cfg, input_shape)
ret["box_predictor"] = LazyFastRCNNOutputLayers(
cfg, ret["box_head"].output_shape,
# The loss weight is set as Cascade RPN
loss_weight={
"loss_cls": 1.5,
"loss_box_reg": cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_WEIGHT
},
)
ret["box_in_features"] = cfg.MODEL.RPN.IN_FEATURES
ret["box_pooler"] = get_aligned_pooler(
cfg.MODEL.RPN, input_shape,
output_size=cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION,
sampling_ratio=cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO,
)
return ret
@ROI_HEADS_REGISTRY.register()
class LazyCascadeRoIHeads(CascadeROIHeads):
@torch.no_grad()
def label_and_sample_proposals(
self, proposals: List[Instances], targets: List[Instances]
) -> List[Instances]:
return label_and_sample_proposals(self, proposals, targets)
@classmethod
def _init_box_head(cls, cfg, input_shape):
ret = super()._init_box_head(cfg, input_shape)
cascade_bbox_reg_weights = cfg.MODEL.ROI_BOX_CASCADE_HEAD.BBOX_REG_WEIGHTS
box_predictors = []
for bbox_reg_weights in cascade_bbox_reg_weights:
box_predictors.append(
LazyFastRCNNOutputLayers(
cfg, ret["box_heads"][0].output_shape,
box2box_transform=Box2BoxTransform(weights=bbox_reg_weights),
loss_weight={
"loss_cls": 1.5,
"loss_box_reg": cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_WEIGHT
},
)
)
ret["box_predictors"] = box_predictors
ret["box_in_features"] = cfg.MODEL.RPN.IN_FEATURES
ret["box_pooler"] = get_aligned_pooler(
cfg.MODEL.RPN, input_shape,
output_size=cfg.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION,
sampling_ratio=cfg.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO,
)
return ret
def _match_and_label_boxes(self, proposals, stage, targets):
return label_and_sample_proposals(self, proposals, targets, False, False, stage) | 40.373333 | 88 | 0.674042 | 2,513 | 0.829921 | 0 | 0 | 2,575 | 0.850396 | 0 | 0 | 197 | 0.065059 |
689ca4570dd678af10ee1eb645c837983c0d78c1 | 350 | py | Python | TF/tensorflow--tutorial/tf.contrib.data.py | STHSF/DeepLearning | af91fced5af87f69915bb2674229d5f6461f553d | [
"MIT"
] | 6 | 2016-09-13T05:50:05.000Z | 2020-01-03T06:34:53.000Z | TF/tensorflow--tutorial/tf.contrib.data.py | STHSF/DeepLearning | af91fced5af87f69915bb2674229d5f6461f553d | [
"MIT"
] | null | null | null | TF/tensorflow--tutorial/tf.contrib.data.py | STHSF/DeepLearning | af91fced5af87f69915bb2674229d5f6461f553d | [
"MIT"
] | 6 | 2016-09-28T02:55:35.000Z | 2019-05-31T02:51:43.000Z | # coding=utf-8
# tensorflow tf.contrib.data api test
import tensorflow as tf
# file path
filename = ''
batch_size = 100
aa = (tf.contrib.data.TextLineDataset(filename)
.map((lambda line: tf.decode_csv(line, record_defaults=[['1'], ['1'], ['1']], field_delim='\t')))
.shuffle(buffer_size=1000)
      .batch(batch_size)
)
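# A minimal usage sketch (not part of the original snippet): one way the pipeline above could be
# consumed with the TF 1.x graph API, assuming `filename` points to a real three-column TSV file.
# iterator = aa.make_one_shot_iterator()
# next_row = iterator.get_next()
# with tf.Session() as sess:
#     print(sess.run(next_row))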
| 18.421053 | 103 | 0.651429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 77 | 0.22 |
689cd75c21771ffe0186b10faeb2d7cc34833c06 | 1,333 | py | Python | 哥伦布(STM32F407)/4.拓展实验/3.音频播放/01-物理按键版/main.py | 01studio-lab/MicroPython_Examples | f06a1bee398674ceafebed2aac88d8413cc8abad | [
"MIT"
] | 73 | 2020-05-02T13:48:27.000Z | 2022-03-26T13:15:10.000Z | 哥伦布(STM32F407)/4.拓展实验/3.音频播放/01-物理按键版/main.py | 01studio-lab/MicroPython_Examples | f06a1bee398674ceafebed2aac88d8413cc8abad | [
"MIT"
] | null | null | null | 哥伦布(STM32F407)/4.拓展实验/3.音频播放/01-物理按键版/main.py | 01studio-lab/MicroPython_Examples | f06a1bee398674ceafebed2aac88d8413cc8abad | [
"MIT"
] | 50 | 2020-05-15T13:57:28.000Z | 2022-03-30T14:03:33.000Z | '''
Experiment name: Audio playback
Version: v1.0
Date: 2020.12
Author: 01Studio
Description: MP3/WAV audio file playback, controlled with physical buttons.
'''
# Import the required modules
import audio,time
from pyb import Switch
from machine import Pin
# Create the audio object
wm=audio.WM8978()
vol = 80 # Initialize the volume to 80
######################
# Play: USR button
######################
play_flag = 0
def music_play():
global play_flag
play_flag = 1
sw =Switch()
sw.callback(music_play)
######################
# Volume up: A0 button
######################
VOL_U = Pin('A0',Pin.IN,Pin.PULL_UP) # Set up button A0
vol_up_flag = 0
def vol_up(VOL_U):
global vol
    # Debounce the button
if VOL_U.value() == 0:
time.sleep_ms(10)
if VOL_U.value() == 0:
vol=vol+10
if vol > 100:
vol = 100
wm.volume(vol)
VOL_U.irq(vol_up,Pin.IRQ_FALLING, hard=1) # Define the interrupt, triggered on the falling edge
######################
# Volume down: E3 button
######################
VOL_D = Pin('E3',Pin.IN,Pin.PULL_UP) # Set up button E3
vol_down_flag = 0
def vol_down(VOL_D):
global vol
    # Debounce the button
if VOL_D.value() == 0:
time.sleep_ms(10)
if VOL_D.value() == 0:
vol=vol-10
if vol < 10:
vol = 10
wm.volume(vol)
VOL_D.irq(vol_down,Pin.IRQ_FALLING, hard=1) # Define the interrupt, triggered on the falling edge
# Load the music file
wm.load('/flash/music/Seasons In The Sun.mp3')
while True:
    # Play the music
if play_flag == 1:
wm.play()
play_flag = 0
| 15.147727 | 55 | 0.523631 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 596 | 0.380831 |
689f3d8878428c818087147e65c24430b3c5c043 | 942 | py | Python | mir_navigation/nodes/min_max_finder.py | xneomac/mir_robot | 0041b41bb661d32a34c266d0535acf1c78cf61b9 | [
"BSD-3-Clause"
] | 3 | 2020-07-31T05:15:59.000Z | 2020-11-30T03:49:43.000Z | mir_navigation/nodes/min_max_finder.py | xneomac/mir_robot | 0041b41bb661d32a34c266d0535acf1c78cf61b9 | [
"BSD-3-Clause"
] | null | null | null | mir_navigation/nodes/min_max_finder.py | xneomac/mir_robot | 0041b41bb661d32a34c266d0535acf1c78cf61b9 | [
"BSD-3-Clause"
] | 9 | 2019-09-17T21:18:01.000Z | 2021-04-21T11:28:24.000Z | #!/usr/bin/env python
import rospy
from nav_msgs.msg import Odometry
lin_min = 0.0
lin_max = 0.0
ang_min = 0.0
ang_max = 0.0
def odom_cb(msg):
global lin_min, lin_max, ang_min, ang_max
if lin_min > msg.twist.twist.linear.x:
lin_min = msg.twist.twist.linear.x
if lin_max < msg.twist.twist.linear.x:
lin_max = msg.twist.twist.linear.x
if ang_min > msg.twist.twist.angular.z:
ang_min = msg.twist.twist.angular.z
if ang_max < msg.twist.twist.angular.z:
ang_max = msg.twist.twist.angular.z
rospy.loginfo('linear: [%f, %f] angular: [%f, %f]', lin_min, lin_max,
ang_min, ang_max)
def main():
rospy.init_node('min_max_finder', anonymous=True)
rospy.Subscriber('odom_comb', Odometry, odom_cb)
    rospy.loginfo('min_max_finder node ready and listening. Now use teleop to move your robot to the limits!')
rospy.spin()
if __name__ == '__main__':
main()
| 26.166667 | 109 | 0.663482 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 186 | 0.197452 |
68a03991604ba0d78b82b80d7e17b446d2af4cc2 | 5,076 | py | Python | classes.py | cheng-zeng35/lyricsync | 19a4dde9eee543b822a93f9ccf613252dc1ad067 | [
"MIT"
] | null | null | null | classes.py | cheng-zeng35/lyricsync | 19a4dde9eee543b822a93f9ccf613252dc1ad067 | [
"MIT"
] | null | null | null | classes.py | cheng-zeng35/lyricsync | 19a4dde9eee543b822a93f9ccf613252dc1ad067 | [
"MIT"
] | null | null | null | # this file is to store all custom classes
import tkinter as tk
# class to store tkinter window properties
# font: tk font dictionary {family, size, weight, slant, underline, overstrike}
# font color: string
# nrows: the number of rows of lyric displayed (integer greater than 0)
# width: window width (int greater than 0)
# transparency: window transparency level (0.2 to 1)
# bg_color: [transparency: 1 or 2, background color (string), transparent color (string)]
# bd: border width in integers
# on_top: whether window is kept on top of all other windows (Boolean)
# x_pos, y_pos: window's x and y coordinates in pixels
class WindowProperties:
def __init__(self, font, font_color, font_color_bg, nrows, width, transparency, bg_color, bd, on_top, x_pos, y_pos):
self.font = font
self.font_color = font_color
self.font_color_bg = font_color_bg
self.nrows = nrows
self.width = width
self.transparency = transparency
self.bg_color = bg_color
self.bd = bd
self.on_top = on_top
self.x_pos = x_pos
self.y_pos = y_pos
def save(self, file_path):
with open(file_path, 'w') as f:
f.write(str(self.font['family']) + '\n')
f.write(str(self.font['size']) + '\n')
f.write(str(self.font['weight']) + '\n')
f.write(str(self.font['slant']) + '\n')
f.write(str(self.font['underline']) + '\n')
f.write(str(self.font['overstrike']) + '\n')
f.write(str(self.font_color) + '\n')
f.write(str(self.font_color_bg) + '\n')
f.write(str(self.nrows) + '\n')
f.write(str(self.width) + '\n')
f.write(str(self.transparency) + '\n')
f.write(str(self.bg_color) + '\n')
f.write(str(self.bd) + '\n')
f.write(str(self.on_top) + '\n')
f.write(str(self.x_pos) + '\n')
f.write(str(self.y_pos) + '\n')
# helper function for title bar to save setting and then close window
def close_root(root, win_properties):
win_properties.save('cache/user_setting.txt')
root.destroy()
# custom title bar class
class TitleBar:
# initialization takes x starting position, y starting position, and window
def __init__(self, last_click_x, last_click_y, root, win_properties):
# initialize title_bar, close button, and label
self.title_bar = tk.Frame(root, bg='#2e2e2e', relief='groove', bd=0, highlightthickness=0)
self.close_button = tk.Button(self.title_bar, text='×', bg="#2e2e2e", padx=5, activebackground='red',
bd=0, font="bold", fg='white', command=lambda: close_root(root, win_properties))
self.close_button.grid(row=0, column=1, sticky='E')
self.title_text = tk.Label(self.title_bar, text='', bg='#2e2e2e', padx=5, fg='white')
self.title_text.grid(row=0, column=0, sticky='W')
self.title_bar.grid_columnconfigure(0, weight=1)
# bind closing and drag
self.last_click_x = last_click_x
self.last_click_y = last_click_y
self.title_bar.bind('<Button-1>', self.save_last_click)
self.title_bar.bind('<B1-Motion>', lambda event: self.drag(event, root, win_properties))
self.title_text.bind('<Button-1>', self.save_last_click)
self.title_text.bind('<B1-Motion>', lambda event: self.drag(event, root, win_properties))
# update title function
def title(self, title_text):
self.title_text.config(text=title_text)
# update last position to help with drag function
def save_last_click(self, event):
self.last_click_x = event.x
self.last_click_y = event.y
# drag function
def drag(self, event, root, win_properties):
x, y = event.x - self.last_click_x + root.winfo_x(), event.y - self.last_click_y + root.winfo_y()
root.geometry("+%s+%s" % (x, y))
win_properties.x_pos = x
win_properties.y_pos = y
# class to store song information
# song: Spotify current song data (see spotify_func.py for format)
# lyric: current lyric used (lrc file in string format)
# search_result: list of lyric results scraped from website ([[song, link, singer]..])
# lyric_offset: number of ms to offset lyrics by when displayed (integer)
# nlyric: the lyric currently being used from search_result (integer between 0 and len(search_result) - 1)
# dynamic lyric position: to track which letter should be highlighted in a different color, integer
# dynamic lyric duration: to track how frequently the lyric update function should be refreshed, in ms
# lyric_original: original lyric (not formatted) to be saved to cache
class SongProperties:
def __init__(self, songx, lyric_f, lyric_o, search_resultx, lyric_offsetx, nlyricx):
self.song = songx
self.lyric = lyric_f
self.search_result = search_resultx
self.lyric_offset = lyric_offsetx
self.nlyric = nlyricx
self.dynamic_lyric_pos = 0
self.dynamic_lyric_duration = 100
self.lyric_original = lyric_o
| 45.72973 | 120 | 0.656816 | 3,548 | 0.698838 | 0 | 0 | 0 | 0 | 0 | 0 | 1,873 | 0.368919 |
68a37f7b4dbe79a75b57e0f1d831de618aa2079e | 563 | py | Python | test/unit/unittest_utils/utility.py | tsungjui/fusionline | 26d5d41e82ac83822ba41df1cd14c54afa112655 | [
"CC-BY-3.0"
] | 1 | 2019-11-03T11:45:43.000Z | 2019-11-03T11:45:43.000Z | test/unit/unittest_utils/utility.py | tsungjui/fusionline | 26d5d41e82ac83822ba41df1cd14c54afa112655 | [
"CC-BY-3.0"
] | 4 | 2017-05-24T19:36:34.000Z | 2019-08-23T02:49:18.000Z | test/unit/unittest_utils/utility.py | abretaud/galaxy | 1ad89511540e6800cd2d0da5d878c1c77d8ccfe9 | [
"CC-BY-3.0"
] | null | null | null | """
Unit test utilities.
"""
import textwrap
def clean_multiline_string( multiline_string, sep='\n' ):
"""
Dedent, split, remove first and last empty lines, rejoin.
"""
multiline_string = textwrap.dedent( multiline_string )
string_list = multiline_string.split( sep )
if not string_list[0]:
string_list = string_list[1:]
if not string_list[-1]:
string_list = string_list[:-1]
# return '\n'.join( docstrings )
return ''.join([ ( s + '\n' ) for s in string_list ])
__all__ = (
"clean_multiline_string",
)
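# A minimal usage sketch (not part of the original module):
# clean_multiline_string("""
#     line one
#     line two
# """) == 'line one\nline two\n'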
| 23.458333 | 61 | 0.641208 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 167 | 0.296625 |
68a4b52bb7b1fdaa9592b0d8a5eb274cac1c955e | 1,387 | py | Python | tests/test_chunk_messages.py | UncleGoogle/galaxy-integrations-python-api | 49ae2beab9cc48e55d7a5e90e39d43789fb823db | [
"MIT"
] | null | null | null | tests/test_chunk_messages.py | UncleGoogle/galaxy-integrations-python-api | 49ae2beab9cc48e55d7a5e90e39d43789fb823db | [
"MIT"
] | null | null | null | tests/test_chunk_messages.py | UncleGoogle/galaxy-integrations-python-api | 49ae2beab9cc48e55d7a5e90e39d43789fb823db | [
"MIT"
] | null | null | null | import asyncio
import json
def test_chunked_messages(plugin, read):
request = {
"jsonrpc": "2.0",
"method": "install_game",
"params": {
"game_id": "3"
}
}
message = json.dumps(request).encode() + b"\n"
read.side_effect = [message[:5], message[5:], b""]
asyncio.run(plugin.run())
plugin.install_game.assert_called_with(game_id="3")
def test_joined_messages(plugin, read):
requests = [
{
"jsonrpc": "2.0",
"method": "install_game",
"params": {
"game_id": "3"
}
},
{
"jsonrpc": "2.0",
"method": "launch_game",
"params": {
"game_id": "3"
}
}
]
data = b"".join([json.dumps(request).encode() + b"\n" for request in requests])
read.side_effect = [data, b""]
asyncio.run(plugin.run())
plugin.install_game.assert_called_with(game_id="3")
plugin.launch_game.assert_called_with(game_id="3")
def test_not_finished(plugin, read):
request = {
"jsonrpc": "2.0",
"method": "install_game",
"params": {
"game_id": "3"
}
}
message = json.dumps(request).encode() # no new line
read.side_effect = [message, b""]
asyncio.run(plugin.run())
plugin.install_game.assert_not_called()
| 25.218182 | 83 | 0.522711 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 267 | 0.192502 |
68a4f3241cf1bad984e99e344329235393aad6ba | 2,731 | py | Python | test/Crawler/Fs/Image/ImageCrawlerTest.py | paulondc/chilopoda | 046dbb0c1b4ff20ea5f2e1679f8d89f3089b6aa4 | [
"MIT"
] | 2 | 2019-09-24T18:56:27.000Z | 2021-02-07T04:58:49.000Z | test/Crawler/Fs/Image/ImageCrawlerTest.py | paulondc/kombi | 046dbb0c1b4ff20ea5f2e1679f8d89f3089b6aa4 | [
"MIT"
] | 20 | 2019-02-16T04:21:13.000Z | 2019-03-09T21:21:21.000Z | test/Crawler/Fs/Image/ImageCrawlerTest.py | paulondc/kombi | 046dbb0c1b4ff20ea5f2e1679f8d89f3089b6aa4 | [
"MIT"
] | 3 | 2019-11-15T05:16:32.000Z | 2021-09-28T21:28:29.000Z | import os
import unittest
from ....BaseTestCase import BaseTestCase
from kombi.Crawler import Crawler
from kombi.Crawler.Fs.Image import ImageCrawler
from kombi.Crawler.PathHolder import PathHolder
class ImageCrawlerTest(BaseTestCase):
"""Test Image crawler."""
__singleFile = os.path.join(BaseTestCase.dataTestsDirectory(), "test.dpx")
__sequenceFile = os.path.join(BaseTestCase.dataTestsDirectory(), "testSeq.0001.exr")
def testSingleImage(self):
"""
Test that the crawler created for a single image is based on the image crawler.
"""
crawler = Crawler.create(PathHolder(self.__singleFile))
self.assertIsInstance(crawler, ImageCrawler)
def testSequenceImage(self):
"""
Test that the crawler created for a sequence image is based on the image crawler.
"""
crawler = Crawler.create(PathHolder(self.__sequenceFile))
self.assertIsInstance(crawler, ImageCrawler)
def testGroupTagSequence(self):
"""
Test that the tag group has been assigned to the image sequence crawler.
"""
crawler = Crawler.create(PathHolder(self.__sequenceFile))
self.assertIn('group', crawler.tagNames())
self.assertEqual(crawler.tag('group'), "testSeq.####.exr")
def testGroupSprintfTagSequence(self):
"""
Test that the tag groupSprintf has been assigned to the image sequence crawler.
"""
crawler = Crawler.create(PathHolder(self.__sequenceFile))
self.assertIn('groupSprintf', crawler.tagNames())
self.assertEqual(crawler.tag('groupSprintf'), "testSeq.%04d.exr")
def testGroupTagSingle(self):
"""
Test that the tag group has not been assigned to a single image crawler.
"""
crawler = Crawler.create(PathHolder(self.__singleFile))
self.assertNotIn('group', crawler.tagNames())
def testGroupSprintfTagSingle(self):
"""
Test that the tag groupSprintf has not been assigned to a single image crawler.
"""
crawler = Crawler.create(PathHolder(self.__singleFile))
self.assertNotIn('groupSprintf', crawler.tagNames())
def testIsSequence(self):
"""
Test if a crawler is a sequence.
"""
singleCrawler = Crawler.create(PathHolder(self.__singleFile))
sequenceCrawler = Crawler.create(PathHolder(self.__sequenceFile))
self.assertEqual(singleCrawler.isSequence(), False)
self.assertEqual(singleCrawler.var("imageType"), "single")
self.assertEqual(sequenceCrawler.isSequence(), True)
self.assertEqual(sequenceCrawler.var("imageType"), "sequence")
if __name__ == "__main__":
unittest.main()
| 36.905405 | 89 | 0.677774 | 2,482 | 0.908825 | 0 | 0 | 0 | 0 | 0 | 0 | 864 | 0.316368 |
68a865bda85a4ed7b4aebbb6e21920d35b0aaa7a | 3,688 | py | Python | test/test_websocket_server.py | markkorput/pyRemoteParams | b96cc3d694f5715af40cf71865e7c2f550897a81 | [
"MIT"
] | null | null | null | test/test_websocket_server.py | markkorput/pyRemoteParams | b96cc3d694f5715af40cf71865e7c2f550897a81 | [
"MIT"
] | 2 | 2020-03-13T07:14:36.000Z | 2020-05-22T09:54:08.000Z | test/test_websocket_server.py | markkorput/pyRemoteParams | b96cc3d694f5715af40cf71865e7c2f550897a81 | [
"MIT"
] | null | null | null |
#!/usr/bin/env python
import unittest, asyncio, asynctest, websockets, json
from remote_params import HttpServer, Params, Server, Remote, create_sync_params, schema_list
from remote_params.WebsocketServer import WebsocketServer
class MockSocket:
def __init__(self):
self.close_count = 0
self.msgs = []
def close(self):
self.close_count += 1
async def send(self, msg):
self.msgs.append(msg)
class TestWebsocketServer(asynctest.TestCase):
def setUp(self):
self.params = params = Params()
self.p1 = params.int('some_int')
self.p1.set(0)
self.wss = WebsocketServer(Server(self.params), start=False)
def tearDown(self):
self.wss.stop()
def test_default_port(self):
self.assertEqual(self.wss.port, 8081)
async def test_connects_only_one_remote(self):
self.assertEqual(len(self.wss.server.connected_remotes), 0)
await self.wss.start_async()
self.assertEqual(len(self.wss.server.connected_remotes), 1)
uri = f'ws://localhost:{self.wss.port}'
async with websockets.connect(uri) as websocket:
self.assertEqual(len(self.wss.server.connected_remotes), 1)
async with websockets.connect(uri) as websocket:
self.assertEqual(len(self.wss.server.connected_remotes), 1)
self.assertEqual(len(self.wss.server.connected_remotes), 1)
self.assertEqual(len(self.wss.server.connected_remotes), 1)
self.wss.stop()
self.assertEqual(len(self.wss.server.connected_remotes), 0)
async def test_incoming_value(self):
await self.wss._onMessage(f'POST /some_int?value={3}', None)
self.assertEqual(self.p1.value, 0) # server not started
await self.wss.start_async()
await self.wss._onMessage(f'POST /some_int?value={4}', None)
self.assertEqual(self.p1.value, 4) # param changed
await self.wss._onMessage(f'POST /wrong_int?value={5}', None)
self.assertEqual(self.p1.value, 4) # wrong url
self.wss.stop()
await self.wss._onMessage(f'POST /wrong_int?value={6}', None)
self.assertEqual(self.p1.value, 4) # server stopped
async def test_stop_message(self):
mocksock = MockSocket()
await self.wss._onMessage('stop', mocksock)
self.assertEqual(mocksock.close_count, 1)
async def test_responds_to_schema_request_with_schema_json(self):
mocksocket = MockSocket()
await self.wss._onMessage(f'GET schema.json', mocksocket)
# verify responded with schema json
self.assertEqual(mocksocket.msgs, [
f'POST schema.json?schema={json.dumps(schema_list(self.params))}'
])
async def test_broadcasts_value_changes(self):
await self.wss.start_async()
# connect client
uri = f'ws://127.0.0.1:{self.wss.port}'
async with websockets.connect(uri) as ws:
# receive welcome message
msg = await ws.recv()
self.assertEqual(msg, 'welcome to pyRemoteParams websockets')
# change parameter value
self.p1.set(2)
# receive parameter value change
msg = await ws.recv()
self.assertEqual(msg, 'POST /some_int?value=2')
async def test_broadcasts_schema_change(self):
await self.wss.start_async()
# connect client
uri = f'ws://127.0.0.1:{self.wss.port}'
async with websockets.connect(uri) as ws:
# receive welcome message
msg = await ws.recv()
self.assertEqual(msg, 'welcome to pyRemoteParams websockets')
# change schema layout value
self.params.string('name')
# receive parameter value change
msg = await ws.recv()
self.assertEqual(msg, f'POST schema.json?schema={json.dumps(schema_list(self.params))}')
# run just the tests in this file
if __name__ == '__main__':
unittest.main()
| 31.521368 | 94 | 0.699024 | 3,372 | 0.914317 | 0 | 0 | 0 | 0 | 2,873 | 0.779013 | 838 | 0.227223 |
68a90bb15946847753f138179d4f2bda0c5abd53 | 3,592 | py | Python | analyzer/com_classfiction.py | mt-group-1/social-react | 8b3b75b08c595c99ab61da3c76b4916a524f75b0 | [
"MIT"
] | null | null | null | analyzer/com_classfiction.py | mt-group-1/social-react | 8b3b75b08c595c99ab61da3c76b4916a524f75b0 | [
"MIT"
] | 4 | 2021-11-08T20:48:42.000Z | 2021-11-11T21:38:16.000Z | analyzer/com_classfiction.py | mt-group-1/social-react | 8b3b75b08c595c99ab61da3c76b4916a524f75b0 | [
"MIT"
] | null | null | null | import warnings
import pandas as pd
from nltk.sentiment.vader import SentimentIntensityAnalyzer
warnings.filterwarnings("ignore")
def classify_comments(text_file, page_name):
"""
Description:
    This function receives a text file and converts it into a CSV file so that the comments inside it can be labeled; it also uses the NLTK VADER analyzer to give percentages for positive, negative and neutral impact
Args:
text_file:text file
Returns:
    DataFrames: contains classified data with positive | negative | neutral labels for each comment
"""
# nltk.download("vader_lexicon")
df = pd.read_csv("%s" % text_file, names=["comments"], sep="\t")
    # Clean emails, numbers and special characters from the data to be more accurate
df["comments"] = df["comments"].str.replace("^\d+\s|\s\d+\s|\s\d+$", " ")
df["comments"] = df["comments"].str.replace('"', "")
df["comments"] = df["comments"].str.replace("*", "")
df["comments"] = df["comments"].str.replace("/[^@\s]*@[^@\s]*\.[^@\s]*/", "")
df["comments"] = df["comments"].str.replace(
'"/[a-zA-Z]*[:\/\/]*[A-Za-z0-9\-_]+\.+[A-Za-z0-9\.\/%&=\?\-_]+/i"', ""
)
df["comments"] = df["comments"].str.replace(
"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))",
"",
)
df["comments"] = df["comments"].str.replace("https://", "")
df["comments"] = df["comments"].str.replace(r"\d+(\.\d+)?", "")
sid = SentimentIntensityAnalyzer()
new_words = {
"over": -0.5,
"garbage": -2.0,
"dumpster": -3.1,
":(": -1,
"refuses": -1,
"down": -1,
"crashed": -2,
"Amen": 1,
"Available": 1,
"#Save": 1,
"always": 0.5,
}
sid.lexicon.update(new_words)
    # Create new columns for positive and negative percentages
df["impactPers"] = df["comments"].apply(
lambda comments: sid.polarity_scores(comments)
)
df["posPers"] = df["impactPers"].apply(lambda score_dict: score_dict["pos"])
df["negPers"] = df["impactPers"].apply(lambda score_dict: score_dict["neg"])
df["neuPers"] = df["impactPers"].apply(lambda score_dict: score_dict["neu"])
df["comPers"] = df["impactPers"].apply(lambda score_dict: score_dict["compound"])
    # Label the data depending on the above percentages
def label_race(row):
"""
        This is a helper function that gives a positive or negative impact for each comment based on the percentages
Args:
row :String
Returns:
String (N) or Integer
"""
if row["comPers"] >= 0.02:
return 1
elif row["comPers"] <= -0.02:
return 0
else:
return "N"
    # Create a new column for the final labels
df["labels"] = df.apply(lambda row: label_race(row), axis=1)
    # Create a new file containing two columns
new_df = df[["comments", "labels"]]
create_dir(page_name)
new_df.to_csv("./data/%s/classified_comments.txt" % page_name)
return new_df
def create_dir(page_name):
"""
    Make a new data directory for a page whose directory does not exist yet
Args:
page_name (str)
Returns:
    [boolean]: returns True if the directory did not exist and was created,
    returns False if the directory already exists
"""
import os
dir_path = "./data/%s" % page_name.lower()
if not os.path.isdir(dir_path):
os.mkdir(dir_path)
return True
else:
return False
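# A minimal usage sketch (not part of the original module): "example_page" and the file path
# below are placeholders for a tab-separated dump of scraped comments.
if __name__ == "__main__":
    labeled = classify_comments("./data/example_page/comments.txt", "example_page")
    print(labeled["labels"].value_counts())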
| 32.654545 | 249 | 0.584633 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,942 | 0.540646 |
68aaa854df265e745f3f5094661f5862c8bd4125 | 846 | py | Python | tests/test_android_here.py | b3b/pythonhere | edd45b4f5087d63e1bf265ec6a7c6ade23580de3 | [
"MIT"
] | 2 | 2021-01-06T19:38:25.000Z | 2022-01-25T10:00:20.000Z | tests/test_android_here.py | b3b/pythonhere | edd45b4f5087d63e1bf265ec6a7c6ade23580de3 | [
"MIT"
] | 1 | 2021-01-04T10:44:12.000Z | 2021-01-04T10:44:12.000Z | tests/test_android_here.py | b3b/pythonhere | edd45b4f5087d63e1bf265ec6a7c6ade23580de3 | [
"MIT"
] | null | null | null | import pytest
@pytest.mark.parametrize(
"script", (None, "test.py"),
)
def test_restart_app(mocked_android_modules, app_instance, test_py_script, script):
from android_here import restart_app
restart_app(script)
def test_script_path_resolved(mocked_android_modules, app_instance, test_py_script):
from android_here import resolve_script_path
path = resolve_script_path("test.py")
assert path.startswith("/") and path.endswith("test.py")
def test_absolute_script_path_resolved(mocked_android_modules, app_instance, test_py_script):
from android_here import resolve_script_path
assert resolve_script_path(test_py_script) == test_py_script
def test_pin_shortcut(mocker, mocked_android_modules, app_instance, test_py_script):
from android_here import pin_shortcut
pin_shortcut("test.py", "test label")
| 32.538462 | 93 | 0.795508 | 0 | 0 | 0 | 0 | 209 | 0.247045 | 0 | 0 | 59 | 0.06974 |
68aff5ecfb731da2cc7a3de58df5147f26f41df7 | 346 | py | Python | Example/facenet_example.py | generalized-intelligence/Tegu | 99394f62f2acdeed9e985685811e76f94ab7ac11 | [
"BSD-3-Clause"
] | 11 | 2019-01-29T07:01:38.000Z | 2021-07-28T06:25:49.000Z | Example/facenet_example.py | generalized-intelligence/Tegu | 99394f62f2acdeed9e985685811e76f94ab7ac11 | [
"BSD-3-Clause"
] | 23 | 2019-02-18T13:35:09.000Z | 2022-03-11T23:42:11.000Z | Example/facenet_example.py | generalized-intelligence/Tegu | 99394f62f2acdeed9e985685811e76f94ab7ac11 | [
"BSD-3-Clause"
] | 3 | 2019-02-12T08:09:45.000Z | 2019-06-06T02:28:55.000Z | import sys
sys.path.append('..')
from Network.facenet.API import build_face_manager, detetion_and_recongnize
'''
You have to build face manager before you start detection and recongnize.
'''
face_manager = build_face_manager(r"face/dataset/path")
result = detetion_and_recongnize(face_manager, r"images/path/you/want/to/detect")
print(result)
| 28.833333 | 81 | 0.794798 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 138 | 0.398844 |
68b1fd87e78b3220b657c40bcdf0a4d61bd25d07 | 149 | py | Python | Fundamentos/mundo_pc/dispositivo_entrada.py | ijchavez/python | bccd94a9bee90125e2be27b0355bdaedb0ae9d19 | [
"Unlicense"
] | null | null | null | Fundamentos/mundo_pc/dispositivo_entrada.py | ijchavez/python | bccd94a9bee90125e2be27b0355bdaedb0ae9d19 | [
"Unlicense"
] | null | null | null | Fundamentos/mundo_pc/dispositivo_entrada.py | ijchavez/python | bccd94a9bee90125e2be27b0355bdaedb0ae9d19 | [
"Unlicense"
] | null | null | null | class DispositivoEntrada:
def __init__(self, marca, tipo_entrada):
self._marca = marca
self.tipo_entrada = tipo_entrada
| 24.833333 | 44 | 0.657718 | 139 | 0.932886 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
68b43353f299ef2ed5654c73d7e6db582ed331bb | 132 | py | Python | src/clarityv2/homepage/urls.py | Clarity-89/clarityv2 | 7c4af765c5473778fc0750d1505f8b5f1724e5c5 | [
"MIT"
] | null | null | null | src/clarityv2/homepage/urls.py | Clarity-89/clarityv2 | 7c4af765c5473778fc0750d1505f8b5f1724e5c5 | [
"MIT"
] | 1 | 2022-03-11T23:38:16.000Z | 2022-03-11T23:38:16.000Z | src/clarityv2/homepage/urls.py | Clarity-89/clarityv2 | 7c4af765c5473778fc0750d1505f8b5f1724e5c5 | [
"MIT"
] | null | null | null | from django.urls import path
from .views import HomepageView
urlpatterns = [
path('', HomepageView.as_view(), name='home'),
]
| 16.5 | 50 | 0.704545 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0.060606 |
68b51c475fa96363f2601bbdb5efa8957c5f2363 | 5,394 | py | Python | kolibri/plugins/utils/options.py | MBKayro/kolibri | 0a38a5fb665503cf8f848b2f65938e73bfaa5989 | [
"MIT"
] | 545 | 2016-01-19T19:26:55.000Z | 2022-03-20T00:13:04.000Z | kolibri/plugins/utils/options.py | MBKayro/kolibri | 0a38a5fb665503cf8f848b2f65938e73bfaa5989 | [
"MIT"
] | 8,329 | 2016-01-19T19:32:02.000Z | 2022-03-31T21:23:12.000Z | kolibri/plugins/utils/options.py | MBKayro/kolibri | 0a38a5fb665503cf8f848b2f65938e73bfaa5989 | [
"MIT"
] | 493 | 2016-01-19T19:26:48.000Z | 2022-03-28T14:35:05.000Z | import copy
import logging
import warnings
from kolibri.plugins.registry import registered_plugins
logger = logging.getLogger(__name__)
def __validate_config_option(
section, name, base_config_spec, plugin_specs, module_path
):
# Raise an error if someone tries to overwrite a base option
# except for the default value.
if section in base_config_spec:
if name in base_config_spec[section]:
raise ValueError("Cannot overwrite a core Kolibri options spec option")
# Warn if a plugin tries to add an option that another plugin has already added
if section in plugin_specs:
if name in plugin_specs[section]:
warnings.warn(
"{plugin} set an option {option} in section {section} but {plugins} had already set it".format(
plugin=module_path,
plugins=", ".join(plugin_specs[section][name]),
option=name,
section=section,
)
)
plugin_specs[section][name].append(module_path)
else:
# If not create the list for this option name
# to track this and future modifications
plugin_specs[section][name] = [module_path]
else:
# If not create the dict for the section
# and the list for this option name
plugin_specs[section] = {name: [module_path]}
def __process_config_spec(
option_spec, base_config_spec, plugin_specs, module_path, final_spec
):
for section, opts in option_spec.items():
for name, attrs in opts.items():
__validate_config_option(
section, name, base_config_spec, plugin_specs, module_path
)
if section not in final_spec:
final_spec[section] = {}
final_spec[section][name] = attrs
def __validate_option_default(section, name, plugin_default_overrides, module_path):
    # Warn if a plugin tries to override an option default that another plugin has already overridden
if section in plugin_default_overrides:
if name in plugin_default_overrides[section]:
warnings.warn(
"{plugin} set an option default {option} in section {section} but {plugins} had already set it".format(
plugin=module_path,
plugins=", ".join(plugin_default_overrides[section][name]),
option=name,
section=section,
)
)
plugin_default_overrides[section][name].append(module_path)
else:
# If not create the list for this option name
# to track this and future modifications
plugin_default_overrides[section][name] = [module_path]
else:
# If not create the dict for the section
# and the list for this option name
plugin_default_overrides[section] = {name: [module_path]}
def __process_option_defaults(
option_defaults, base_config_spec, plugin_default_overrides, module_path, final_spec
):
for section, opts in option_defaults.items():
for name, default in opts.items():
__validate_option_default(
section, name, plugin_default_overrides, module_path
)
if section not in final_spec:
logger.error(
"Tried to set a new default in section {}, but this is not a valid section".format(
section
)
)
continue
if name in final_spec[section]:
# This is valid, so set a default
# Note that we do not validation here for now,
# so it is up to the user to ensure the default value
# is kosher.
final_spec[section][name]["default"] = default
else:
logger.error(
"Tried to set a new default in section {}, for option {} but this is not a valid option".format(
section, name
)
)
def extend_config_spec(base_config_spec):
plugin_specs = {}
final_spec = copy.deepcopy(base_config_spec)
# First process options config spec additions
for plugin_instance in registered_plugins:
plugin_options = plugin_instance.options_module
if plugin_options and hasattr(plugin_options, "option_spec"):
module_path = plugin_instance.module_path
option_spec = plugin_options.option_spec
__process_config_spec(
option_spec, base_config_spec, plugin_specs, module_path, final_spec
)
# Now process default value overrides, do this second in order to allow plugins
# to override default values for other plugins!
plugin_default_overrides = {}
for plugin_instance in registered_plugins:
plugin_options = plugin_instance.option_defaults_module
if plugin_options and hasattr(plugin_options, "option_defaults"):
module_path = plugin_instance.module_path
option_defaults = plugin_options.option_defaults
__process_option_defaults(
option_defaults,
base_config_spec,
plugin_default_overrides,
module_path,
final_spec,
)
return final_spec
| 40.253731 | 119 | 0.616982 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,329 | 0.246385 |
68b5318b5c1be79ea4655397f748e0742a2d9190 | 2,909 | py | Python | utils.py | yzhhome/seq2seq_chatbot | 2dd135da0d390c19b48cd72dfe19d6b32f698e03 | [
"MIT"
] | null | null | null | utils.py | yzhhome/seq2seq_chatbot | 2dd135da0d390c19b48cd72dfe19d6b32f698e03 | [
"MIT"
] | null | null | null | utils.py | yzhhome/seq2seq_chatbot | 2dd135da0d390c19b48cd72dfe19d6b32f698e03 | [
"MIT"
] | null | null | null | from io import open
import time
import math
import torch
import torch.nn.functional as F
from config import MAX_LENGTH
from config import SOS_token
from config import EOS_token
from config import device
class Lang:
def __init__(self, name):
self.name = name
self.word2index = {}
self.word2count = {}
        self.index2word = {0: "SOS", 1: "EOS"}  # start/end-of-sentence tokens
self.n_words = 2 # Count SOS and EOS
def addSentence(self, sentence):
for word in sentence.split(' '):
self.addWord(word)
def addWord(self, word):
if word not in self.word2index:
self.word2index[word] = self.n_words
self.word2count[word] = 1
self.index2word[self.n_words] = word
self.n_words += 1
else:
self.word2count[word] += 1
def readLangs(lang1, lang2, reverse=False):
    # Read the data file into a list of lines
lines = open('data/chatdata_all.txt', encoding='utf-8').\
read().strip().split('\n')
    # Split each line on '@@' into a (question, answer) pair
pairs = [[s for s in l.split('@@')] for l in lines]
print(pairs)
# Reverse
if reverse:
pairs = [list(reversed(p)) for p in pairs]
input_lang = Lang(lang2)
output_lang = Lang(lang1)
else:
input_lang = Lang(lang1)
output_lang = Lang(lang2)
return input_lang, output_lang, pairs
def filterPair(p):
return len(p[0].split(' ')) < MAX_LENGTH and \
len(p[1].split(' ')) < MAX_LENGTH
def filterPairs(pairs):
return [pair for pair in pairs if filterPair(pair)]
def prepareData(lang1, lang2, reverse=False):
input_lang, output_lang, pairs = readLangs(lang1, lang2, reverse)
print("Read %s sentence pairs" % len(pairs))
pairs = filterPairs(pairs)
print("Trimmed to %s sentence pairs" % len(pairs))
print("Counting words...")
for pair in pairs:
input_lang.addSentence(pair[0])
output_lang.addSentence(pair[1])
print("Counted words:")
print(input_lang.name, input_lang.n_words)
print(output_lang.name, output_lang.n_words)
return input_lang, output_lang, pairs
# Convert a sentence to a list of word indexes
def indexesFromSentence(lang, sentence):
return [lang.word2index[word] for word in sentence.split(' ')]
#句子转tensor
def tensorFromSentence(lang, sentence):
indexes = indexesFromSentence(lang, sentence)
indexes.append(EOS_token)
return torch.tensor(indexes, dtype=torch.long, device=device).view(-1, 1)
# Convert a (question, answer) pair to input/target tensors
def tensorsFromPair(pair, input_lang, output_lang):
input_tensor = tensorFromSentence(input_lang, pair[0])
target_tensor = tensorFromSentence(output_lang, pair[1])
return (input_tensor, target_tensor)
def asMinutes(s):
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def timeSince(since, percent):
now = time.time()
s = now - since
es = s / (percent)
rs = es - s
return '%s (- %s)' % (asMinutes(s), asMinutes(rs)) | 28.80198 | 77 | 0.644208 | 629 | 0.211003 | 0 | 0 | 0 | 0 | 0 | 0 | 329 | 0.110366 |
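# Illustrative usage sketch (assumes data/chatdata_all.txt exists with
# '@@'-separated question/answer lines, as read by readLangs above):
#
#     input_lang, output_lang, pairs = prepareData("ask", "answer")
#     input_tensor, target_tensor = tensorsFromPair(pairs[0], input_lang, output_lang)
#
# Each tensor has shape (number_of_words + 1, 1) and ends with the EOS token.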
68b5fc5d2f892e35cab73d5c07f10d1ff89a7241 | 1,941 | py | Python | src/archive-comments-lambda/main.py | knotsrepus/knotsrepus-archiver | 68384f24c6b8c3a45c39ac9720e10c26c1512279 | [
"Apache-2.0"
] | 1 | 2021-07-17T07:56:46.000Z | 2021-07-17T07:56:46.000Z | src/archive-comments-lambda/main.py | knotsrepus/knotsrepus-archiver | 68384f24c6b8c3a45c39ac9720e10c26c1512279 | [
"Apache-2.0"
] | null | null | null | src/archive-comments-lambda/main.py | knotsrepus/knotsrepus-archiver | 68384f24c6b8c3a45c39ac9720e10c26c1512279 | [
"Apache-2.0"
] | null | null | null | import asyncio
import json
import os
from datetime import datetime
from src.common import log_utils, pushshift
from src.common.filesystem import S3FileSystem, StubFileSystem
from src.common.lambda_context import local_lambda_invocation
def handler(event, context):
logger = log_utils.get_logger("archive-comments-lambda")
submission_id = event["Records"][0]["Sns"]["Message"]
if context is local_lambda_invocation:
filesystem = StubFileSystem()
else:
filesystem = S3FileSystem(os.environ.get("ARCHIVE_DATA_BUCKET"))
return asyncio.get_event_loop().run_until_complete(handle(submission_id, filesystem, logger))
async def handle(submission_id, filesystem, logger):
logger.info(f"Archiving comments for {submission_id}...")
comment_ids = await get_comment_ids(submission_id)
comments = await get_comments(comment_ids)
await filesystem.mkdir(submission_id)
data = json.dumps(comments, ensure_ascii=True, indent=4)
await filesystem.write(f"{submission_id}/comments.json", data)
return {
"statusCode": 200,
"body": json.dumps({
"submission_id": submission_id,
"last_updated": datetime.utcnow().timestamp()
})
}
async def get_comment_ids(submission_id):
comment_ids = await pushshift.request(f"submission/comment_ids/{submission_id}")
return comment_ids
async def get_comments(comment_ids):
if comment_ids is None:
return []
chunk_size = 256
id_chunks = [comment_ids[x: x + chunk_size] for x in range(0, len(comment_ids), chunk_size)]
comments = []
for id_chunk in id_chunks:
ids = ",".join(id_chunk)
chunk = await pushshift.request("search/comment", ids=ids)
comments.extend(chunk)
return comments
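# Note (illustrative): get_comments above fetches the comment ids in chunks of
# 256 per Pushshift request (e.g. 1000 ids become requests of 256/256/256/232)
# and concatenates the returned comments.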
if __name__ == "__main__":
with open("event.json", "r") as file:
event = json.load(file)
handler(event, local_lambda_invocation)
| 26.958333 | 97 | 0.703246 | 0 | 0 | 0 | 0 | 0 | 0 | 1,133 | 0.58372 | 277 | 0.14271 |
68b67bfa73f83b95f8183d1aa57786bce152b7a4 | 11,578 | py | Python | cpc/criterion/clustering/clustering_quantization.py | anuragkumar95/CPC_audio | c771d07bb1cac4df947496813a4d07fd95e53f7f | [
"MIT"
] | 6 | 2021-04-27T06:19:15.000Z | 2021-11-04T20:22:46.000Z | cpc/criterion/clustering/clustering_quantization.py | anuragkumar95/CPC_audio | c771d07bb1cac4df947496813a4d07fd95e53f7f | [
"MIT"
] | 7 | 2021-02-05T11:40:54.000Z | 2021-04-06T12:45:54.000Z | cpc/criterion/clustering/clustering_quantization.py | anuragkumar95/CPC_audio | c771d07bb1cac4df947496813a4d07fd95e53f7f | [
"MIT"
] | 5 | 2021-04-29T06:45:00.000Z | 2021-11-23T19:51:02.000Z | import os
import sys
import json
import argparse
import progressbar
from pathlib import Path
from random import shuffle
from time import time
import torch
from cpc.dataset import findAllSeqs
from cpc.feature_loader import buildFeature, FeatureModule, loadModel, buildFeature_batch
from cpc.criterion.clustering import kMeanCluster
#from cpc.criterion.research.clustering import kMeanCluster
def readArgs(pathArgs):
print(f"Loading args from {pathArgs}")
with open(pathArgs, 'r') as file:
args = argparse.Namespace(**json.load(file))
return args
def loadClusterModule(pathCheckpoint, norm_vec_len=False):
print(f"Loading ClusterModule at {pathCheckpoint}")
state_dict = torch.load(pathCheckpoint)
if "state_dict" in state_dict: #kmeans
clusterModule = kMeanCluster(torch.zeros(1, state_dict["n_clusters"], state_dict["dim"]), norm_vec_len)
clusterModule.load_state_dict(state_dict["state_dict"])
else: #dpmeans
clusterModule = kMeanCluster(state_dict["mu"])
clusterModule = clusterModule.cuda()
return clusterModule
def parseArgs(argv):
# Run parameters
parser = argparse.ArgumentParser(description='Quantize audio files using CPC Clustering Module.')
parser.add_argument('pathCheckpoint', type=str,
help='Path to the clustering checkpoint.')
parser.add_argument('pathDB', type=str,
help='Path to the dataset that we want to quantize.')
parser.add_argument('pathOutput', type=str,
help='Path to the output directory.')
parser.add_argument('--pathSeq', type=str,
help='Path to the sequences (file names) to be included used.')
parser.add_argument('--split', type=str, default=None,
help="If you want to divide the dataset in small splits, specify it "
"with idxSplit-numSplits (idxSplit > 0), eg. --split 1-20.")
parser.add_argument('--file_extension', type=str, default=".flac",
help="Extension of the audio files in the dataset (default: .flac).")
parser.add_argument('--max_size_seq', type=int, default=10240,
help='Maximal number of frames to consider '
                        'when computing a batch of features (default: 10240).')
parser.add_argument('--batch_size', type=int, default=8,
help='Batch size used to compute features '
                        'when computing each file (default: 8).')
parser.add_argument('--strict', type=bool, default=True,
help='If activated, each batch of feature '
                        'will contain exactly max_size_seq frames (default: True).')
parser.add_argument('--debug', action='store_true',
help="Load only a very small amount of files for "
"debugging purposes.")
parser.add_argument('--nobatch', action='store_true',
help="Don't use batch implementation of when building features."
"NOTE: This can have better quantized units as we can set "
"model.gAR.keepHidden = True (line 162), but the quantization"
"will be a bit longer.")
parser.add_argument('--recursionLevel', type=int, default=1,
                        help='Speaker level in pathDB (default: 1). This is only helpful '
                        'when --separate-speaker is activated.')
parser.add_argument('--separate-speaker', action='store_true',
help="Separate each speaker with a different output file.")
parser.add_argument('--norm_vec_len', action='store_true',
help="Normalize vector lengths.")
return parser.parse_args(argv)
def main(argv):
# Args parser
args = parseArgs(argv)
print("=============================================================")
print(f"Quantizing data from {args.pathDB}")
print("=============================================================")
# Check if directory exists
if not os.path.exists(args.pathOutput):
print("")
print(f"Creating the output directory at {args.pathOutput}")
Path(args.pathOutput).mkdir(parents=True, exist_ok=True)
# Get splits
if args.split:
assert len(args.split.split("-"))==2 and int(args.split.split("-")[1]) >= int(args.split.split("-")[0]) >= 1, \
"SPLIT must be under the form idxSplit-numSplits (numSplits >= idxSplit >= 1), eg. --split 1-20"
idx_split, num_splits = args.split.split("-")
idx_split = int(idx_split)
num_splits = int(num_splits)
# Find all sequences
print("")
print(f"Looking for all {args.file_extension} files in {args.pathDB} with speakerLevel {args.recursionLevel}")
seqNames, speakers = findAllSeqs(args.pathDB,
speaker_level=args.recursionLevel,
extension=args.file_extension,
loadCache=True)
if args.pathSeq:
with open(args.pathSeq, 'r') as f:
seqs = set([x.strip() for x in f])
filtered = []
for s in seqNames:
if s[1].split('/')[-1].split('.')[0] in seqs:
filtered.append(s)
seqNames = filtered
print(f"Done! Found {len(seqNames)} files and {len(speakers)} speakers!")
if args.separate_speaker:
seqNames_by_speaker = {}
for seq in seqNames:
speaker = seq[1].split("/")[args.recursionLevel-1]
if speaker not in seqNames_by_speaker:
seqNames_by_speaker[speaker] = []
seqNames_by_speaker[speaker].append(seq)
# Check if output file exists
if not args.split:
nameOutput = "quantized_outputs.txt"
else:
nameOutput = f"quantized_outputs_split_{idx_split}-{num_splits}.txt"
if args.separate_speaker is False:
outputFile = os.path.join(args.pathOutput, nameOutput)
assert not os.path.exists(outputFile), \
f"Output file {outputFile} already exists !!!"
# Get splits
if args.split:
startIdx = len(seqNames) // num_splits * (idx_split-1)
if idx_split == num_splits:
endIdx = len(seqNames)
else:
endIdx = min(len(seqNames) // num_splits * idx_split, len(seqNames))
seqNames = seqNames[startIdx:endIdx]
print("")
print(f"Quantizing split {idx_split} out of {num_splits} splits, with {len(seqNames)} files (idx in range({startIdx}, {endIdx})).")
# Debug mode
if args.debug:
nsamples=20
print("")
print(f"Debug mode activated, only load {nsamples} samples!")
# shuffle(seqNames)
seqNames = seqNames[:nsamples]
# Load Clustering args
assert args.pathCheckpoint[-3:] == ".pt"
if os.path.exists(args.pathCheckpoint[:-3] + "_args.json"):
pathConfig = args.pathCheckpoint[:-3] + "_args.json"
elif os.path.exists(os.path.join(os.path.dirname(args.pathCheckpoint), "checkpoint_args.json")):
pathConfig = os.path.join(os.path.dirname(args.pathCheckpoint), "checkpoint_args.json")
else:
assert False, \
f"Args file not found in the directory {os.path.dirname(args.pathCheckpoint)}"
clustering_args = readArgs(pathConfig)
print("")
print(f"Clutering args:\n{json.dumps(vars(clustering_args), indent=4, sort_keys=True)}")
print('-' * 50)
# Load CluterModule
clusterModule = loadClusterModule(args.pathCheckpoint, norm_vec_len=args.norm_vec_len)
clusterModule.cuda()
# Load FeatureMaker
print("")
print("Loading CPC FeatureMaker")
if 'level_gru' in vars(clustering_args) and clustering_args.level_gru is not None:
updateConfig = argparse.Namespace(nLevelsGRU=clustering_args.level_gru)
else:
updateConfig = None
model = loadModel([clustering_args.pathCheckpoint], updateConfig=updateConfig)[0]
## If we don't apply batch implementation, we can set LSTM model to keep hidden units
## making the quality of the quantized units better
if args.nobatch:
model.gAR.keepHidden = True
featureMaker = FeatureModule(model, clustering_args.encoder_layer)
if clustering_args.dimReduction is not None:
dimRed = loadDimReduction(clustering_args.dimReduction, clustering_args.centroidLimits)
featureMaker = torch.nn.Sequential(featureMaker, dimRed)
if not clustering_args.train_mode:
featureMaker.eval()
featureMaker.cuda()
def feature_function(x):
if args.nobatch is False:
res0 = buildFeature_batch(featureMaker, x,
seqNorm=False,
strict=args.strict,
maxSizeSeq=args.max_size_seq,
batch_size=args.batch_size)
if args.norm_vec_len:
# [!] we actually used CPC_audio/scripts/quantize_audio.py for that in the end
res0Lengths = torch.sqrt((res0*res0).sum(2))
res0 = res0 / res0Lengths.view(*(res0Lengths.shape), 1)
return res0
else:
res0 = buildFeature(featureMaker, x,
seqNorm=False,
strict=args.strict)
if args.norm_vec_len:
# [!] we actually used CPC_audio/scripts/quantize_audio.py for that in the end
res0Lengths = torch.sqrt((res0*res0).sum(2))
res0 = res0 / res0Lengths.view(*(res0Lengths.shape), 1)
return res0
print("CPC FeatureMaker loaded!")
# Quantization of files
print("")
print(f"Quantizing audio files...")
seqQuantLines = []
bar = progressbar.ProgressBar(maxval=len(seqNames))
bar.start()
start_time = time()
for index, vals in enumerate(seqNames):
bar.update(index)
file_path = vals[1]
file_path = os.path.join(args.pathDB, file_path)
# Get features & quantizing
cFeatures = feature_function(file_path).cuda()
nGroups = cFeatures.size(-1)//clusterModule.Ck.size(-1)
cFeatures = cFeatures.view(1, -1, clusterModule.Ck.size(-1))
if len(vals) > 2 and int(vals[-1]) > 9400000: # Librilight, to avoid OOM
clusterModule = clusterModule.cpu()
cFeatures = cFeatures.cpu()
qFeatures = torch.argmin(clusterModule(cFeatures), dim=-1)
clusterModule = clusterModule.cuda()
else:
qFeatures = torch.argmin(clusterModule(cFeatures), dim=-1)
qFeatures = qFeatures[0].detach().cpu().numpy()
# Transform to quantized line
quantLine = ",".join(["-".join([str(i) for i in item]) for item in qFeatures.reshape(-1, nGroups)])
seqQuantLines.append(quantLine)
bar.finish()
print(f"...done {len(seqQuantLines)} files in {time()-start_time} seconds.")
# Saving outputs
print("")
print(f"Saving outputs to {outputFile}")
outLines = []
for vals, quantln in zip(seqNames, seqQuantLines):
file_path = vals[1]
file_name = os.path.splitext(os.path.basename(file_path))[0]
outLines.append("\t".join([file_name, quantln]))
with open(outputFile, "w") as f:
f.write("\n".join(outLines))
if __name__ == "__main__":
args = sys.argv[1:]
main(args)
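# Illustrative invocation (paths are hypothetical):
#
#     python clustering_quantization.py checkpoints/clustering_kmeans50.pt \
#         /data/LibriSpeech/train-clean-100 quantized_out/ \
#         --file_extension .flac --batch_size 8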
| 43.040892 | 139 | 0.608568 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,471 | 0.299793 |
d799c19014a1b79f5fe8a1069ce05224e3482da3 | 1,591 | py | Python | workflows/pipe-common/pipeline/autoscaling/cloudprovider.py | NShaforostov/cloud-pipeline | 8d25b2b5f4838be569d9c25a307b77df5b0e73fc | [
"Apache-2.0"
] | null | null | null | workflows/pipe-common/pipeline/autoscaling/cloudprovider.py | NShaforostov/cloud-pipeline | 8d25b2b5f4838be569d9c25a307b77df5b0e73fc | [
"Apache-2.0"
] | 378 | 2021-03-25T20:09:54.000Z | 2021-10-01T01:02:39.000Z | workflows/pipe-common/pipeline/autoscaling/cloudprovider.py | NShaforostov/cloud-pipeline | 8d25b2b5f4838be569d9c25a307b77df5b0e73fc | [
"Apache-2.0"
] | 2 | 2019-08-09T18:04:54.000Z | 2019-08-11T19:03:06.000Z | # Copyright 2017-2019 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
LIMIT_EXCEEDED_ERROR_MASSAGE = 'Instance limit exceeded. A new one will be launched as soon as free space is available.'
LIMIT_EXCEEDED_EXIT_CODE = 6
class AbstractInstanceProvider(object):
def run_instance(self, is_spot, bid_price, ins_type, ins_hdd, ins_img, ins_key, run_id, kms_encyr_key_id,
num_rep, time_rep, kube_ip, kubeadm_token):
pass
def find_and_tag_instance(self, old_id, new_id):
pass
def verify_run_id(self, run_id):
pass
def check_instance(self, ins_id, run_id, num_rep, time_rep):
pass
def get_instance_names(self, ins_id):
pass
def find_instance(self, run_id):
pass
def terminate_instance(self, ins_id):
pass
def terminate_instance_by_ip(self, node_internal_ip, node_name):
pass
def find_nodes_with_run_id(self, run_id):
instance = self.find_instance(run_id)
return [instance] if instance is not None else []
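# Illustrative sketch (hypothetical provider, not part of this module): a
# concrete implementation subclasses AbstractInstanceProvider and fills in
# the lifecycle hooks, e.g.
#
#     class MyCloudInstanceProvider(AbstractInstanceProvider):
#         def run_instance(self, is_spot, bid_price, ins_type, ins_hdd,
#                          ins_img, ins_key, run_id, kms_encyr_key_id,
#                          num_rep, time_rep, kube_ip, kubeadm_token):
#             ...  # launch a VM, tag it with run_id, return its identifiers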
| 32.469388 | 125 | 0.715273 | 822 | 0.516656 | 0 | 0 | 0 | 0 | 0 | 0 | 691 | 0.434318 |
d79a0e50ffc39bcabbef42ae425c1e746935ff84 | 4,140 | py | Python | setup.py | gokceneraslan/ProDy | 74d40c372c53bd68f5e9f9c47b991b2e2b1b9f27 | [
"NCSA"
] | null | null | null | setup.py | gokceneraslan/ProDy | 74d40c372c53bd68f5e9f9c47b991b2e2b1b9f27 | [
"NCSA"
] | null | null | null | setup.py | gokceneraslan/ProDy | 74d40c372c53bd68f5e9f9c47b991b2e2b1b9f27 | [
"NCSA"
] | null | null | null | import glob
import os
import os.path
import sys
import shutil
import cPickle
from types import StringType, UnicodeType
from distutils.core import setup
from distutils.extension import Extension
from distutils.command.install import install
PY3K = sys.version_info[0] > 2
with open('README.rst') as inp:
long_description = inp.read()
__version__ = ''
inp = open('prody/__init__.py')
for line in inp:
if (line.startswith('__version__')):
exec(line.strip())
break
inp.close()
def isInstalled(module_name):
"""Check if a required package is installed, by trying to import it."""
try:
return __import__(module_name)
except ImportError:
return False
else:
return True
if not isInstalled('numpy'):
print("""NumPy is not installed. This package is required for main ProDy
features and needs to be installed before you can use ProDy.
You can find NumPy at: http://numpy.scipy.org""")
PACKAGES = ['prody', 'prody.atomic', 'prody.dynamics', 'prody.ensemble',
'prody.measure', 'prody.proteins', 'prody.trajectory',
'prody.routines', 'prody.utilities']
PACKAGE_DATA = {}
if sys.version_info[:2] > (2,6):
PACKAGES.extend(['prody.tests', 'prody.tests.test_kdtree',
'prody.tests.test_measure'])
PACKAGE_DATA['prody.tests'] = ['data/pdb*.pdb', 'data/*.dat',
'data/*.coo', 'data/dcd*.dcd']
EXTENSIONS = []
if os.name != 'java' and sys.version_info[0] == 2:
pairwise2 = ['cpairwise2.c', 'pairwise2.py']
if all([os.path.isfile(os.path.join('prody', 'proteins', fn))
for fn in pairwise2]):
EXTENSIONS.append(
Extension('prody.proteins.cpairwise2',
['prody/proteins/cpairwise2.c'],
include_dirs=["prody"]
))
else:
raise Exception('one or more pairwise2 module files are missing')
if isInstalled('numpy'):
import numpy
kdtree_files = ['__init__.py', 'KDTree.c', 'KDTree.h',
'KDTreemodule.c', 'Neighbor.h', 'kdtree.py']
if all([os.path.isfile(os.path.join('prody', 'kdtree', fn))
for fn in kdtree_files]):
EXTENSIONS.append(
Extension('prody.kdtree._CKDTree',
['prody/kdtree/KDTree.c',
'prody/kdtree/KDTreemodule.c'],
include_dirs=[numpy.get_include()],
))
else:
raise Exception('one or more kdtree module files are missing')
PACKAGES.append('prody.kdtree')
elif isInstalled('numpy'):
raise ImportError('numpy is not installed')
SCRIPTS = ['scripts/prody']
setup(
name='ProDy',
version=__version__,
author='Ahmet Bakan',
author_email='ahb12 at pitt dot edu',
description='A Python Package for Protein Dynamics Analysis',
long_description=long_description,
url='http://www.csb.pitt.edu/ProDy',
packages=PACKAGES,
package_data=PACKAGE_DATA,
ext_modules=EXTENSIONS,
license='GPLv3',
keywords=('protein, dynamics, elastic network model, '
'Gaussian network model, anisotropic network model, '
'essential dynamics analysis, principal component analysis, '
'Protein Data Bank, PDB, GNM, ANM, PCA'),
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Scientific/Engineering :: Chemistry',
],
scripts=SCRIPTS,
requires=['NumPy', ],
provides=['ProDy({0:s})'.format(__version__)]
)
| 35.689655 | 79 | 0.587923 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,747 | 0.421981 |
d79a69ca5970bcc43c73bbe72a7d4bd0d0841c39 | 491 | py | Python | rabbitai/migrations/versions/d7c1a0d6f2da_remove_limit_used_from_query_model.py | psbsgic/rabbitai | 769e120ba605d56ac076f810a549c38dac410c8e | [
"Apache-2.0"
] | null | null | null | rabbitai/migrations/versions/d7c1a0d6f2da_remove_limit_used_from_query_model.py | psbsgic/rabbitai | 769e120ba605d56ac076f810a549c38dac410c8e | [
"Apache-2.0"
] | null | null | null | rabbitai/migrations/versions/d7c1a0d6f2da_remove_limit_used_from_query_model.py | psbsgic/rabbitai | 769e120ba605d56ac076f810a549c38dac410c8e | [
"Apache-2.0"
] | 1 | 2021-07-09T16:29:50.000Z | 2021-07-09T16:29:50.000Z | """Remove limit used from query model
Revision ID: d7c1a0d6f2da
Revises: afc69274c25a
Create Date: 2019-06-04 10:12:36.675369
"""
# revision identifiers, used by Alembic.
revision = "d7c1a0d6f2da"
down_revision = "afc69274c25a"
import sqlalchemy as sa
from alembic import op
def upgrade():
with op.batch_alter_table("query") as batch_op:
batch_op.drop_column("limit_used")
def downgrade():
op.add_column("query", sa.Column("limit_used", sa.BOOLEAN(), nullable=True))
| 20.458333 | 80 | 0.735234 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 237 | 0.482688 |
d79a6a449f7e90756102071f66e54f7f316037df | 13,393 | py | Python | realworld_benchmark/nets/eig_layer.py | DomInvivo/pna | 1a7d8ae645d093ebedeffcf148a98f6061957a23 | [
"MIT"
] | null | null | null | realworld_benchmark/nets/eig_layer.py | DomInvivo/pna | 1a7d8ae645d093ebedeffcf148a98f6061957a23 | [
"MIT"
] | null | null | null | realworld_benchmark/nets/eig_layer.py | DomInvivo/pna | 1a7d8ae645d093ebedeffcf148a98f6061957a23 | [
"MIT"
] | 2 | 2020-11-05T15:34:23.000Z | 2020-12-17T17:44:48.000Z | EPS = 1e-5
import threading
import torch
import torch.nn as nn
import torch.nn.functional as F
from .aggregators import AGGREGATORS
from .layers import MLP, FCLayer
from .scalers import SCALERS
class EIGLayerComplex(nn.Module):
def __init__(self, in_dim, out_dim, dropout, graph_norm, batch_norm, aggregators, scalers, avg_d, residual,
edge_features, edge_dim, pretrans_layers=1, posttrans_layers=1):
super().__init__()
# retrieve the aggregators and scalers functions
aggregators = [AGGREGATORS[aggr] for aggr in aggregators.split()]
scalers = [SCALERS[scale] for scale in scalers.split()]
self.dropout = dropout
self.graph_norm = graph_norm
self.batch_norm = batch_norm
self.edge_features = edge_features
self.residual = residual
self.aggregators = aggregators
self.scalers = scalers
self.batchnorm_h = nn.BatchNorm1d(out_dim)
self.pretrans = MLP(in_size=2 * in_dim + (edge_dim if edge_features else 0), hidden_size=in_dim,
out_size=in_dim, layers=pretrans_layers, mid_activation='relu', last_activation='none')
self.posttrans = MLP(in_size=(len(aggregators) * len(scalers) + 1) * in_dim, hidden_size=out_dim,
out_size=out_dim, layers=posttrans_layers, mid_activation='relu', last_activation='none')
self.avg_d = avg_d
if in_dim != out_dim:
self.residual = False
def pretrans_edges(self, edges):
if self.edge_features:
z2 = torch.cat([edges.src['h'], edges.dst['h'], edges.data['ef']], dim=1)
else:
z2 = torch.cat([edges.src['h'], edges.dst['h']], dim=1)
return {'e': self.pretrans(z2), 'eig_s': edges.src['eig'], 'eig_d': edges.dst['eig']}
def message_func(self, edges):
return {'e': edges.data['e'], 'eig_s': edges.data['eig_s'].to('cuda' if torch.cuda.is_available() else 'cpu'), 'eig_d': edges.data['eig_d'].to('cuda' if torch.cuda.is_available() else 'cpu')}
def reduce_func(self, nodes):
h_in = nodes.data['h']
h = nodes.mailbox['e']
eig_s = nodes.mailbox['eig_s']
eig_d = nodes.mailbox['eig_d']
D = h.shape[-2]
to_cat = []
for aggregate in self.aggregators:
try:
to_cat.append(aggregate(self, h, eig_s, eig_d))
except:
to_cat.append(aggregate(self, h, eig_s, eig_d, h_in))
h = torch.cat(to_cat, dim=1)
if len(self.scalers) > 1:
h = torch.cat([scale(h, D=D, avg_d=self.avg_d) for scale in self.scalers], dim=1)
return {'h': h}
def posttrans_nodes(self, nodes):
return self.posttrans(nodes.data['h'])
def forward(self, g, h, e, snorm_n):
h_in = h
g.ndata['h'] = h
if self.edge_features: # add the edges information only if edge_features = True
g.edata['ef'] = e
# pretransformation
g.apply_edges(self.pretrans_edges)
# aggregation
g.update_all(self.message_func, self.reduce_func)
h = torch.cat([h, g.ndata['h']], dim=1)
# posttransformation
h = self.posttrans(h)
# graph and batch normalization and residual
if self.graph_norm:
h = h * snorm_n
if self.batch_norm:
h = self.batchnorm_h(h)
h = F.relu(h)
if self.residual:
h = h_in + h
h = F.dropout(h, self.dropout, training=self.training)
return h
class EIGLayerSimple(nn.Module):
def __init__(self, in_dim, out_dim, dropout, graph_norm, batch_norm, aggregators, scalers, residual, avg_d,
posttrans_layers=1):
super().__init__()
# retrieve the aggregators and scalers functions
aggregators = [AGGREGATORS[aggr] for aggr in aggregators.split()]
scalers = [SCALERS[scale] for scale in scalers.split()]
self.dropout = dropout
self.graph_norm = graph_norm
self.batch_norm = batch_norm
self.residual = residual
self.aggregators = aggregators
self.scalers = scalers
self.batchnorm_h = nn.BatchNorm1d(out_dim)
self.posttrans = MLP(in_size=(len(aggregators) * len(scalers)) * in_dim, hidden_size=out_dim,
out_size=out_dim, layers=posttrans_layers, mid_activation='relu', last_activation='none')
self.avg_d = avg_d
if in_dim != out_dim:
self.residual = False
def pretrans_edges(self, edges):
return {'e': edges.src['h'], 'eig_s': edges.src['eig'], 'eig_d': edges.dst['eig']}
def message_func(self, edges):
return {'e': edges.data['e'], 'eig_s': edges.data['eig_s'].to('cuda' if torch.cuda.is_available() else 'cpu'),
'eig_d': edges.data['eig_d'].to('cuda' if torch.cuda.is_available() else 'cpu')}
def reduce_func(self, nodes):
h_in = nodes.data['h']
h = nodes.mailbox['e']
eig_s = nodes.mailbox['eig_s']
eig_d = nodes.mailbox['eig_d']
D = h.shape[-2]
to_cat = []
for aggregate in self.aggregators:
try:
to_cat.append(aggregate(self, h, eig_s, eig_d))
except:
to_cat.append(aggregate(self, h, eig_s, eig_d, h_in))
h = torch.cat(to_cat, dim=1)
if len(self.scalers) > 1:
h = torch.cat([scale(h, D=D, avg_d=self.avg_d) for scale in self.scalers], dim=1)
return {'h': h}
def posttrans_nodes(self, nodes):
return self.posttrans(nodes.data['h'])
def forward(self, g, h, e, snorm_n):
h_in = h
g.ndata['h'] = h
g.apply_edges(self.pretrans_edges)
# aggregation
g.update_all(self.message_func, self.reduce_func)
h = g.ndata['h']
# posttransformation
h = self.posttrans(h)
# graph and batch normalization and residual
if self.graph_norm:
h = h * snorm_n
if self.batch_norm:
h = self.batchnorm_h(h)
h = F.relu(h)
if self.residual:
h = h_in + h
h = F.dropout(h, self.dropout, training=self.training)
return h
class EIGTower(nn.Module):
def __init__(self, in_dim, out_dim, dropout, graph_norm, batch_norm, aggregators, scalers, avg_d,
pretrans_layers, posttrans_layers, edge_features, edge_dim):
super().__init__()
self.dropout = dropout
self.graph_norm = graph_norm
self.batch_norm = batch_norm
self.edge_features = edge_features
self.aggregators = aggregators
self.scalers = scalers
self.batchnorm_h = nn.BatchNorm1d(out_dim)
self.pretrans = MLP(in_size=2 * in_dim + (edge_dim if edge_features else 0), hidden_size=in_dim,
out_size=in_dim, layers=pretrans_layers, mid_activation='relu', last_activation='none')
self.posttrans = MLP(in_size=(len(aggregators) * len(scalers) + 1) * in_dim,
hidden_size=out_dim,
out_size=out_dim, layers=posttrans_layers, mid_activation='relu', last_activation='none')
self.avg_d = avg_d
def pretrans_edges(self, edges):
if self.edge_features:
z2 = torch.cat([edges.src['h'], edges.dst['h'], edges.data['ef']], dim=1)
else:
z2 = torch.cat([edges.src['h'], edges.dst['h']], dim=1)
return {'e': self.pretrans(z2), 'eig_s': edges.src['eig'], 'eig_d': edges.dst['eig']}
def message_func(self, edges):
return {'e': edges.data['e'], 'eig_s': edges.data['eig_s'].to('cuda' if torch.cuda.is_available() else 'cpu'), 'eig_d': edges.data['eig_d'].to('cuda' if torch.cuda.is_available() else 'cpu')}
def reduce_func(self, nodes):
h_in = nodes.data['h']
h = nodes.mailbox['e']
eig_s = nodes.mailbox['eig_s']
eig_d = nodes.mailbox['eig_d']
D = h.shape[-2]
to_cat = []
for aggregate in self.aggregators:
try:
to_cat.append(aggregate(self, h, eig_s, eig_d))
except:
to_cat.append(aggregate(self, h, eig_s, eig_d, h_in))
h = torch.cat(to_cat, dim=1)
if len(self.scalers) > 1:
h = torch.cat([scale(h, D=D, avg_d=self.avg_d) for scale in self.scalers], dim=1)
return {'h': h}
def posttrans_nodes(self, nodes):
return self.posttrans(nodes.data['h'])
def forward(self, g, h, e, snorm_n):
g.ndata['h'] = h
if self.edge_features: # add the edges information only if edge_features = True
g.edata['ef'] = e
# pretransformation
g.apply_edges(self.pretrans_edges)
# aggregation
g.update_all(self.message_func, self.reduce_func)
h = torch.cat([h, g.ndata['h']], dim=1)
# posttransformation
h = self.posttrans(h)
# graph and batch normalization
if self.graph_norm:
h = h * snorm_n
if self.batch_norm:
h = self.batchnorm_h(h)
h = F.dropout(h, self.dropout, training=self.training)
return h
class EIGLayerTower(nn.Module):
"""
Param: [in_dim, out_dim, n_heads]
"""
def __init__(self, in_dim, out_dim, aggregators, scalers, avg_d, dropout, graph_norm, batch_norm, towers=5,
pretrans_layers=1, posttrans_layers=1, divide_input=True, residual=False, edge_features=False,
edge_dim=0):
super().__init__()
        assert (not divide_input) or in_dim % towers == 0, \
            "if divide_input is set the number of towers has to divide in_dim"
assert (out_dim % towers == 0), "the number of towers has to divide the out_dim"
assert avg_d is not None
# retrieve the aggregators and scalers functions
aggregators = [AGGREGATORS[aggr] for aggr in aggregators.split()]
scalers = [SCALERS[scale] for scale in scalers.split()]
self.divide_input = divide_input
self.input_tower = in_dim // towers if divide_input else in_dim
self.output_tower = out_dim // towers
self.in_dim = in_dim
self.out_dim = out_dim
self.edge_features = edge_features
self.residual = residual
if in_dim != out_dim:
self.residual = False
# convolution
self.towers = nn.ModuleList()
for _ in range(towers):
self.towers.append(EIGTower(in_dim=self.input_tower, out_dim=self.output_tower, aggregators=aggregators,
scalers=scalers, avg_d=avg_d, pretrans_layers=pretrans_layers,
posttrans_layers=posttrans_layers, batch_norm=batch_norm, dropout=dropout,
graph_norm=graph_norm, edge_features=edge_features, edge_dim=edge_dim))
# mixing network
self.mixing_network = FCLayer(out_dim, out_dim, activation='LeakyReLU')
def forward(self, g, h, e, snorm_n):
h_in = h # for residual connection
if self.divide_input:
h_cat = torch.cat( [tower(g, h[:, n_tower * self.input_tower: (n_tower + 1) * self.input_tower], e, snorm_n)
for n_tower, tower in enumerate(self.towers)], dim=1)
else:
h_cat = torch.cat([tower(g, h, e, snorm_n) for tower in self.towers], dim=1)
if len(self.towers) > 1:
h_out = self.mixing_network(h_cat)
else:
h_out = h_cat
if self.residual:
h_out = h_in + h_out # residual connection
return h_out
class EIGLayer(nn.Module):
def __init__(self, in_dim, out_dim, dropout, graph_norm, batch_norm, aggregators, scalers, avg_d, type_net, residual, towers=5, divide_input=True,
edge_features=None, edge_dim=None, pretrans_layers=1, posttrans_layers=1,):
super().__init__()
        self.type_net = type_net
        self.in_dim = in_dim  # stored for __repr__ below
        self.out_dim = out_dim
if type_net == 'simple':
self.model = EIGLayerSimple(in_dim=in_dim, out_dim=out_dim, dropout=dropout, graph_norm=graph_norm, batch_norm=batch_norm, residual=residual,
aggregators=aggregators, scalers=scalers, avg_d=avg_d, posttrans_layers=posttrans_layers)
elif type_net == 'complex':
self.model = EIGLayerComplex(in_dim=in_dim, out_dim=out_dim, dropout=dropout, graph_norm=graph_norm, batch_norm=batch_norm, aggregators=aggregators, residual=residual,
scalers=scalers, avg_d=avg_d, edge_features=edge_features, edge_dim=edge_dim, pretrans_layers=pretrans_layers, posttrans_layers=posttrans_layers)
elif type_net == 'towers':
self.model = EIGLayerTower(in_dim=in_dim, out_dim=out_dim, aggregators=aggregators, scalers=scalers, avg_d=avg_d, dropout=dropout, graph_norm=graph_norm,
batch_norm=batch_norm, towers=towers, pretrans_layers=pretrans_layers, posttrans_layers=posttrans_layers, divide_input=divide_input,
residual=residual, edge_features=edge_features, edge_dim=edge_dim)
def __repr__(self):
return '{}(in_channels={}, out_channels={})'.format(self.__class__.__name__, self.in_dim, self.out_dim) | 37.099723 | 199 | 0.605615 | 13,046 | 0.974091 | 0 | 0 | 0 | 0 | 0 | 0 | 1,275 | 0.095199 |
d79aca613972336652669e46aaf23dbcf659d8b3 | 3,553 | py | Python | NLPEngine/utils/nlp_config.py | hmi-digital/bot_platform | 91a26e566b07fa309774d0333a6bccf3d64cccc5 | [
"MIT"
] | null | null | null | NLPEngine/utils/nlp_config.py | hmi-digital/bot_platform | 91a26e566b07fa309774d0333a6bccf3d64cccc5 | [
"MIT"
] | null | null | null | NLPEngine/utils/nlp_config.py | hmi-digital/bot_platform | 91a26e566b07fa309774d0333a6bccf3d64cccc5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import json
import hashlib
from warnings import simplefilter
from utils import log_util
# ignore all warnings
simplefilter(action='ignore')
##Global parameters
scriptDir = os.path.dirname(__file__)
dataPath = os.path.join(scriptDir, '..', 'training_data', 'intents')
propertyFile = os.path.join(scriptDir, '..', 'config', 'nlp.properties')
separator = "="
properties = {}
def load_parameters() -> None:
global properties
with open(propertyFile) as f:
for line in f:
if separator in line:
name, value = line.split(separator, 1)
properties[name.strip()] = value.strip()
def getProperties():
global properties
return properties
def get_parameter(param):
global properties
res = ""
if param in properties:
res = properties[param]
return res
else:
        log_util.log_infomsg('[NLP_CONFIG] the required parameter {} could not be located'.format(param))
return res
def check_data_available(self) -> bool:
files = os.listdir(dataPath)
for file in files:
if (file.startswith(self.domain)):
if file.endswith(self.format):
return True
else:
pass
return False
def is_config_stale(domain, locale):
global properties
tmpFile = os.path.join(scriptDir, '..', 'training_data', 'tmp', domain + '_hashdump')
try:
tmp = open(tmpFile, 'r')
except IOError:
tmp = open(tmpFile, 'a+')
hash_original = tmp.read()
# need to check if any changes to data, property file or rasa config file
dataFile = os.path.join(dataPath, domain + '_' + locale + '.' + get_parameter('FORMAT'))
data_1 = open(dataFile, 'rb').read()
    # check if any changes in properties
load_parameters()
data_2 = json.dumps(getProperties())
if (get_parameter('ALGORITHM') == 'NLU'):
rasaConfigFile = os.path.join(scriptDir, '..', 'core', 'config', get_parameter('CONFIG_FILE'))
data_3 = open(rasaConfigFile, 'rb').read()
else:
data_3 = None
totalData = str(data_1) + str(data_2) + str(data_3)
hash_current = hashlib.md5(totalData.encode('utf-8')).hexdigest()
if (hash_original == hash_current):
return True
else:
tmp.close()
tmp = open(tmpFile, 'w')
tmp.write(hash_current)
tmp.close()
return False
def ensemble_confidence_score(response_1, response_2):
scores_1 = get_scores(response_1)
scores_2 = get_scores(response_2)
# replace scores_1 with weighted average
for item in scores_1:
if item in scores_2:
scores_1[item] = "{:.2f}".format((float(scores_1[item]) + float(scores_2[item])) / 2)
else:
scores_1[item] = "{:.2f}".format(float(scores_1[item]) / 2)
# update the confidence score with new one
for items in response_1["intent_ranking"]:
if items["name"] in scores_1:
items['confidence'] = scores_1[items["name"]]
# update the intent JSONObject
response_1["intent"]["confidence"] = scores_1[response_1["intent"]["name"]]
return response_1
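# Illustrative example (hypothetical scores): if the first response ranks
# {"greet": 0.80, "bye": 0.10} and the second ranks {"greet": 0.60}, the
# ensembled confidences become {"greet": "0.70", "bye": "0.05"}; intents
# missing from the second ranking are halved rather than dropped.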
def get_scores(response):
scores = {}
for items in response["intent_ranking"]:
scores[items["name"]] = items["confidence"]
return scores
def normalise_entity_score(response):
if len(response["entities"]) != 0:
for items in response["entities"]:
items["confidence_entity"] = "{:.3f}".format(items["confidence_entity"])
return response
| 28.653226 | 102 | 0.633268 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 721 | 0.202927 |
d79b4bcbf6a110684a6330158716bc8642e8c218 | 641 | py | Python | flow/core/gcp_credentials.py | hwknsj/synergy_flow | aba8f57b2cbeeb0368a64eaa7e5369fcef0a3136 | [
"BSD-3-Clause"
] | null | null | null | flow/core/gcp_credentials.py | hwknsj/synergy_flow | aba8f57b2cbeeb0368a64eaa7e5369fcef0a3136 | [
"BSD-3-Clause"
] | 1 | 2016-10-03T18:48:15.000Z | 2019-11-01T21:53:30.000Z | flow/core/gcp_credentials.py | hwknsj/synergy_flow | aba8f57b2cbeeb0368a64eaa7e5369fcef0a3136 | [
"BSD-3-Clause"
] | 1 | 2019-11-02T00:45:26.000Z | 2019-11-02T00:45:26.000Z | import io
import json
from google.auth import compute_engine
from google.oauth2 import service_account
def gcp_credentials(service_account_file):
if service_account_file:
with io.open(service_account_file, 'r', encoding='utf-8') as json_fi:
credentials_info = json.load(json_fi)
credentials = service_account.Credentials.from_service_account_info(credentials_info)
else:
# Explicitly use Compute Engine credentials. These credentials are
# available on Compute Engine, App Engine Flexible, and Container Engine.
credentials = compute_engine.Credentials()
return credentials
| 35.611111 | 93 | 0.75195 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 149 | 0.232449 |
d79b6b0b33508785a7ea10e66bc23630bb1ac265 | 4,218 | py | Python | main.py | tremor6916/sakugabooru-bot | c9e226699e97201dfa1937638adfd30f70d8cbf9 | [
"MIT"
] | 1 | 2021-08-16T14:47:19.000Z | 2021-08-16T14:47:19.000Z | main.py | tremor6916/sakugabooru-bot | c9e226699e97201dfa1937638adfd30f70d8cbf9 | [
"MIT"
] | 2 | 2021-08-12T13:18:50.000Z | 2021-08-14T07:01:28.000Z | main.py | tremor6916/sakugabooru-bot | c9e226699e97201dfa1937638adfd30f70d8cbf9 | [
"MIT"
] | null | null | null | import requests
import tweepy
import random
import time
import os
import bs4
from bs4 import BeautifulSoup
from pybooru import Moebooru
siteurl='https://www.sakugabooru.com/post/show/'
header = {'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'}
client = Moebooru(site_url='https://www.sakugabooru.com')
files = client.post_list(tags="order:random")
api_keys = open("token.txt") #Create your own token.txt file with your API Keys from Twitter
lines = api_keys.readlines()
consumer_key = lines[1].rstrip()
consumer_secret= lines[4].rstrip()
access_token = lines[7].rstrip()
access_token_secret=lines[10].rstrip()
def main():
try:
files = client.post_list(tags="order:random") #Random Post
choice = random.choice(files) #Select 1 Random Post from Query
boorurl=choice['file_url'] #File URL
tags = choice['tags'] #Post Tags
verdict=filetypechecker(boorurl) #Checker if .mp4 file or not
if(verdict):
posturl = siteurl+"{0}".format(choice['id']) #POST URL from SakugaBooru
animatorname=artistgrabber(posturl)
animename=animegrabber(posturl)
time.sleep(5)
data = requests.get(boorurl,headers=header)
print("data:",data.status_code)
with open("C:/Users/Admin/Documents/PersonalFiles/Repositories/sakugabooru-video-files/{}".format(choice['id'])+".mp4",'wb') as file: #Customize Directory
file.write(data.content)
#params="Animator Name: {}\nTags: {}\nPost URL: {}\n".format(animatorname,tags,posturl)
#BETA TESTING
params="Animator Name: {}\nListed Anime Name: {}\nTags: {}\nPost URL: {}\n".format(animatorname,animename,tags,posturl)
#print(params)
time.sleep(5)
mediapost(params)
except Exception as e:
print("Main() Error:",e)
def artistgrabber(posturl):
r = requests.get(posturl,headers=header)
print("artistgrabber:",r.status_code)
soup = bs4.BeautifulSoup(r.text,'lxml')
'''
for div in soup.find_all(class_="sidebar"):
artist=div.find(class_="tag-type-artist").text
artistname=(artist.strip("? "))
'''
for div in soup.find_all(class_="tag-type-artist"):
atags = div.find_all('a')
for artists in atags:
artiststr=artists.text
print(artiststr)
return artiststr
#BETA TESTING
def animegrabber(posturl):
r = requests.get(posturl,headers=header)
print("animegrabber:",r.status_code)
soup = bs4.BeautifulSoup(r.text,'lxml')
for div in soup.find_all(class_="tag-type-copyright"):
atags = div.find_all('a')
for anime in atags:
animestr=anime.text
print(animestr)
return animestr
def filetypechecker(boorurl):
    if '/' in boorurl:
if ".mp4" in (boorurl.rsplit('/',1)[1]):
return True
else:
return False
def mediapost(params):
try:
auth = tweepy.OAuthHandler(consumer_key,consumer_secret)
auth.set_access_token(access_token,access_token_secret)
api = tweepy.API(auth)
except Exception as e:
print (e)
try:
file_path=[]
directory_name='C:/Users/Admin/Documents/PersonalFiles/Repositories/sakugabooru-video-files' #Customize Directory
media_list=filter(lambda x: os.path.isfile(os.path.join(directory_name,x)),os.listdir(directory_name))
media_list=sorted(media_list,key=lambda x: os.path.getmtime(os.path.join(directory_name,x)),reverse=True)
for media in media_list:
file_path.append(os.path.join(directory_name,media))
media=file_path[0]
print(media)
upload_media=api.media_upload(media, media_category='tweet_video')
api.update_status(status=params, media_ids=[upload_media.media_id_string])
except Exception as e:
print("Mediapost() Error:",e)
if __name__ == '__main__':
main() | 35.745763 | 170 | 0.628497 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,162 | 0.275486 |
d79bfd3dab6cfcc04ef37c5a0243968c7e22addb | 491 | py | Python | demos/template/src/urls.py | akornatskyy/wheezy.web | 417834db697cf1f78f3a60cc880b9fd25d40c6de | [
"MIT"
] | 17 | 2020-08-29T18:45:51.000Z | 2022-03-02T19:37:13.000Z | demos/template/src/urls.py | akornatskyy/wheezy.web | 417834db697cf1f78f3a60cc880b9fd25d40c6de | [
"MIT"
] | 29 | 2020-07-18T04:34:03.000Z | 2021-07-06T09:42:36.000Z | demos/template/src/urls.py | akornatskyy/wheezy.web | 417834db697cf1f78f3a60cc880b9fd25d40c6de | [
"MIT"
] | 1 | 2022-03-14T08:41:42.000Z | 2022-03-14T08:41:42.000Z | """
"""
from membership.web.urls import membership_urls
from public.web.urls import error_urls, public_urls, static_urls
from public.web.views import home
from wheezy.routing import url
locale_pattern = "{locale:(en|ru)}/"
locale_defaults = {"locale": "en"}
locale_urls = public_urls + membership_urls
locale_urls.append(("error/", error_urls))
all_urls = [
url("", home, locale_defaults, name="default"),
(locale_pattern, locale_urls, locale_defaults),
]
all_urls += static_urls
| 25.842105 | 64 | 0.747454 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 57 | 0.11609 |
d79c181f52cc059ca3916a4f670fd6822c3c776d | 5,571 | py | Python | olmm.py | wesselb/olmm | 367d69a4a646083d6b3f6ffd1963aeb2b7816ef9 | [
"MIT"
] | 2 | 2020-09-01T17:09:23.000Z | 2021-08-21T12:19:19.000Z | olmm.py | wesselb/olmm | 367d69a4a646083d6b3f6ffd1963aeb2b7816ef9 | [
"MIT"
] | null | null | null | olmm.py | wesselb/olmm | 367d69a4a646083d6b3f6ffd1963aeb2b7816ef9 | [
"MIT"
] | 2 | 2019-11-04T17:38:43.000Z | 2021-08-21T12:19:20.000Z | from stheno import (
B, # Linear algebra backend
Graph, # Graph that keep track of the graphical model
GP, # Gaussian process
EQ, # Squared-exponential kernel
Matern12, # Matern-1/2 kernel
Matern52, # Matern-5/2 kernel
Delta, # Noise kernel
Normal, # Gaussian distribution
Diagonal, # Diagonal matrix
dense, # Convert matrix objects to regular matrices
)
__all__ = ['model', 'project', 'objective', 'predict']
def model(vs, m):
"""Construct model.
Args:
vs (:class:`varz.Vars`): Variable container.
m (int): Number of latent processes.
Returns:
tuple: Tuple containing a list of the latent processes, the
observation noise, and the noises on the latent processes.
"""
g = Graph()
# Observation noise:
noise_obs = vs.bnd(0.1, name='noise_obs')
def make_latent_process(i):
# Long-term trend:
variance = vs.bnd(0.9, name=f'{i}/long_term/var')
scale = vs.bnd(2 * 30, name=f'{i}/long_term/scale')
kernel = variance * EQ().stretch(scale)
# Short-term trend:
variance = vs.bnd(0.1, name=f'{i}/short_term/var')
scale = vs.bnd(20, name=f'{i}/short_term/scale')
kernel += variance * Matern12().stretch(scale)
return GP(kernel, graph=g)
# Latent processes:
xs = [make_latent_process(i) for i in range(m)]
# Latent noises:
noises_latent = vs.bnd(0.1 * B.ones(m), name='noises_latent')
return xs, noise_obs, noises_latent
def project(vs, m, y_data, locs):
"""Project the data.
Args:
vs (:class:`varz.Vars`): Variable container.
m (int): Number of latent processes.
y_data (tensor): Observations.
locs (tensor): Spatial locations of observations.
Returns:
tuple: Tuple containing the projected outputs, the mixing matrix,
S from the mixing matrix, and the observation noises.
"""
_, noise_obs, noises_latent = model(vs, m)
# Construct mixing matrix and projection.
scales = vs.bnd(B.ones(2), name='scales')
K = dense(Matern52().stretch(scales)(locs))
U, S, _ = B.svd(K)
S = S[:m]
H = U[:, :m] * S[None, :] ** .5
T = B.transpose(U[:, :m]) / S[:, None] ** .5
# Project data and unstack over latent processes.
y_proj = B.unstack(B.matmul(T, y_data, tr_b=True))
# Observation noises:
noises_obs = noise_obs * B.ones(B.dtype(noise_obs), B.shape(y_data)[1])
return y_proj, H, S, noises_obs
def objective(vs, m, x_data, y_data, locs):
"""NLML objective.
Args:
vs (:class:`varz.Vars`): Variable container.
m (int): Number of latent processes.
x_data (tensor): Time stamps of the observations.
y_data (tensor): Observations.
locs (tensor): Spatial locations of observations.
Returns:
scalar: Negative log-marginal likelihood.
"""
y_proj, _, S, noises_obs = project(vs, m, y_data, locs)
xs, noise_obs, noises_latent = model(vs, m)
# Add contribution of latent processes.
lml = 0
for i, (x, y) in enumerate(zip(xs, y_proj)):
e_signal = GP((noise_obs / S[i] + noises_latent[i]) * Delta(),
graph=x.graph)
lml += (x + e_signal)(x_data).logpdf(y)
e_noise = GP(noise_obs / S[i] * Delta(), graph=x.graph)
lml -= e_noise(x_data).logpdf(y)
# Add regularisation contribution.
lml += B.sum(Normal(Diagonal(noises_obs)).logpdf(B.transpose(y_data)))
    # Return the negated evidence, normalised by the number of data points.
n, p = B.shape(y_data)
return -lml / (n * p)
def predict(vs, m, x_data, y_data, locs, x_pred):
"""Make predictions.
Args:
vs (:class:`varz.Vars`): Variable container.
m (int): Number of latent processes.
x_data (tensor): Time stamps of the observations.
y_data (tensor): Observations.
locs (tensor): Spatial locations of observations.
x_pred (tensor): Time stamps to predict at.
Returns:
tuple: Tuple containing the predictions for the latent processes and
predictions for the observations.
"""
# Construct model and project data for prediction.
xs, noise_obs, noises_latent = model(vs, m)
y_proj, H, S, noises_obs = project(vs, m, y_data, locs)
L = noise_obs / S + noises_latent
# Condition latent processes.
xs_posterior = []
for x, noise, y in zip(xs, L, y_proj):
e = GP(noise * Delta(), graph=x.graph)
xs_posterior.append(x | ((x + e)(x_data), y))
xs = xs_posterior
# Extract posterior means and variances of the latent processes.
x_means, x_vars = zip(*[(x.mean(x_pred)[:, 0],
x.kernel.elwise(x_pred)[:, 0]) for x in xs])
# Construct predictions for latent processes.
lat_preds = [B.to_numpy(mean,
mean - 2 * (var + L[i]) ** .5,
mean + 2 * (var + L[i]) ** .5)
for i, (mean, var) in enumerate(zip(x_means, x_vars))]
# Pull means through mixing matrix.
x_means = B.stack(*x_means, axis=0)
y_means = B.matmul(H, x_means)
# Pull variances through mixing matrix and add noise.
x_vars = B.stack(*x_vars, axis=0)
y_vars = B.matmul(H ** 2, x_vars + noises_latent[:, None]) + noise_obs
# Construct predictions for observations.
obs_preds = [(mean, mean - 2 * var ** .5, mean + 2 * var ** .5)
for mean, var in zip(y_means, y_vars)]
return lat_preds, obs_preds
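# Illustrative end-to-end sketch (hypothetical names; assumes a varz.Vars
# variable container and arrays shaped as described in the docstrings above):
#
#     vs = Vars(np.float64)
#     m = 3  # number of latent processes
#     # fit hyperparameters by minimising the NLML objective, e.g. with varz
#     minimise_l_bfgs_b(lambda vs_: objective(vs_, m, x_data, y_data, locs), vs)
#     lat_preds, obs_preds = predict(vs, m, x_data, y_data, locs, x_pred)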
| 32.578947 | 76 | 0.605277 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,585 | 0.46401 |
d79c63266fd7ef84a044820de7a71feb3ff446ff | 1,995 | py | Python | tests/molecular/molecules/utilities/building_block.py | andrewtarzia/stk | 1ac2ecbb5c9940fe49ce04cbf5603fd7538c475a | [
"MIT"
] | 21 | 2018-04-12T16:25:24.000Z | 2022-02-14T23:05:43.000Z | tests/molecular/molecules/utilities/building_block.py | JelfsMaterialsGroup/stk | 0d3e1b0207aa6fa4d4d5ee8dfe3a29561abb08a2 | [
"MIT"
] | 8 | 2019-03-19T12:36:36.000Z | 2020-11-11T12:46:00.000Z | tests/molecular/molecules/utilities/building_block.py | supramolecular-toolkit/stk | 0d3e1b0207aa6fa4d4d5ee8dfe3a29561abb08a2 | [
"MIT"
] | 5 | 2018-08-07T13:00:16.000Z | 2021-11-01T00:55:10.000Z | import itertools as it
from tests.utilities import (
is_equivalent_atom,
is_equivalent_molecule,
)
def are_equivalent_functional_groups(
functional_groups1,
functional_groups2,
):
functional_groups = it.zip_longest(
functional_groups1,
functional_groups2,
)
for fg1, fg2 in functional_groups:
is_equivalent_functional_group(fg1, fg2)
def is_equivalent_functional_group(
functional_group1,
functional_group2,
):
assert functional_group1.__class__ is functional_group2.__class__
atoms = it.zip_longest(
functional_group1.get_atoms(),
functional_group2.get_atoms(),
)
for atom1, atom2 in atoms:
is_equivalent_atom(atom1, atom2)
for placer_id1, placer_id2 in it.zip_longest(
functional_group1.get_placer_ids(),
functional_group2.get_placer_ids(),
):
assert placer_id1 == placer_id2
for core_atom_id1, core_atom_id2 in it.zip_longest(
functional_group1.get_core_atom_ids(),
functional_group2.get_core_atom_ids(),
):
assert core_atom_id1 == core_atom_id2
def is_equivalent_building_block(building_block1, building_block2):
is_equivalent_molecule(building_block1, building_block2)
are_equivalent_functional_groups(
functional_groups1=building_block1.get_functional_groups(),
functional_groups2=building_block2.get_functional_groups(),
)
for placer_id1, placer_id2 in it.zip_longest(
building_block1.get_placer_ids(),
building_block2.get_placer_ids(),
):
assert placer_id1 == placer_id2
for core_atom_id1, core_atom_id2 in it.zip_longest(
building_block1.get_core_atom_ids(),
building_block2.get_core_atom_ids(),
):
assert core_atom_id1 == core_atom_id2
def is_clone_building_block(building_block1, building_block2):
assert building_block1 is not building_block2
is_equivalent_building_block(building_block1, building_block2)
| 28.913043 | 69 | 0.736341 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d79c6838f09a05f79a094ede5cbcfc88762b1778 | 36 | py | Python | pydeps/__main__.py | miketheman/pydeps | 907a1a29cc0e04ad3698a812082775ccf39b6479 | [
"BSD-2-Clause"
] | 981 | 2015-07-08T15:55:30.000Z | 2022-03-31T08:53:30.000Z | pydeps/__main__.py | miketheman/pydeps | 907a1a29cc0e04ad3698a812082775ccf39b6479 | [
"BSD-2-Clause"
] | 129 | 2016-09-03T16:51:52.000Z | 2022-03-19T23:07:50.000Z | pydeps/__main__.py | miketheman/pydeps | 907a1a29cc0e04ad3698a812082775ccf39b6479 | [
"BSD-2-Clause"
] | 88 | 2015-03-24T03:25:54.000Z | 2022-03-24T07:35:02.000Z | from .pydeps import pydeps
pydeps()
| 12 | 26 | 0.777778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d79dec81c3204981b242083307d7250e99c10afb | 271 | py | Python | fileInfo.py | Jiaoma/fileStr | f400f607c7ab6923b36ffdda4e6eccdbc12c32cd | [
"MIT"
] | null | null | null | fileInfo.py | Jiaoma/fileStr | f400f607c7ab6923b36ffdda4e6eccdbc12c32cd | [
"MIT"
] | null | null | null | fileInfo.py | Jiaoma/fileStr | f400f607c7ab6923b36ffdda4e6eccdbc12c32cd | [
"MIT"
] | null | null | null | from os import listdir
from os.path import join
import os, errno
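# Return the number of entries (e.g. image files) directly inside rootDir.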
def getImageNum(rootDir):
    return len(listdir(rootDir))
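# Create the directory (and any missing parents), ignoring the error if it already exists.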
def safeMkdir(path: str):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise | 19.357143 | 38 | 0.649446 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d79e215cb8f499e47e5107fabf650f63a2a2ff52 | 305 | py | Python | text-classify/test.py | ubear/MachineLearn | c24947f7078269f51bf7ee0d2167ca6b71152d1e | [
"MIT"
] | null | null | null | text-classify/test.py | ubear/MachineLearn | c24947f7078269f51bf7ee0d2167ca6b71152d1e | [
"MIT"
] | null | null | null | text-classify/test.py | ubear/MachineLearn | c24947f7078269f51bf7ee0d2167ca6b71152d1e | [
"MIT"
] | null | null | null | #coding:utf-8
import re
import math
from docclass import Classifier
def test_infc_func():
c = Classifier(getfeatures=None)
c.infc("python", "good")
c.infc("python", "good")
c.infc("the", "bad")
c.infc("the", "good")
    print(c.fc)
if __name__ == "__main__":
test_infc_func()
| 15.25 | 36 | 0.629508 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 72 | 0.236066 |
d79e31449a493b9c8402aeee5862015258d4756b | 8,560 | py | Python | libs/hermes/hermes.cloudbreak/hermes/cloudbreak/kubernetes/base.py | KAGRA-TW-ML/gw-iaas | 3f4ff0fef16d39b75615b80fedf106a8dd4f846d | [
"MIT"
] | 2 | 2021-12-21T22:34:30.000Z | 2021-12-22T01:26:57.000Z | libs/hermes/hermes.cloudbreak/hermes/cloudbreak/kubernetes/base.py | KAGRA-TW-ML/gw-iaas | 3f4ff0fef16d39b75615b80fedf106a8dd4f846d | [
"MIT"
] | 3 | 2021-11-09T21:38:26.000Z | 2022-02-26T20:44:31.000Z | libs/hermes/hermes.cloudbreak/hermes/cloudbreak/kubernetes/base.py | KAGRA-TW-ML/gw-iaas | 3f4ff0fef16d39b75615b80fedf106a8dd4f846d | [
"MIT"
] | 3 | 2021-08-07T17:19:10.000Z | 2022-02-11T02:00:38.000Z | import abc
import time
from dataclasses import dataclass
from functools import partial
from typing import TYPE_CHECKING
import kubernetes
from kubernetes.utils.create_from_yaml import FailToCreateError
from urllib3.exceptions import MaxRetryError
from hermes.cloudbreak.utils import snakeify, wait_for
if TYPE_CHECKING:
from hermes.cloudbreak.kubernetes import K8sApiClient
@dataclass
class Resource(abc.ABC):
_client: "K8sApiClient"
name: str
namespace: str = "default"
MAX_RETRY_GRACE_SECONDS = 300
STATUS_AVAILABLE_GRACE_SECONDS = 10
@classmethod
def create(cls, client, config):
if config["kind"] == "Deployment":
cls = Deployment
elif config["kind"] == "Service":
cls = Service
elif config["kind"] == "DaemonSet":
cls = DaemonSet
else:
raise ValueError(
"Resource kind {} not supported yet".format(config["kind"])
)
metadata = config["metadata"]
obj = cls(client, metadata["name"], metadata["namespace"])
create_fn = partial(
kubernetes.utils.create_from_dict,
k8s_client=client._client,
data=config,
)
response = obj._make_a_request(create_fn)
if response is None:
raise MaxRetryError
return obj
def __post_init__(self):
self._creation_time = time.time()
self._unavailable = False
self._unavailable_time = None
@abc.abstractproperty
def client(self):
pass
def _make_a_request(self, request_fn, do_raise=False):
try:
# try to make the request
return request_fn()
except (
kubernetes.client.exceptions.ApiException,
FailToCreateError,
) as e:
try:
# create from yaml wraps around API exceptions,
# so grab the underlying exception here first
status = e.api_exceptions[0].status
except AttributeError:
status = e.status
if status != 401:
raise
if not do_raise:
self._client.cluster.refresh_credentials()
self._client._client.configuration.api_key[
"authorization"
] = self._client.cluster.token
# try the request again with do_raise set to
# true to indicate that these credentials just
# don't have access to this cluster
return self._make_a_request(request_fn, do_raise=True)
else:
# if do_raise is set, indicate that the request
# is unauthorized
raise RuntimeError("Unauthorized request to cluster")
except MaxRetryError:
# sometimes this error can get raised if the master nodes
# of the cluster are busy doing something. Return None
# to indicate this is happening but give things a few
# minutes to get back to normal
if not self._unavailable:
self._unavailable = True
self._unavailable_time = time.time()
elif (
time.time() - self._unavailable_time
) < self.MAX_RETRY_GRACE_SECONDS:
raise RuntimeError(
"Deployment {} has been unavailable for {} seconds".format(
self.name, self.MAX_RETRY_GRACE_SECONDS
)
)
return None
except Exception as e:
print(type(e), e)
raise
def get(self):
resource_type = snakeify(self.__class__.__name__)
get_fn = partial(
getattr(self.client, f"read_namespaced_{resource_type}_status"),
name=self.name,
namespace=self.namespace,
)
try:
response = self._make_a_request(get_fn)
self._unavailable = False
return response
except kubernetes.client.ApiException as e:
if e.status == 404:
raise RuntimeError(f"{self.message} no longer exists")
raise
def delete(self):
resource_type = snakeify(self.__class__.__name__)
delete_fn = partial(
            getattr(self.client, f"delete_namespaced_{resource_type}"),
name=self.name,
namespace=self.namespace,
)
return self._make_a_request(delete_fn)
@abc.abstractmethod
def is_ready(self):
pass
def wait_for_ready(self):
wait_for(
self.is_ready,
f"Waiting for {self.message} to become ready",
)
def submit_delete(self):
try:
response = self.delete()
return response is not None
except kubernetes.client.ApiException as e:
if e.status == 404:
return True
raise
def is_deleted(self):
try:
self.get()
except RuntimeError as e:
if str(e).endswith("no longer exists"):
return True
raise
else:
return False
def remove(self):
if not self.submit_delete():
wait_for(
self.submit_delete,
f"Waiting for {self.message} to become available to delete",
)
if not self.is_deleted():
# give us a chance to not have to display the progress bar
wait_for(self.is_deleted, f"Waiting for {self.message} to delete")
else:
# TODO: logging?
print(f"Deleted {self.message}")
# TODO: remove this from self._client resources?
@property
def message(self):
resource_type = snakeify(self.__class__.__name__).replace("_", " ")
return " ".join([resource_type, self.name])
class Deployment(Resource):
@property
def client(self):
return kubernetes.client.AppsV1Api(self._client._client)
# TODO: custom wait that clocks that the number of available instances
def is_ready(self):
response = self.get()
if response is None:
return False
conditions = response.status.conditions
if conditions is None:
return False
statuses = {i.type: eval(i.status) for i in conditions}
if len(statuses) == 0 and (
(time.time() - self._creation_time)
> self.STATUS_AVAILABLE_GRACE_SECONDS
):
raise RuntimeError(
"Deployment {} has gone {} seconds with no "
"available status information".format(
self.name, self.STATUS_AVAILABLE_GRACE_SECONDS
)
)
try:
if statuses["Available"]:
return True
except KeyError:
try:
if not statuses["Progressing"]:
raise RuntimeError(f"{self.message} stopped progressing")
except KeyError:
return False
def scale(self, replicas: int):
response = self.get()
if response is None:
return False
response.spec.replicas = replicas
scale_fn = partial(
self.client.patch_namespaced_deployment_scale,
name=self.name,
namespace=self.namespace,
body=response,
)
return self._make_a_request(scale_fn)
@dataclass
class Service(Resource):
"""Really represents specifically a LoadBalancer"""
def __post_init__(self):
self._ip = None
@property
def client(self):
return kubernetes.client.CoreV1Api(self._client._client)
@property
def ip(self):
if self._ip is None:
response = self.get()
if response is None:
return None
try:
self._ip = response.status.load_balancer.ingress[0].ip
except TypeError:
return None
return self._ip
def is_ready(self):
# server is considered ready once it has a public IP address
return self.ip is not None
class DaemonSet(Resource):
@property
def client(self):
return kubernetes.client.AppsV1Api(self._client._client)
def is_ready(self):
response = self.get()
if response is None:
return False
status = response.status
return status.desired_number_scheduled == status.number_ready
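# Example usage (sketch; `api_client` and `config_dict` are assumptions -- a K8sApiClient
# instance and a parsed resource YAML dict are built elsewhere in hermes.cloudbreak):
# deployment = Resource.create(api_client, config_dict)
# deployment.wait_for_ready()
# deployment.scale(3)
# deployment.remove()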
| 30.140845 | 79 | 0.573248 | 8,145 | 0.951519 | 0 | 0 | 6,439 | 0.75222 | 0 | 0 | 1,493 | 0.174416 |
d79fcf121072d7dc68656d391c409e397c811e06 | 333 | py | Python | src/chapter2/exercise7.py | group6BCS1/BCS-2021 | 272b1117922163cde03901cfdd82f8e0cfab9a67 | [
"MIT"
] | null | null | null | src/chapter2/exercise7.py | group6BCS1/BCS-2021 | 272b1117922163cde03901cfdd82f8e0cfab9a67 | [
"MIT"
] | null | null | null | src/chapter2/exercise7.py | group6BCS1/BCS-2021 | 272b1117922163cde03901cfdd82f8e0cfab9a67 | [
"MIT"
] | null | null | null | c = float(input("Enter Amount Between 0-99 :"))
print(c // 20, "Twenties")
c = c % 20
print(c // 10, "Tens")
c = c % 10
print(c // 5, "Fives")
c = c % 5
print(c // 1, "Ones")
c = c % 1
print(c // 0.25, "Quarters")
c = c % 0.25
print(c // 0.1, "Dimes")
c = c % 0.1
print(c // 0.05, "Nickles")
c = c % 0.05
print(c // 0.01, "Pennies")
| 19.588235 | 47 | 0.51952 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 93 | 0.279279 |
d7a074b6e62790e1749e72555067425bbae6f923 | 4,772 | py | Python | gmn/src/d1_gmn/tests/test_content_disposition.py | DataONEorg/d1_python | dfab267c3adea913ab0e0073ed9dc1ee50b5b8eb | [
"Apache-2.0"
] | 15 | 2016-10-28T13:56:52.000Z | 2022-01-31T19:07:49.000Z | gmn/src/d1_gmn/tests/test_content_disposition.py | DataONEorg/d1_python | dfab267c3adea913ab0e0073ed9dc1ee50b5b8eb | [
"Apache-2.0"
] | 56 | 2017-03-16T03:52:32.000Z | 2022-03-12T01:05:28.000Z | gmn/src/d1_gmn/tests/test_content_disposition.py | DataONEorg/d1_python | dfab267c3adea913ab0e0073ed9dc1ee50b5b8eb | [
"Apache-2.0"
] | 11 | 2016-05-31T16:22:02.000Z | 2020-10-05T14:37:10.000Z | #!/usr/bin/env python
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test that the Content-Disposition holds the correct filename."""
import logging
import os
import re
import freezegun
import pytest
import responses
import d1_gmn.tests.gmn_mock
import d1_gmn.tests.gmn_test_case
import d1_test.d1_test_case
import d1_test.instance_generator
import d1_test.instance_generator.identifier
logger = logging.getLogger(__name__)
@d1_test.d1_test_case.reproducible_random_decorator("TestContentDisposition")
class TestContentDisposition(d1_gmn.tests.gmn_test_case.GMNTestCase):
@responses.activate
def _check(
self,
client,
did,
sysmeta_filename,
sysmeta_format_id,
expected_base_name,
expected_file_ext,
):
with freezegun.freeze_time("1981-05-02"):
with d1_gmn.tests.gmn_mock.disable_auth():
base_name, file_ext = self._create_obj(
client, did, sysmeta_filename, sysmeta_format_id
)
assert base_name == expected_base_name
assert file_ext == expected_file_ext
def _create_obj(self, client, did, sysmeta_filename, sysmeta_format_id):
pid, sid, send_sciobj_bytes, send_sysmeta_pyxb = self.create_obj(
client, pid=did, fileName=sysmeta_filename, formatId=sysmeta_format_id
)
# View response
response = client.get(pid)
# self.sample.gui_sxs_diff(response, "", "response")
# View SysMeta
# self.sample.gui_sxs_diff(client.getSystemMetadata(pid), "", "sysmeta")
return self._extract_filename(response)
def _extract_filename(self, response):
file_name = re.search(
r'filename="(.*)"', response.headers["Content-Disposition"]
).group(1)
return os.path.splitext(file_name)
def test_1000(self, gmn_client_v2):
"""SciObj without fileName returns filename generated from PID and formatId.
When formatId is unknown, returns filename with extension, ".data".
"""
pid = d1_test.instance_generator.identifier.generate_pid()
self._check(gmn_client_v2, pid, None, "unknown_format_id", pid, ".data")
@pytest.mark.parametrize(
"format_id,file_ext",
[
("text/tsv", ".tsv"),
("video/x-ms-wmv", ".wmv"),
("-//ecoinformatics.org//eml-access-2.0.0beta4//EN", ".xml"),
],
)
def test_1010(self, gmn_client_v2, format_id, file_ext):
"""SciObj without fileName returns filename generated from PID and formatId.
When formatId is valid, returns filename with extension from objectFormatList.
"""
pid = d1_test.instance_generator.identifier.generate_pid()
self._check(gmn_client_v2, pid, None, format_id, pid, file_ext)
@pytest.mark.parametrize(
"format_id,file_ext,base_name",
[
("text/tsv", ".tsv", "myfile"),
("video/x-ms-wmv", ".wmv", "my video file"),
(
"-//ecoinformatics.org//eml-access-2.0.0beta4//EN",
".xml",
"An EML XML file",
),
],
)
def test_1020(self, gmn_client_v2, format_id, base_name, file_ext):
"""SciObj with fileName without extension returns filename generated from
fileName and formatId.
When formatId is valid, returns filename with extension from objectFormatList.
"""
pid = d1_test.instance_generator.identifier.generate_pid()
self._check(gmn_client_v2, pid, base_name, format_id, base_name, file_ext)
def test_1030(self, gmn_client_v2):
"""SciObj with fileName without extension returns filename generated from
fileName and formatId.
When formatId is unknown, returns filename with extension, ".data".
"""
pid = d1_test.instance_generator.identifier.generate_pid()
self._check(gmn_client_v2, pid, pid, "unknown_format_id", pid, ".data")
| 35.61194 | 86 | 0.667854 | 3,518 | 0.737217 | 0 | 0 | 3,596 | 0.753562 | 0 | 0 | 2,159 | 0.452431 |
d7a429ba73c168c4f1bfd1cea778f28be08cf20c | 2,159 | py | Python | tests/test_models/test_ner_model.py | xyzhu8/mmocr | f62b4513f5411bde9f24e1902b1cb1945340022a | [
"Apache-2.0"
] | null | null | null | tests/test_models/test_ner_model.py | xyzhu8/mmocr | f62b4513f5411bde9f24e1902b1cb1945340022a | [
"Apache-2.0"
] | null | null | null | tests/test_models/test_ner_model.py | xyzhu8/mmocr | f62b4513f5411bde9f24e1902b1cb1945340022a | [
"Apache-2.0"
] | null | null | null | import copy
import os.path as osp
import tempfile
import pytest
import torch
from mmocr.models import build_detector
def _create_dummy_vocab_file(vocab_file):
with open(vocab_file, 'w') as fw:
for char in list(map(chr, range(ord('a'), ord('z') + 1))):
fw.write(char + '\n')
def _get_config_module(fname):
"""Load a configuration as a python module."""
from mmcv import Config
config_mod = Config.fromfile(fname)
return config_mod
def _get_detector_cfg(fname):
"""Grab configs necessary to create a detector.
These are deep copied to allow for safe modification of parameters without
influencing other tests.
"""
config = _get_config_module(fname)
model = copy.deepcopy(config.model)
return model
@pytest.mark.parametrize(
'cfg_file', ['configs/ner/bert_softmax/bert_softmax_cluener_18e.py'])
def test_bert_softmax(cfg_file):
# prepare data
texts = ['中'] * 47
img = [31] * 47
labels = [31] * 128
input_ids = [0] * 128
attention_mask = [0] * 128
token_type_ids = [0] * 128
img_metas = {
'texts': texts,
'labels': torch.tensor(labels).unsqueeze(0),
'img': img,
'input_ids': torch.tensor(input_ids).unsqueeze(0),
'attention_masks': torch.tensor(attention_mask).unsqueeze(0),
'token_type_ids': torch.tensor(token_type_ids).unsqueeze(0)
}
# create dummy data
tmp_dir = tempfile.TemporaryDirectory()
vocab_file = osp.join(tmp_dir.name, 'fake_vocab.txt')
_create_dummy_vocab_file(vocab_file)
model = _get_detector_cfg(cfg_file)
model['label_convertor']['vocab_file'] = vocab_file
detector = build_detector(model)
losses = detector.forward(img, img_metas)
assert isinstance(losses, dict)
model['loss']['type'] = 'MaskedFocalLoss'
detector = build_detector(model)
losses = detector.forward(img, img_metas)
assert isinstance(losses, dict)
tmp_dir.cleanup()
# Test forward test
with torch.no_grad():
batch_results = []
result = detector.forward(None, img_metas, return_loss=False)
batch_results.append(result)
| 27.679487 | 78 | 0.673923 | 0 | 0 | 0 | 0 | 1,385 | 0.640907 | 0 | 0 | 482 | 0.223045 |
d7a7c1b8bf94815da1b2a43d746bb8543d773174 | 1,106 | py | Python | Cita.py | Desquivel501/Backend_Proyecto2_IPC1 | b983682eef4bcc379f6d57bf034675bf623fb33d | [
"Apache-2.0"
] | null | null | null | Cita.py | Desquivel501/Backend_Proyecto2_IPC1 | b983682eef4bcc379f6d57bf034675bf623fb33d | [
"Apache-2.0"
] | null | null | null | Cita.py | Desquivel501/Backend_Proyecto2_IPC1 | b983682eef4bcc379f6d57bf034675bf623fb33d | [
"Apache-2.0"
] | null | null | null | class Cita:
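    """Plain data holder for a medical appointment: id, requester, date, time, reason, status and assigned doctor."""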
def __init__(self,id,solicitante,fecha,hora,motivo,estado,doctor):
self.id = id
self.solicitante = solicitante
self.fecha = fecha
self.hora = hora
self.motivo = motivo
self.estado = estado
self.doctor = doctor
def getId(self):
return self.id
def getSolicitante(self):
return self.solicitante
def getFecha(self):
return self.fecha
def getHora(self):
return self.hora
def getMotivo(self):
return self.motivo
def getEstado(self):
return self.estado
def getDoctor(self):
return self.doctor
def setSolicitante(self,solicitante):
self.solicitante = solicitante
def setFecha(self,fecha):
self.fecha = fecha
def setHora(self,hora):
self.hora = hora
def setMotivo(self,motivo):
self.motivo = motivo
def setEstado(self,estado):
self.estado = estado
def setDoctor(self,doctor):
self.doctor = doctor
| 20.867925 | 70 | 0.567812 | 1,095 | 0.990054 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d7a7ff3bd6f840a0858a02d02a7395eeedc612d9 | 3,333 | py | Python | recohut/models/layers/graph.py | sparsh-ai/recohut | 4121f665761ffe38c9b6337eaa9293b26bee2376 | [
"Apache-2.0"
] | null | null | null | recohut/models/layers/graph.py | sparsh-ai/recohut | 4121f665761ffe38c9b6337eaa9293b26bee2376 | [
"Apache-2.0"
] | 1 | 2022-01-12T05:40:57.000Z | 2022-01-12T05:40:57.000Z | recohut/models/layers/graph.py | RecoHut-Projects/recohut | 4121f665761ffe38c9b6337eaa9293b26bee2376 | [
"Apache-2.0"
] | null | null | null | # AUTOGENERATED! DO NOT EDIT! File to edit: nbs/models/layers/models.layers.graph.ipynb (unless otherwise specified).
__all__ = ['FiGNN_Layer', 'GraphLayer']
# Cell
import torch
import torch.nn as nn
import torch.nn.functional as F
from itertools import product
# Cell
class FiGNN_Layer(nn.Module):
def __init__(self,
num_fields,
embedding_dim,
gnn_layers=3,
reuse_graph_layer=False,
use_gru=True,
use_residual=True,
device=None):
super(FiGNN_Layer, self).__init__()
self.num_fields = num_fields
self.embedding_dim = embedding_dim
self.gnn_layers = gnn_layers
self.use_residual = use_residual
self.reuse_graph_layer = reuse_graph_layer
self.device = device
if reuse_graph_layer:
self.gnn = GraphLayer(num_fields, embedding_dim)
else:
self.gnn = nn.ModuleList([GraphLayer(num_fields, embedding_dim)
for _ in range(gnn_layers)])
self.gru = nn.GRUCell(embedding_dim, embedding_dim) if use_gru else None
self.src_nodes, self.dst_nodes = zip(*list(product(range(num_fields), repeat=2)))
self.leaky_relu = nn.LeakyReLU(negative_slope=0.01)
self.W_attn = nn.Linear(embedding_dim * 2, 1, bias=False)
def build_graph_with_attention(self, feature_emb):
src_emb = feature_emb[:, self.src_nodes, :]
dst_emb = feature_emb[:, self.dst_nodes, :]
concat_emb = torch.cat([src_emb, dst_emb], dim=-1)
alpha = self.leaky_relu(self.W_attn(concat_emb))
alpha = alpha.view(-1, self.num_fields, self.num_fields)
mask = torch.eye(self.num_fields).to(self.device)
        alpha = alpha.masked_fill(mask.bool(), float('-inf'))
graph = F.softmax(alpha, dim=-1) # batch x field x field without self-loops
return graph
def forward(self, feature_emb):
g = self.build_graph_with_attention(feature_emb)
h = feature_emb
for i in range(self.gnn_layers):
if self.reuse_graph_layer:
a = self.gnn(g, h)
else:
a = self.gnn[i](g, h)
if self.gru is not None:
a = a.view(-1, self.embedding_dim)
h = h.view(-1, self.embedding_dim)
h = self.gru(a, h)
h = h.view(-1, self.num_fields, self.embedding_dim)
else:
h = a + h
if self.use_residual:
h += feature_emb
return h
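# Example usage (sketch; the batch size, field count and embedding size below are
# illustrative assumptions, not taken from the original notebook):
# layer = FiGNN_Layer(num_fields=4, embedding_dim=16, gnn_layers=2, device='cpu')
# refined = layer(torch.randn(8, 4, 16))  # -> (8, 4, 16), one refined embedding per field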
# Cell
class GraphLayer(nn.Module):
def __init__(self, num_fields, embedding_dim):
super(GraphLayer, self).__init__()
self.W_in = torch.nn.Parameter(torch.Tensor(num_fields, embedding_dim, embedding_dim))
self.W_out = torch.nn.Parameter(torch.Tensor(num_fields, embedding_dim, embedding_dim))
nn.init.xavier_normal_(self.W_in)
nn.init.xavier_normal_(self.W_out)
self.bias_p = nn.Parameter(torch.zeros(embedding_dim))
def forward(self, g, h):
h_out = torch.matmul(self.W_out, h.unsqueeze(-1)).squeeze(-1) # broadcast multiply
aggr = torch.bmm(g, h_out)
a = torch.matmul(self.W_in, aggr.unsqueeze(-1)).squeeze(-1) + self.bias_p
return a | 40.646341 | 117 | 0.610561 | 3,052 | 0.915692 | 0 | 0 | 0 | 0 | 0 | 0 | 228 | 0.068407 |
d7a8eecbc304f83f8cc307ef1ff32ce4d5ad23c6 | 20,638 | py | Python | euchre.py | duck57/euchre.py | 838ef7b3bdd9e3bb6af6edce532300b004a4f1a4 | [
"0BSD"
] | 1 | 2020-03-26T00:00:18.000Z | 2020-03-26T00:00:18.000Z | euchre.py | duck57/euchre.py | 838ef7b3bdd9e3bb6af6edce532300b004a4f1a4 | [
"0BSD"
] | null | null | null | euchre.py | duck57/euchre.py | 838ef7b3bdd9e3bb6af6edce532300b004a4f1a4 | [
"0BSD"
] | null | null | null | #!venv/bin/python
# coding=UTF-8
# -*- coding: UTF-8 -*-
# vim: set fileencoding=UTF-8 :
"""
Double-deck bid euchre
Implementation is similar to the rules given by Craig Powers
https://www.pagat.com/euchre/bideuch.html
Notable differences (to match how I learned in high school calculus) include:
* Minimum bid of 6 (which can be stuck to the dealer)
* Shooters and loners are separate bids (guessing as ±18 for shooter, similar to a loner)
* Shooters are a mandatory 2 card exchange with your partner
* Trump isn't announced until after bidding has concluded
* Winner of bid leads the first hand
* Winning your bid gives you (tricks earned + 2) points
Mothjab is a funny word with no current meaning.
"""
from cardstock import *
debug: Optional[bool] = False
o: Optional[TextIO] = None
log_dir: str = game_out_dir(os.path.basename(__file__).split(".py")[0])
def p(msg):
global o
click.echo(msg, o)
def px(msg) -> None:
global debug
if debug:
p(msg)
class EuchrePlayer(BasePlayer, abc.ABC):
desired_trump: Bid
def __init__(self, g: "GameType", /, name: str, is_bot: int = 1, **kwargs):
super().__init__(g, name, is_bot)
self.tricks: int = 0
self.bid_estimates: Dict[Bid, int] = {}
self.reset_bids()
def reset_bids(self) -> None:
for t in Bid:
self.bid_estimates[t] = 0
@property
def shoot_strength(self) -> int:
return self.in_game.shoot_strength
@property
def choose_trump(self) -> Bid:
return self.desired_trump
@abc.abstractmethod
def make_bid(
self,
valid_bids: List[int],
min_bid: int = 0,
leading_player: "Optional[EuchrePlayer]" = None,
) -> int:
pass
def trumpify_hand(self, trump_suit: Optional[Suit], is_lo: bool = False) -> None:
"""Marks the trump suit and sort the hands"""
if trump_suit:
self.hand.trumpify(trump_suit)
self.sort_hand(is_lo)
def receive_shooter(self, **kwargs) -> None:
shot = PassList(
list(self.teammates),
directions=[pass_shoot] * self.in_game.shoot_strength,
specific_destination=cycle([self]),
sort_low=self.in_game.low_win,
)
shot.collect_cards()
shot.distribute_cards()
class HumanPlayer(BaseHuman, EuchrePlayer):
def __init__(self, g: "GameType", /, name: str):
BaseHuman.__init__(self, g, name)
EuchrePlayer.__init__(self, g, name, 0)
@property
def choose_trump(self) -> Bid:
p(self.hand) # give a closer look at your hand before bidding
bids: List[str] = [c for c in Bid.__members__]
bids.extend([Bid[c].short_name for c in Bid.__members__])
bid: str = click.prompt(
"Declare Trump", type=click.Choice(bids, False), show_choices=False,
).upper()
return Bid[[b for b in Bid.__members__ if (bid in b)][0]]
def make_bid(
self,
valid_bids: List[int],
min_bid: int = 0,
leading_player: "Optional[EuchrePlayer]" = None,
) -> int:
self.hand.sort(key=key_display4human)
p(self.hand)
return int(
click.prompt(
"How much to bid",
type=click.Choice(
["0"] + [str(x) for x in valid_bids if (x >= min_bid)], False,
),
)
)
class ComputerPlayer(BaseComputer, EuchrePlayer):
sort_key = key_trump_power
def __init__(self, g: "GameType", /, name: str):
BaseComputer.__init__(self, g, name)
EuchrePlayer.__init__(self, g, name, 1)
def make_bid(
self,
valid_bids: List[int],
min_bid: int = 0,
leading_player: "Optional[EuchrePlayer]" = None,
) -> int:
if max(self.bid_estimates.values()) == 0:
self.bid_estimates = {
t: self.simulate_hand(
h_p=deepcopy(self.hand),
d_p=deepcopy(self.card_count),
handedness=self.in_game.handedness,
t=t,
)
for t in Bid
}
# pick the biggest
# any decisions based on the current winning bid should happen here
bid: int = max(self.bid_estimates.values())
self.desired_trump = random.choice(
[k for k in self.bid_estimates.keys() if (self.bid_estimates[k] == bid)]
)
# don't outbid your partner (within reason)
if leading_player in self.teammates and bid - min_bid < 2:
return 0
# can you do it by yourself?
if bid == len(self.hand) - 1:
return valid_bids[-2] # call a shooter
elif bid == len(self.hand):
return valid_bids[-1] # call a loner
# don't bid outrageously if you don't have to
# count on two tricks from your partner
return bid + self.shoot_strength * len(self.teammates)
def pick_card(self, valid_cards: Hand, **kwargs,) -> Card:
tp: Trick = kwargs.get("trick_in_progress")
is_low: bool = kwargs.get("is_low")
unplayed: Hand = self.card_count
broken: Dict[Suit, Union[Team, None, bool]] = self.in_game.suit_safety
# TODO be less stupid with large games (>4 players)
def winning_leads(ss: List[Suit], st: bool = True) -> List[Card]:
wl: List[Card] = []
for s in ss:
wl.extend(
self.estimate_tricks_by_suit(
follow_suit(s, valid_cards, True),
follow_suit(s, unplayed, True),
is_low,
strict=st,
)
)
return wl
if not tp: # you have the lead
safer_suits: List[Suit] = [
s for s in broken.keys() if broken[s] is False or broken[s] == self.team
] if broken else suits
w: List[Card] = []
if safer_suits: # unbroken suits to lead aces
px("Checking suits")
w += winning_leads(safer_suits)
else: # lead with good trump
px("Leading with a good trump")
w += winning_leads([Suit.TRUMP])
if not w: # try a risky ace
px("Risky bet")
w += winning_leads(suits, st=bool(self.teammates))
if not w and self.teammates: # seamless passing of the lead
is_low = not is_low
w += winning_leads(suits + [Suit.TRUMP], st=False)
px("Lead pass")
if not w: # YOLO time
px("YOLO")
return random.choice(valid_cards)
px(w)
return random.choice(w)
# you don't have the lead
# win if you can (and the current lead isn't on your team)
# play garbage otherwise
junk_ranks: Set[Rank] = (
{Rank.ACE_HI, Rank.KING} if is_low else {Rank.NINE, Rank.TEN, Rank.JACK}
) | {Rank.QUEEN}
wc, wp = tp.winner(is_low)
w = Hand(c for c in valid_cards if c.beats(wc, is_low))
junk_cards = Hand(h for h in valid_cards if h not in w)
if w: # you have something that can win
if wp in self.teammates and junk_cards: # your partner is winning
if wc.rank in junk_ranks: # but their card is rubbish
return random.choice(w)
return random.choice(junk_cards)
return random.choice(w)
return random.choice(junk_cards)
def simulate_hand(self, *, h_p: Hand, d_p: Hand, t: Bid, **kwargs) -> int:
def slice_by_suit(h: Hand, s: Suit) -> Hand:
return follow_suit(
s,
sorted(
h.trumpify(t.trump_suit), key=key_trump_power, reverse=not t.is_low,
),
strict=True,
ok_empty=True,
)
return sum(
[
len(
self.estimate_tricks_by_suit(
my_suit=slice_by_suit(h_p, s),
mystery_suit=slice_by_suit(d_p, s),
is_low=t.is_low,
is_trump=(s == Suit.TRUMP),
)
)
for s in suits + [Suit.TRUMP]
]
)
@staticmethod
def estimate_tricks_by_suit(
my_suit: Iterable[Card],
mystery_suit: Iterable[Card],
is_low: bool,
is_trump: Optional[bool] = False,
strict: bool = False,
) -> Hand:
"""
Slices up your hand and unplayed cards to estimate which suit has the most potential
:param my_suit: list of your cards presumed of the same suit
:param mystery_suit: unplayed cards of the suit
:param is_low: lo no?
:param is_trump: unused
:param strict: True to pick a trick, False to estimate total tricks in a hand
:return: winning cards for the suit
"""
est = Hand()
for rank in (
euchre_ranks
if is_low
else [Rank.RIGHT_BOWER, Rank.LEFT_BOWER] + list(reversed(euchre_ranks))
):
me: List[Card] = match_by_rank(my_suit, rank)
oth: List[Card] = match_by_rank(mystery_suit, rank)
# p(f"{me} {rank} {oth}") # debugging
est.extend(me)
if oth and (strict or not me and not strict):
break # there are mystery cards that beat your cards
return est
class Team(BaseTeam, MakesBid, WithScore):
def __init__(self, players: Iterable[BasePlayer]):
BaseTeam.__init__(self, players)
MakesBid.__init__(self)
WithScore.__init__(self)
self.bid_history: List[str] = []
self.tricks_taken: List[int] = []
def hand_tab(self, hand: Optional[int], tab: str = "\t") -> str:
return tab.join(
[
str(self.bid_history[hand]),
str(self.tricks_taken[hand]),
str(self.score_changes[hand]),
]
if hand is not None
else [
str(sum([1 for b in self.bid_history if b != str(None)])),
str(sum(self.tricks_taken)),
str(self.score),
]
)
class BidEuchre(BaseGame):
def __init__(self, *, minimum_bid: int = 6, **kwargs):
"""
A game of bid euchre
:param minimum_bid: minimum bid that will get stuck to the dealer
:param kwargs: things to pass along to BaseGame
"""
# setup for the super() call
if not kwargs.get("deck_replication"):
kwargs["deck_replication"] = 2
if not kwargs.get("team_size"):
kwargs["team_size"] = (
2 if (h := kwargs.get("handedness")) and not (h % 2) else 1
)
if kwargs.get("pass_size") is None:
kwargs["pass_size"] = 2
if kwargs.get("minimum_kitty_size") is None:
kwargs["minimum_kitty_size"] = 0
if not kwargs.get("minimum_hand_size"):
kwargs["minimum_hand_size"] = 8
super().__init__(
human_player_type=HumanPlayer,
computer_player_type=ComputerPlayer,
team_type=Team,
game_name="Euchre",
deck_generator=make_euchre_deck,
**kwargs,
)
self.trump: Optional[Suit] = None
self.low_win: bool = False
# set the bidding
c = configparser.ConfigParser()
c.read("constants.cfg")
minimum_bid: int = minimum_bid if minimum_bid else (
6 if self.handedness == 3 else (self.hand_size // 2)
)
self.valid_bids: List[int] = [
i for i in range(minimum_bid, self.hand_size + 1)
] + (
[round(self.hand_size * 1.5), self.hand_size * 2]
if len(self.teams) != len(self.players)
else []
)
if (
self.victory_threshold is not None and self.victory_threshold > 0
): # negative thresholds get dunked on
self.mercy_rule: int = -self.victory_threshold
self.bad_ai_end: int = -self.victory_threshold // 2
else:
self.victory_threshold: int = c["Scores"].getint("victory")
self.mercy_rule: int = c["Scores"].getint("mercy")
self.bad_ai_end: int = c["Scores"].getint("broken_ai")
@property
def shoot_strength(self) -> int:
"""Alias so I don't break existing code"""
return self.pass_size
def bidding(self, bid_order: List[EuchrePlayer]) -> EuchrePlayer:
first_round: bool = True
count: int = 1
hands: int = len(bid_order)
wp: Optional[EuchrePlayer] = None
wb: int = 0
bid_order = cycle(bid_order)
min_bid: int = min(self.valid_bids)
max_bid: int = max(self.valid_bids)
for pl in bid_order:
# everyone has passed
if count == hands:
if first_round: # stuck the dealer
wb = min_bid
p(f"Dealer {pl} got stuck with {min_bid}")
if pl.is_bot: # dealer picks suit
pl.make_bid(self.valid_bids, min_bid, pl)
wp = pl
else: # someone won the bid
wb = min_bid - 1
break
# end bidding early for a loner
if min_bid > max_bid:
wb = max_bid
break
# get the bid
bid: int = pl.make_bid(self.valid_bids, min_bid, wp)
# player passes
if bid < min_bid:
p(f"{pl} passes")
count += 1
continue
# bid successful
min_bid = bid + 1
wp = pl
count = 1
first_round = False
p(f"{pl} bids {bid}")
wp.team.bid = wb
return wp
def play_hand(self, dealer: EuchrePlayer) -> EuchrePlayer:
self.deal()
hn: int = len(dealer.team.score_changes) + 1
p(f"\nHand {hn}")
p(f"Dealer: {dealer}")
po: List[EuchrePlayer] = get_play_order(dealer)
po.append(po.pop(0)) # because the dealer doesn't lead bidding
# deal the cards
for pl in po:
pl.tricks = 0
pl.reset_bids()
# bidding
lead: EuchrePlayer = self.bidding(po)
# declare Trump
trump: Bid = lead.choose_trump
p(trump)
self.low_win = trump.is_low
p(f"{lead} bid {lead.team.bid} {trump.name}\n")
# modify hands if trump called
[player.trumpify_hand(trump.trump_suit, trump.is_low) for player in po]
self.unplayed_cards.trumpify(trump.trump_suit) # for card-counting
self.suit_safety[trump.trump_suit] = None
# check for shooters and loners
lone: Optional[EuchrePlayer] = None
if lead.team.bid > self.hand_size:
if lead.team.bid < 2 * self.hand_size:
lead.receive_shooter()
lone = lead
# play the tricks
for _ in range(self.hand_size):
lead = self.play_trick(lead, trump.is_low, lone)
# calculate scores
p(f"Hand {hn} scores:")
for t in self.teams:
tr_t: int = 0
ls: int = 0
bid: int = t.bid
for pl in t.players:
tr_t += pl.tricks
if bid:
# loners and shooters
if lone:
ls = bid
bid = self.hand_size
if tr_t < bid:
p(f"{t} got Euchred and fell {bid - tr_t} short of {bid}")
t.score = -bid if not ls else -bid * 3 // 2
elif ls:
p(f"{lone} won all alone, the absolute madman!")
t.score = ls
else:
p(f"{t} beat their bid of {bid} with {tr_t} tricks")
t.score = tr_t + 2
else: # tricks the non-bidding team earned
p(f"{t} earned {tr_t} tricks")
t.score = tr_t
# bookkeeping
t.bid_history.append(
f"{ls if ls else bid} {trump.name}" if bid else str(None)
)
t.tricks_taken.append(tr_t)
p(f"{t}: {t.score}")
t.bid = 0 # reset for next time
return dealer.next_player
def play_trick(
self,
lead: EuchrePlayer,
is_low: bool = False,
lone: Optional[EuchrePlayer] = None,
) -> EuchrePlayer:
pl: EuchrePlayer = lead
po: List[EuchrePlayer] = get_play_order(lead)
trick_in_progress: Trick = Trick()
# play the cards
for pl in po:
if lone and pl in lone.teammates:
continue
c: Card = pl.play_card(
trick_in_progress,
handedness=self.handedness,
is_low=is_low,
broken_suits=self.suit_safety,
trump=self.trump,
)
trick_in_progress.append(TrickPlay(c, pl))
p(f"{pl.name} played {repr(c)}")
# find the winner
w: TrickPlay = trick_in_progress.winner(is_low)
w.played_by.tricks += 1
p(f"{w.played_by.name} won the trick\n")
l_suit: Suit = trick_in_progress.lead_suit
if w.card.suit != l_suit:
self.suit_safety[l_suit] = (
True if self.suit_safety[l_suit] else w.played_by.team
)
return w.played_by
def write_log(self, ld: str, splitter: str = "\t|\t") -> None:
stop_time: str = str(datetime.now()).split(".")[0]
f: TextIO = open(os.path.join(ld, f"{self.start_time}.gamelog"), "w")
t_l: List[Team] = list(self.teams) # give a consistent ordering
def w(msg):
click.echo(msg, f)
# headers
w(splitter.join([self.start_time] + [f"{t}\t\t" for t in t_l]))
w(splitter.join([""] + ["Bid\tTricks Taken\tScore Change" for _ in t_l]))
w(splitter.join(["Hand"] + ["===\t===\t===" for _ in t_l]))
w( # body
"\n".join(
[
splitter.join([f"{hand + 1}"] + [t.hand_tab(hand) for t in t_l])
for hand in range(len(t_l[0].bid_history))
]
)
)
# totals
w(splitter.join([stop_time] + ["===\t===\t===" for _ in t_l]))
w(splitter.join(["Totals"] + [t.hand_tab(None) for t in t_l]))
f.close()
def victory_check(self) -> Tuple[int, Optional[Team]]:
scorecard: List[Team] = sorted(self.teams, key=score_key)
best_score: int = scorecard[-1].score
if best_score < self.bad_ai_end:
return -2, None # everyone went too far negative
if best_score == scorecard[-2].score:
return 0, None # keep playing for a tie
if best_score > self.victory_threshold: # a team won
return 1, scorecard[-1]
if scorecard[0].score < self.mercy_rule: # a team lost
return -1, scorecard[0] # should never tie for last
return 0, None
def play(self) -> None:
v: Tuple[int, Optional[Team]] = self.victory_check()
global o
while v[0] == 0:
self.current_dealer = self.play_hand(self.current_dealer)
v = self.victory_check()
def final_score(pf: Callable = print):
pf(f"\nFinal Scores")
for t in self.teams:
pf(f"{t}: {t.score}")
pf(f"({len(self.current_dealer.team.bid_history)} hands)")
final_score(p)
if o: # final scores to terminal
final_score()
def score_key(t: Team) -> int:
return t.score
@click.command()
@common_options
@click.option(
"--minimum-bid",
type=click.IntRange(0, None),
help="The minimum bid (will usually be 6 if not set)",
)
def main(**kwargs):
global o
global debug
global log_dir
if kwargs.get("all_bots"):
st: str = str(datetime.now()).split(".")[0]
o = open(os.path.join(log_dir, f"{st}.gameplay"), "w")
kwargs["start_time"] = st
debug = True
make_and_play_game(BidEuchre, log_dir, **kwargs)
if __name__ == "__main__":
Path(log_dir).mkdir(parents=True, exist_ok=True)
main()
| 34.168874 | 93 | 0.537358 | 18,980 | 0.919618 | 0 | 0 | 2,598 | 0.125878 | 0 | 0 | 4,231 | 0.205 |
d7a9af578243dd70e67e5e691e31327e0ed63c8d | 3,908 | py | Python | src/consumer/catalog-search/swagger_server/models/error_response.py | CADDE-sip/connector | 233b63df334dea67c05d379e925ebb7cc15f4b4d | [
"MIT"
] | 1 | 2022-03-29T05:44:47.000Z | 2022-03-29T05:44:47.000Z | src/provider/provenance-management/swagger_server/models/error_response.py | CADDE-sip/connector | 233b63df334dea67c05d379e925ebb7cc15f4b4d | [
"MIT"
] | null | null | null | src/provider/provenance-management/swagger_server/models/error_response.py | CADDE-sip/connector | 233b63df334dea67c05d379e925ebb7cc15f4b4d | [
"MIT"
] | null | null | null | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server import util
class ErrorResponse(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, detail: str = None, status: float = None, title: str = None, type: str = None): # noqa: E501
"""ErrorResponse - a model defined in Swagger
:param detail: The detail of this ErrorResponse. # noqa: E501
:type detail: str
:param status: The status of this ErrorResponse. # noqa: E501
:type status: float
:param title: The title of this ErrorResponse. # noqa: E501
:type title: str
:param type: The type of this ErrorResponse. # noqa: E501
:type type: str
"""
self.swagger_types = {
'detail': str,
'status': float,
'title': str,
'type': str
}
self.attribute_map = {
'detail': 'detail',
'status': 'status',
'title': 'title',
'type': 'type'
}
self._detail = detail
self._status = status
self._title = title
self._type = type
@classmethod
def from_dict(cls, dikt) -> 'ErrorResponse':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The ErrorResponse of this ErrorResponse. # noqa: E501
:rtype: ErrorResponse
"""
return util.deserialize_model(dikt, cls)
@property
def detail(self) -> str:
"""Gets the detail of this ErrorResponse.
        Error message # noqa: E501
:return: The detail of this ErrorResponse.
:rtype: str
"""
return self._detail
@detail.setter
def detail(self, detail: str):
"""Sets the detail of this ErrorResponse.
        Error message # noqa: E501
:param detail: The detail of this ErrorResponse.
:type detail: str
"""
if detail is None:
raise ValueError("Invalid value for `detail`, must not be `None`") # noqa: E501
self._detail = detail
@property
def status(self) -> float:
"""Gets the status of this ErrorResponse.
        HTTP status code # noqa: E501
:return: The status of this ErrorResponse.
:rtype: float
"""
return self._status
@status.setter
def status(self, status: float):
"""Sets the status of this ErrorResponse.
        HTTP status code # noqa: E501
:param status: The status of this ErrorResponse.
:type status: float
"""
if status is None:
raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
self._status = status
@property
def title(self) -> str:
"""Gets the title of this ErrorResponse.
        Title # noqa: E501
:return: The title of this ErrorResponse.
:rtype: str
"""
return self._title
@title.setter
def title(self, title: str):
"""Sets the title of this ErrorResponse.
        Title # noqa: E501
:param title: The title of this ErrorResponse.
:type title: str
"""
self._title = title
@property
def type(self) -> str:
"""Gets the type of this ErrorResponse.
        Type # noqa: E501
:return: The type of this ErrorResponse.
:rtype: str
"""
return self._type
@type.setter
def type(self, type: str):
"""Sets the type of this ErrorResponse.
        Type # noqa: E501
:param type: The type of this ErrorResponse.
:type type: str
"""
self._type = type
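# Example usage (sketch; the field values are illustrative assumptions):
# err = ErrorResponse.from_dict({'detail': 'missing parameter', 'status': 400,
#                                'title': 'Bad Request', 'type': 'E400'})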
| 25.376623 | 116 | 0.570368 | 3,761 | 0.94025 | 0 | 0 | 2,564 | 0.641 | 0 | 0 | 2,399 | 0.59975 |
d7aa0f6c9b402105d7439f34f24ee84832389102 | 1,534 | py | Python | dh_example.py | afakharany93/DH_Matrix_Python | 97e29d5b14511f68884a89852d014b010aa4e0f2 | [
"MIT"
] | null | null | null | dh_example.py | afakharany93/DH_Matrix_Python | 97e29d5b14511f68884a89852d014b010aa4e0f2 | [
"MIT"
] | null | null | null | dh_example.py | afakharany93/DH_Matrix_Python | 97e29d5b14511f68884a89852d014b010aa4e0f2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
from dh import dh_solver
#from IPython.display import Latex
import sympy
from sympy import Symbol
import numpy as np
#create an object
mtb = dh_solver()
# ## adding the dh parameters
# just use obj.add() method to add a set of dh parameters in the kinematic chain, the first one to add is the base and the last one is the end effector, the method takes as input a list \[ d, theta, a, alpha\]
#
# you can get the parameters from vrep http://www.forum.coppeliarobotics.com/viewtopic.php?f=9&t=5367
#you can add the parameters with the variable as a string
mtb.add([0,Symbol("theta1"),0.467,0])
#or you can add the variable as a Sympy symbol, in this case you can also shift the variable
mtb.add([0,Symbol("theta2")+sympy.pi/2,0.4005,0])
mtb.add([0.2,sympy.pi/3,0,Symbol("alpha3")])
#to get the dh matrices in symbolic form
T = mtb.calc_symbolic_matrices()
print(T)
#simplifying T
sympy.simplify(T)
#to get the intermediate transformation matrices
print(mtb.T_list)
#to get the matrix with the constants substituted
T1 = mtb.calc_dh_matrix()
T2 = sympy.simplify(T1)
print(T2)
#printing T2 in latex
# a = sympy.latex(T2)
# print(a)
#to substitute with the variables and return a numpy array of floats, all variables must be substituted
arr = mtb.get_numpy_matrix([ ["theta1", sympy.pi/2], ["theta2", sympy.pi/3] ,["alpha3", 0.5]])
print(arr)
# to call obj.get_numpy_matrix() you have to at least have called obj.calc_dh_matrix() and of course added your parameters :D
| 25.147541 | 205 | 0.730769 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,062 | 0.692308 |
d7aa249c00c97006115a30d68bd7d12ff4ce1c5c | 1,646 | py | Python | src/python/pants/help/list_goals_integration_test.py | thamenato/pants | bc4a8fb3f07f6145649f02b06a1e5599aa28b36c | [
"Apache-2.0"
] | null | null | null | src/python/pants/help/list_goals_integration_test.py | thamenato/pants | bc4a8fb3f07f6145649f02b06a1e5599aa28b36c | [
"Apache-2.0"
] | null | null | null | src/python/pants/help/list_goals_integration_test.py | thamenato/pants | bc4a8fb3f07f6145649f02b06a1e5599aa28b36c | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.testutil.pants_integration_test import run_pants
def test_goals() -> None:
pants_run = run_pants(["goals"])
pants_run.assert_success()
assert "to get help for a particular goal" in pants_run.stdout
# Spot check a few core goals.
for goal in ["filedeps", "list", "roots", "validate"]:
assert goal in pants_run.stdout
def test_only_show_implemented_goals() -> None:
# Some core goals, such as `./pants test`, require downstream implementations to work
# properly. We should only show those goals when an implementation is provided.
goals_that_need_implementation = ["binary", "fmt", "lint", "run", "test"]
command = ["--pants-config-files=[]", "goals"]
not_implemented_run = run_pants(["--backend-packages=[]", *command])
not_implemented_run.assert_success()
for goal in goals_that_need_implementation:
assert goal not in not_implemented_run.stdout
implemented_run = run_pants(
[
"--backend-packages=['pants.backend.python', 'pants.backend.python.lint.isort']",
*command,
],
)
implemented_run.assert_success()
for goal in goals_that_need_implementation:
assert goal in implemented_run.stdout
def test_ignored_args() -> None:
# Test that arguments (some of which used to be relevant) are ignored.
pants_run = run_pants(["goals", "--all", "--graphviz", "--llama"])
pants_run.assert_success()
assert "to get help for a particular goal" in pants_run.stdout
| 38.27907 | 93 | 0.694411 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 703 | 0.427096 |
d7ac1e6f92f48c0869fd34f768c9769c6cff0aee | 78 | py | Python | tests/data/custom_loader2.py | cambiegroup/aizynthfinder | f5bafb2ac4749284571c05ae6df45b6f45cccd30 | [
"MIT"
] | 219 | 2020-06-15T08:04:53.000Z | 2022-03-31T09:02:47.000Z | tests/data/custom_loader2.py | cambiegroup/aizynthfinder | f5bafb2ac4749284571c05ae6df45b6f45cccd30 | [
"MIT"
] | 56 | 2020-08-14T14:50:42.000Z | 2022-03-22T12:49:06.000Z | tests/data/custom_loader2.py | cambiegroup/aizynthfinder | f5bafb2ac4749284571c05ae6df45b6f45cccd30 | [
"MIT"
] | 58 | 2020-06-15T13:36:42.000Z | 2022-03-21T06:18:02.000Z | def extract_smiles():
return ["c1ccccc1", "Cc1ccccc1", "c1ccccc1", "CCO"]
| 26 | 55 | 0.653846 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 36 | 0.461538 |
d7ac26c759283e3913ac9e8cb81d30bdc1dc518a | 9,903 | py | Python | proa.py | andrehigher/ProA | bd05d231f6edb1c2f87a3c067ff0cf1f2867b9b5 | [
"Apache-2.0"
] | null | null | null | proa.py | andrehigher/ProA | bd05d231f6edb1c2f87a3c067ff0cf1f2867b9b5 | [
"Apache-2.0"
] | null | null | null | proa.py | andrehigher/ProA | bd05d231f6edb1c2f87a3c067ff0cf1f2867b9b5 | [
"Apache-2.0"
] | null | null | null | """ A Python Class
A simple Python graph class providing essential graph operations.
"""
import operator
import math
from random import choice
from collections import defaultdict
import networkx as nx
class ProA():
def __init__(self, graph):
""" Initializes util object.
"""
self.__graph = graph
self.__relations = {}
self.__relations_distribution = defaultdict(int)
self.__hits1 = 0.0
self.__hits3 = 0.0
self.__hits5 = 0.0
self.__hits10 = 0.0
def clear(self):
""" Clear current graph
"""
self.__graph.clear()
def set_graph(self, graph):
""" A method to set graph.
"""
self.__graph = graph
def get_graph(self):
""" A method to get graph.
"""
return self.__graph
def get_hits1(self):
""" A method to get hits1.
"""
return self.__hits1
def get_hits3(self):
""" A method to get hits3.
"""
return self.__hits3
def get_hits5(self):
""" A method to get hits5.
"""
return self.__hits5
def get_hits10(self):
""" A method to get hits10.
"""
return self.__hits10
def set_relation(self, source, target, relation):
""" A method to set an edge label.
"""
self.__relations[(source,target)] = relation
def get_relation(self, source, target):
""" A method to return an edge label.
"""
try:
return self.__relations[(source,target)]
except KeyError:
try:
return self.__relations[(target,source)]
except KeyError:
pass
    def get_domain(self, source):
        """ Get the domain from the outgoing relations of the source vertex.
"""
try:
dicti = defaultdict(int)
for neighbor in self.__graph.neighbors(source):
relation = self.get_relation(source, neighbor).split('/')
dicti[relation[1]] += 1
sorted_dicti = sorted(dicti.items(), key=operator.itemgetter(1))
return sorted_dicti[0][0]
except IndexError:
pass
def generate_distribution(self, source, target, length):
""" Generate relations distribution from a source to target.
"""
paths = nx.all_simple_paths(self.__graph, source, target, cutoff=length)
paths = list(paths)
        print('len', len(paths))
distribution = defaultdict(int)
for path in paths:
relations_list = list()
for i in range(0, len(path) - 1):
# print path[i], path[i + 1], self.get_relation(path[i], path[i+1])
relations_list.append(self.get_relation(path[i], path[i+1]))
# print 'list', relations_list
distribution[tuple(relations_list)] += 1
return distribution
    def recur_generate_paths(self, g, node_initial, node_source, node_target, distribution, key, index, dicti, source, target):
        """ Recursive method to build a dictionary of existing edges between v1 and v2 until the given limit is reached.
"""
if key[index] == self.get_relation(node_source, node_target):
index = index + 1
if len(key) > index:
for neighbor in g.neighbors(node_target):
self.recur_generate_paths(g, node_initial, node_target, neighbor, distribution, key, index, dicti, source, target)
else:
if source == node_initial and target == node_target:
pass
else:
dicti[self.get_relation(node_initial, node_target)] += 1
    def generate_edges_between_paths(self, distribution, source, target):
        """ Generate a dictionary of existing edges between v1 and v2.
"""
path_distribution = {}
g = self.get_graph()
        for key, value in distribution.items():
            print('-------- Calculating: ', key, '---------')
dicti = defaultdict(int)
for edge in g.edges():
try:
self.recur_generate_paths(g, edge[0], edge[0], edge[1], distribution, key, 0, dicti, source, target)
except IndexError:
pass
path_distribution[key] = dicti
return path_distribution
def generate_final_distribution(self, distribution, distribution_path):
""" Generate final distribution from possible edges.
"""
total_edges = float(sum(distribution.values()))
final_path_distribution = defaultdict(float)
for dist in distribution:
final_path_distribution[dist] += float(distribution[dist])/total_edges
final_distribution = defaultdict(float)
for path in distribution_path:
temp_total = 0
for path2 in distribution_path[path]:
temp_total += distribution_path[path][path2]
for path2 in distribution_path[path]:
final_distribution[path2] += (float(distribution_path[path][path2])/temp_total)*final_path_distribution[path]
return final_distribution
def evaluate(self, MMR, final_distribution_sorted, edge_to_be_predicted):
""" Evaluate MMR.
"""
count = 0.0
for relation, probability in final_distribution_sorted:
            print('Predicting', relation)
if relation == edge_to_be_predicted:
count += 1.0
break
if relation == None and probability > 0.92:
count += 1.0
elif relation != None:
count += 1.0
if count == 0:
count = 20.0
else:
MMR += (1.0/count)
self.update_hits(count)
return MMR
def update_hits(self, count):
""" Evaluate Hits.
"""
if count == 1:
self.__hits1 += 1
if count <= 3:
self.__hits3 += 1
if count <= 5:
self.__hits5 += 1
if count <= 10:
self.__hits10 += 1
def calculate_entropy(self, source, target):
""" Calculates the entropy from source and target.
"""
prod = 1.0
for i in range(1, self.__graph.degree(target)+1):
prod = prod * (float(self.__graph.number_of_edges()-self.__graph.degree(source)-i+1)/float(self.__graph.number_of_edges()-i+1))
return -math.log(1 - prod, 2)
def calculate_common_neighbors(self, source, target):
""" Calculates the common neighbors from source and target.
"""
return sorted(nx.common_neighbors(self.__graph, source, target))
def calculate_resource_allocation(self, source, target):
""" Calculates the common neighbors from source and target.
"""
return nx.resource_allocation_index(self.__graph, [(source, target)])
    def random_walk(self):
        """ Start a random walk on the graph from a randomly selected node.
        """
        print('Number of nodes', self.__graph.number_of_nodes())
        print('Number of edges', self.__graph.number_of_edges())
        # Get a node randomly
        # Probability to get this first node is 1/N
        seed = choice(list(self.__graph.nodes()))
        print('Selected a node randomly', seed)
        print('Degree', self.__graph.degree(seed))
        print('In degree', self.__graph.in_degree(seed))
        print('Out degree', self.__graph.out_degree(seed))
        print('Successors', list(self.__graph.successors(seed)))
num_edges = len(self.__graph.edges())
prob_vertex = {}
entropy_vertex = {}
for possibility in self.__graph.nodes():
if possibility != seed:
if possibility not in self.__graph.successors(seed):
prod = 1.0
for i in range(self.__graph.degree(possibility)):
prod = prod * ((num_edges-self.__graph.degree(seed)+(-i+1)+1)/float(num_edges+(-i+1)+1))
prob_vertex[possibility] = 1 - prod
entropy_vertex[possibility] = -math.log(1 - prod)
prob_vertex = sorted(prob_vertex.items(), key=operator.itemgetter(1))
entropy_vertex = sorted(entropy_vertex.items(), key=operator.itemgetter(1))
        print(entropy_vertex)
        print(seed)
# Print edges with relation
# print DG.edges(data='relation')
    def entropy(self, source, target):
        """ Compute and print the entropy between a source and a target vertex.
"""
print('source:', source, 'target:', target, 'entropy:', self.calculate_entropy(source, target))
    def predict_facts(self, source, target, length):
        """ A method to predict facts based on Shannon entropy.
        """
        print(source, target)
        print('Selected a node', source)
        print('Source Degree', self.__graph.degree(source))
        print('Neighbors', list(self.__graph.neighbors(source)))
        print('Target Degree', self.__graph.degree(target))
        print('Neighbors', list(self.__graph.neighbors(target)))
# print(sorted(nx.all_neighbors(self.__graph, source)))
print(len(self.__graph.edges()))
# print(self.__graph.edges())
count = 0.0
for edge in self.__graph.edges():
if edge[0] == 'teamplayssport' or edge[1] == 'teamplayssport':
count = count + 1
# print(edge)
# print 'In degree', self.__graph.in_degree(source)
# print 'Out degree', self.__graph.out_degree(source)
# print 'Successors', self.__graph.successors(source)
# print(sorted(nx.common_neighbors(self.__graph, source, target)))
print(count)
print(count/(len(self.__graph.edges())))
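# Example usage (sketch; the graph, node names and relation label are illustrative
# assumptions -- a populated networkx graph with labelled edges is required):
# proa = ProA(G)
# proa.set_relation('team_a', 'sport_x', '/concept/teamplayssport')
# print(proa.generate_distribution('team_a', 'sport_x', length=3))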
| 36.951493 | 139 | 0.58144 | 9,699 | 0.9794 | 0 | 0 | 0 | 0 | 0 | 0 | 2,240 | 0.226194 |
d7acd6c4288f7d67024c98e799deb3358d6fb4d1 | 1,214 | py | Python | fix-sfz.py | SpotlightKid/sfzparser | 9a339274bc44931965ebb36336f43c4fa5f1e498 | [
"MIT"
] | 5 | 2017-01-10T23:22:57.000Z | 2020-05-08T06:29:38.000Z | fix-sfz.py | SpotlightKid/sfzparser | 9a339274bc44931965ebb36336f43c4fa5f1e498 | [
"MIT"
] | 2 | 2017-03-28T20:14:27.000Z | 2017-03-29T07:54:59.000Z | fix-sfz.py | SpotlightKid/sfzparser | 9a339274bc44931965ebb36336f43c4fa5f1e498 | [
"MIT"
] | 2 | 2017-03-28T20:00:42.000Z | 2020-10-24T16:39:31.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import shutil
from os.path import basename, exists, isdir, splitext
from sfzparser import SFZParser
def main(args=None):
fn = args[0]
bn = splitext(basename(fn))[0]
parser = SFZParser(fn)
fixed = False
for name, sect in parser.sections:
# fix sample filename without directory prefix
if name == 'region' and 'sample' in sect and isdir(bn) and '/' not in sect['sample']:
print("Setting prefix for sample '{}' to '{}'.".format(sect['sample'], bn))
sect['sample'] = bn + '/' + sect['sample']
fixed = True
if fixed:
if not exists(fn + '.bak'):
shutil.copy(fn, fn + '.bak')
with open(args[0], 'w') as sfz:
for name, sect in parser.sections:
if name == 'comment':
sfz.write(sect + '\n')
else:
sfz.write("<{}>\n".format(name))
for key, value in sorted(sect.items()):
sfz.write(" {}={}\n".format(key, value))
else:
print("Nothing to fix.")
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv[1:] or 0))
| 28.904762 | 93 | 0.521417 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 261 | 0.214992 |
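A minimal invocation sketch for the fix-sfz.py script above; the SFZ filename is a placeholder, and the script is assumed to sit next to a sample directory named after the file.
import subprocess
import sys
ret = subprocess.call([sys.executable, "fix-sfz.py", "MyInstrument.sfz"])  # placeholder file name
print("fix-sfz exited with", ret)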
d7ad4862b9b6c2d525c8d2c7b82c06b8e22cd716 | 103 | py | Python | pytoolkit/applications/__init__.py | ak110/pytoolk | 8eef7e0add7bbc0ced1f1f1d82ed245388cc6684 | [
"MIT"
] | 26 | 2018-12-03T23:02:56.000Z | 2020-08-07T06:33:21.000Z | pytoolkit/applications/__init__.py | ak110/pytoolk | 8eef7e0add7bbc0ced1f1f1d82ed245388cc6684 | [
"MIT"
] | null | null | null | pytoolkit/applications/__init__.py | ak110/pytoolk | 8eef7e0add7bbc0ced1f1f1d82ed245388cc6684 | [
"MIT"
] | 5 | 2019-08-10T11:10:59.000Z | 2020-11-18T02:39:37.000Z | """Various Keras models."""
# pylint: skip-file
# flake8: noqa
from . import darknet53, efficientnet, xception
| 17.166667 | 47 | 0.718447 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 65 | 0.555556 |
d7ada5e8230786197ee97f5ef4dfe811a9ba707b | 1,536 | py | Python | tests/rest/integration/test_common_endpoints.py | Palem1988/huobi | 400d07ea309dc7bc75fb953ddf507c3592678b05 | [
"MIT"
] | 19 | 2018-05-18T09:49:47.000Z | 2021-06-18T20:25:14.000Z | tests/rest/integration/test_common_endpoints.py | tianshanghong/huobi | b2d1a14ed83ac890cc63b513618c8f5ba2574512 | [
"MIT"
] | 14 | 2018-05-15T16:30:40.000Z | 2022-03-17T08:52:06.000Z | tests/rest/integration/test_common_endpoints.py | tianshanghong/huobi | b2d1a14ed83ac890cc63b513618c8f5ba2574512 | [
"MIT"
] | 9 | 2018-05-27T01:34:37.000Z | 2022-03-04T20:11:30.000Z | import unittest
from huobi.rest.client import HuobiRestClient
from huobi.rest.error import (
HuobiRestiApiError
)
import os
from os.path import join, dirname
from dotenv import load_dotenv
dotenv_path = join(dirname(dirname(dirname(dirname(__file__)))), '.env')
load_dotenv(dotenv_path)
class TestCommonEndpoint(unittest.TestCase):
def setUp(self):
access_key = os.environ['ACCESS_KEY']
secret_key = os.environ['SECRET_KEY']
self.client = HuobiRestClient(
access_key=access_key, secret_key=secret_key)
def tearDown(self):
self.client.close()
class TestCommonSymbols(TestCommonEndpoint):
def test_success(self):
res = self.client.symbols()
self.assertEqual(res.res.status_code, 200)
self.assertIn('data', res.data)
self.assertIsInstance(res.data['data'], list)
def test_authentication_fail(self):
client = HuobiRestClient(
access_key='1',
secret_key='2',
)
with self.assertRaises(HuobiRestiApiError):
client.accounts()
class TestCommonCurrencies(TestCommonEndpoint):
def test_success(self):
res = self.client.currencies()
self.assertEqual(res.res.status_code, 200)
def test_alias(self):
res = self.client.currencys()
self.assertEqual(res.res.status_code, 200)
class TestCommonTimestamp(TestCommonEndpoint):
def test_success(self):
res = self.client.timestamp()
self.assertEqual(res.res.status_code, 200)
| 25.6 | 72 | 0.682292 | 1,231 | 0.801432 | 0 | 0 | 0 | 0 | 0 | 0 | 48 | 0.03125 |
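A short sketch of running the integration tests above outside a CI job; the credentials are placeholders and follow the same ACCESS_KEY / SECRET_KEY convention used by setUp.
import os
import unittest
os.environ.setdefault("ACCESS_KEY", "your-access-key")  # placeholder credentials
os.environ.setdefault("SECRET_KEY", "your-secret-key")
suite = unittest.defaultTestLoader.discover("tests/rest/integration")
unittest.TextTestRunner(verbosity=2).run(suite)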
d7afcd461d31d762481c2b25c06e4d95251be3d2 | 8,477 | py | Python | gerritviewer/views/accounts.py | tivaliy/gerrit-quick-viewer | 86d6995faf2a1eeb611eb861fef2a8c856f4be86 | [
"Apache-2.0"
] | null | null | null | gerritviewer/views/accounts.py | tivaliy/gerrit-quick-viewer | 86d6995faf2a1eeb611eb861fef2a8c856f4be86 | [
"Apache-2.0"
] | null | null | null | gerritviewer/views/accounts.py | tivaliy/gerrit-quick-viewer | 86d6995faf2a1eeb611eb861fef2a8c856f4be86 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2017 Vitalii Kulanov
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import requests
from flask import Blueprint, current_app, flash, Markup, render_template, \
request, redirect, url_for
from gerritclient import client
from gerritclient import error as client_error
from gerritviewer import common
from .forms import CreateUserAccountForm, EditContactInfoForm, \
QueryUserAccountForm
accounts = Blueprint('accounts', __name__)
@accounts.route('/accounts', methods=['GET', 'POST'])
def fetch():
form = QueryUserAccountForm()
gerrit_accounts = None
account_client = client.get_client('account',
connection=common.get_connection())
try:
if form.validate_on_submit():
gerrit_accounts = account_client.get_all(
form.query_string.data, detailed=form.details.data)
flash(Markup("Search results for <strong>'{}'</strong>: {}".format(
form.query_string.data,
"Nothing Found" if not gerrit_accounts else '')),
category='note')
except (requests.ConnectionError, client_error.HTTPError) as error:
current_app.logger.error(error)
flash(error, category='error')
return render_template('accounts/accounts.html',
gerrit_url=common.get_gerrit_url(),
gerrit_version=common.get_version(),
entry_category='accounts',
entries=gerrit_accounts,
form=form)
@accounts.route('/accounts/<account_id>')
def fetch_single(account_id):
account = {}
account_client = client.get_client('account',
connection=common.get_connection())
try:
account = account_client.get_by_id(
account_id, detailed=request.args.get('details', False))
account['is_active'] = account_client.is_active(account_id)
account['membership'] = account_client.get_membership(account_id)
action = request.args.get('action')
if action:
account_actions = {'enable': account_client.enable,
'disable': account_client.disable}
account_actions[action](account_id)
flash(Markup("Account with <strong>ID={}</strong> was "
"successfully <strong>{}d</strong>".format(
account_id, action)), category='note')
return redirect(url_for('accounts.fetch_single',
account_id=account_id))
except (requests.ConnectionError, client_error.HTTPError) as error:
current_app.logger.error(error)
flash(error, category='error')
return render_template('accounts/profile.html',
gerrit_url=common.get_gerrit_url(),
gerrit_version=common.get_version(),
entry_category='accounts',
entry_item=account,
entry_item_name=account.get('name'))
@accounts.route('/accounts/contact/<account_id>', methods=['GET', 'POST'])
def edit_contact_info(account_id):
form = EditContactInfoForm()
account = {}
account_client = client.get_client('account',
connection=common.get_connection())
try:
account = account_client.get_by_id(account_id, detailed=False)
current_status = get_account_status(account_id)
if form.validate_on_submit():
fullname, username = form.fullname.data, form.username.data
status = form.status.data
response = {}
if account.get('name') != fullname:
response['full name'] = account_client.set_name(account_id,
fullname)
if username and account.get('username') != username:
response['username'] = account_client.set_username(account_id,
username)
if status != current_status:
response['status'] = account_client.set_status(account_id,
status) or ''
if response:
flash(Markup("The following parameters were successfully "
"updated: {0}".format(", ".join(
":: ".join(_) for _ in response.items()))),
category='note')
return redirect(url_for('accounts.fetch_single',
account_id=account_id))
except (requests.ConnectionError, client_error.HTTPError) as error:
current_app.logger.error(error)
flash(error, category='error')
return render_template('accounts/contacts.html',
gerrit_url=common.get_gerrit_url(),
gerrit_version=common.get_version(),
entry_category='accounts',
entry_item=account,
entry_item_name=account.get('name'),
form=form)
@accounts.route('/accounts/ssh/<account_id>')
def ssh(account_id):
account_client = client.get_client('account',
connection=common.get_connection())
account, ssh_keys = {}, []
try:
account = account_client.get_by_id(account_id, detailed=False)
ssh_keys = account_client.get_ssh_keys(account_id)
except (requests.ConnectionError, client_error.HTTPError) as error:
current_app.logger.error(error)
flash(error, category='error')
return render_template('accounts/ssh.html',
gerrit_url=common.get_gerrit_url(),
gerrit_version=common.get_version(),
entry_category='accounts',
entry_item=account,
entry_item_name=account.get('name'),
entries=ssh_keys)
@accounts.route('/accounts/create', methods=['GET', 'POST'])
def create():
form = CreateUserAccountForm()
if form.validate_on_submit():
account_client = client.get_client('account',
connection=common.get_connection())
data = {k: v for k, v in (('username', form.username.data),
('name', form.fullname.data),
('email', form.email.data)) if v}
try:
response = account_client.create(form.username.data, data=data)
msg = Markup("A new user account '<strong>{0}</strong>' "
"with ID={1} was successfully created.".format(
response['username'], response['_account_id']))
flash(msg, category='note')
return redirect(url_for('accounts.fetch_single',
account_id=response['_account_id']))
except (requests.ConnectionError, client_error.HTTPError) as error:
current_app.logger.error(error)
flash(error, category='error')
return render_template('accounts/create.html',
gerrit_url=common.get_gerrit_url(),
gerrit_version=common.get_version(),
form=form)
# Status of account is only available since gerrit 2.14,
# so we have to fetch it in a proper way for all versions
def get_account_status(account_id):
account_client = client.get_client('account',
connection=common.get_connection())
try:
current_status = account_client.get_status(account_id)
except client_error.HTTPError:
current_status = None
return current_status
| 46.070652 | 79 | 0.575911 | 0 | 0 | 0 | 0 | 7,037 | 0.830129 | 0 | 0 | 1,673 | 0.197358 |
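A minimal sketch of mounting the accounts blueprint above in a Flask application; the secret key and run options are placeholders, and the import path follows the repository layout shown in the record header.
from flask import Flask
from gerritviewer.views.accounts import accounts
app = Flask(__name__)
app.secret_key = "change-me"  # required because the views call flash()
app.register_blueprint(accounts)
if __name__ == "__main__":
    app.run(debug=True)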
d7afd3383399873e7b024e174d09b83c6155eba6 | 2,928 | py | Python | infra/tools/git-push-speed.py | mithro/chromium-infra | d27ac0b230bedae4bc968515b02927cf9e17c2b7 | [
"BSD-3-Clause"
] | null | null | null | infra/tools/git-push-speed.py | mithro/chromium-infra | d27ac0b230bedae4bc968515b02927cf9e17c2b7 | [
"BSD-3-Clause"
] | null | null | null | infra/tools/git-push-speed.py | mithro/chromium-infra | d27ac0b230bedae4bc968515b02927cf9e17c2b7 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Measures the average time used by git-push command in CQ based on data from
chromium-cq-status.appspot.com."""
import argparse
import json
import logging
import sys
import urllib
CQ_STATUS_QUERY_URL = 'http://chromium-cq-status.appspot.com/query'
def load_options():
parser = argparse.ArgumentParser(description=sys.modules['__main__'].__doc__)
parser.add_argument('--project', default='chromium', help='Project name.')
parser.add_argument('--count', '-c', default=1000, type=int, required=True,
help='Number of issues to average over.')
parser.add_argument('--verbose', '-v', action='store_true',
help='Print debugging messages to console')
return parser.parse_args()
def get_stats(filters, cursor=None):
url = '%s/%s' % (CQ_STATUS_QUERY_URL, '/'.join(filters))
if cursor:
url += '?cursor=%s' % cursor
logging.debug('Loading %s', url)
data = json.load(urllib.urlopen(url))
return data['results'], data['cursor'], data['more']
def main():
options = load_options()
filters = []
if options.project:
filters += ['project=%s' % options.project]
logging.basicConfig(level=logging.DEBUG if options.verbose else logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# We search for committed timestamps first, because this guarantees that all
# these issues will also have a committing timestamp. The opposite is not always
# true - some issues with a committing timestamp may not be committed yet.
logging.info('Searching for committed issues')
issues = []
cursor = None
more = True
while len(issues) < options.count and more:
results, cursor, more = get_stats(filters + ['action=patch_committed'],
cursor)
for result in results:
issues.append({'issue': result['fields']['issue'],
'patchset': result['fields']['patchset'],
'committed': result['fields']['timestamp']})
if len(issues) > options.count:
issues = issues[:options.count]
logging.debug('Searching committing timestamp for found issues')
for issue in issues:
results, _, _ = get_stats(filters + ['action=patch_committing',
'issue=%s' % issue['issue'],
'patchset=%s' % issue['patchset']])
assert len(results) >= 1, 'Incorrect number of results: %s' % results
issue['committing'] = results[0]['fields']['timestamp']
logging.debug(issues)
push_times = [i['committed'] - i['committing'] for i in issues]
average_push_time = sum(push_times) / len(push_times)
print 'Average git push time is %.2f seconds' % average_push_time
if __name__ == '__main__':
sys.exit(main())
| 36.148148 | 79 | 0.653005 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,193 | 0.407445 |
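An invocation sketch for the measurement script above (note it is Python 2 code, as the print statements show); the project and count values are only examples.
import subprocess
subprocess.call(["python2", "git-push-speed.py", "--project", "chromium", "--count", "200", "--verbose"])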
d7b09b3899eab74c7da7d05d1a738239da6a6f8c | 93 | py | Python | backoffice/transactions/utils.py | AlejandroUPC/pythonmicroservices | 9d42bd6dfd9847ad4a8e6029e808de927292c251 | [
"MIT"
] | null | null | null | backoffice/transactions/utils.py | AlejandroUPC/pythonmicroservices | 9d42bd6dfd9847ad4a8e6029e808de927292c251 | [
"MIT"
] | null | null | null | backoffice/transactions/utils.py | AlejandroUPC/pythonmicroservices | 9d42bd6dfd9847ad4a8e6029e808de927292c251 | [
"MIT"
] | null | null | null | import random
def create_random_id():
return str(random.randint(100000,999999999999999)) | 23.25 | 54 | 0.795699 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d7b1f098471299492d3a164aae5bda72d5a2c99e | 113 | py | Python | bitmovin_api_sdk/encoding/infrastructure/kubernetes/configuration/__init__.py | hofmannben/bitmovin-api-sdk-python | 71aae5cd8a31aa0ad54ca07a6f546a624e8686a9 | [
"MIT"
] | null | null | null | bitmovin_api_sdk/encoding/infrastructure/kubernetes/configuration/__init__.py | hofmannben/bitmovin-api-sdk-python | 71aae5cd8a31aa0ad54ca07a6f546a624e8686a9 | [
"MIT"
] | 1 | 2020-07-06T07:13:43.000Z | 2020-07-06T07:13:43.000Z | bitmovin_api_sdk/encoding/infrastructure/kubernetes/configuration/__init__.py | hofmannben/bitmovin-api-sdk-python | 71aae5cd8a31aa0ad54ca07a6f546a624e8686a9 | [
"MIT"
] | 1 | 2020-07-06T07:07:26.000Z | 2020-07-06T07:07:26.000Z | from bitmovin_api_sdk.encoding.infrastructure.kubernetes.configuration.configuration_api import ConfigurationApi
| 56.5 | 112 | 0.920354 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d7b416d06c92253082c35eb75c67b3efcc2cb569 | 3,427 | py | Python | python/cudf/cudf/core/column/methods.py | shridharathi/cudf | 664712eb124e35dd2e8f28c287adbb48fc8049d6 | [
"Apache-2.0"
] | null | null | null | python/cudf/cudf/core/column/methods.py | shridharathi/cudf | 664712eb124e35dd2e8f28c287adbb48fc8049d6 | [
"Apache-2.0"
] | null | null | null | python/cudf/cudf/core/column/methods.py | shridharathi/cudf | 664712eb124e35dd2e8f28c287adbb48fc8049d6 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020, NVIDIA CORPORATION.
from __future__ import annotations
from typing import TYPE_CHECKING, Optional, Union, overload
from typing_extensions import Literal
import cudf
if TYPE_CHECKING:
from cudf.core.column import ColumnBase
class ColumnMethodsMixin:
_column: ColumnBase
_parent: Optional[Union["cudf.Series", "cudf.Index"]]
def __init__(
self,
column: ColumnBase,
parent: Union["cudf.Series", "cudf.Index"] = None,
):
self._column = column
self._parent = parent
@overload
def _return_or_inplace(
self, new_col, inplace: Literal[False], expand=False, retain_index=True
) -> Union["cudf.Series", "cudf.Index"]:
...
@overload
def _return_or_inplace(
self, new_col, expand: bool = False, retain_index: bool = True
) -> Union["cudf.Series", "cudf.Index"]:
...
@overload
def _return_or_inplace(
self, new_col, inplace: Literal[True], expand=False, retain_index=True
) -> None:
...
@overload
def _return_or_inplace(
self,
new_col,
inplace: bool = False,
expand: bool = False,
retain_index: bool = True,
) -> Optional[Union["cudf.Series", "cudf.Index"]]:
...
def _return_or_inplace(
self, new_col, inplace=False, expand=False, retain_index=True
):
"""
Returns an object of the type of the column owner or updates the column
of the owner (Series or Index) to mimic an inplace operation
"""
if inplace:
if self._parent is not None:
self._parent._mimic_inplace(
self._parent.__class__._from_table(
cudf._lib.table.Table({self._parent.name: new_col})
),
inplace=True,
)
return None
else:
self._column._mimic_inplace(new_col, inplace=True)
return None
else:
if self._parent is None:
return new_col
if expand or isinstance(
self._parent, (cudf.DataFrame, cudf.MultiIndex)
):
# This branch indicates the passed as new_col
# is a Table
table = new_col
if isinstance(self._parent, cudf.BaseIndex):
idx = self._parent._constructor_expanddim._from_table(
table=table
)
idx.names = None
return idx
else:
return self._parent._constructor_expanddim(
data=table._data, index=self._parent.index
)
elif isinstance(self._parent, cudf.Series):
if retain_index:
return cudf.Series(
new_col,
name=self._parent.name,
index=self._parent.index,
)
else:
return cudf.Series(new_col, name=self._parent.name)
elif isinstance(self._parent, cudf.BaseIndex):
return cudf.core.index.as_index(
new_col, name=self._parent.name
)
else:
return self._parent._mimic_inplace(new_col, inplace=False)
| 31.440367 | 79 | 0.534578 | 3,170 | 0.925007 | 0 | 0 | 713 | 0.208054 | 0 | 0 | 387 | 0.112927 |
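A rough illustration of the user-facing behaviour the mixin above supports, accessor calls that wrap a new column in a new Series owner; it assumes a working cudf installation with a GPU, and the values are arbitrary.
import cudf
s = cudf.Series(["a", "b", "c"])
print(s.str.upper())  # the accessor builds a new column and returns it wrapped in a new Series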
d7b668f8518c7dd4a692c3180e8c31313bbcc189 | 8,139 | py | Python | spa/tests/dom_helper.py | fergalmoran/dss.api | d1b9fb674b6dbaee9b46b9a3daa2027ab8d28073 | [
"BSD-2-Clause"
] | null | null | null | spa/tests/dom_helper.py | fergalmoran/dss.api | d1b9fb674b6dbaee9b46b9a3daa2027ab8d28073 | [
"BSD-2-Clause"
] | null | null | null | spa/tests/dom_helper.py | fergalmoran/dss.api | d1b9fb674b6dbaee9b46b9a3daa2027ab8d28073 | [
"BSD-2-Clause"
] | null | null | null | from selenium.common.exceptions import NoSuchElementException, TimeoutException
class DomHelper(object):
driver = None
waiter = None
def open_page(self, url):
self.driver.get(url)
def reload_page(self):
self.driver.refresh()
def print_el(self, element):
print('tag: ' + element.tag_name + ' id: ' + element.get_attribute('id') + ' class: ' + element.get_attribute('class') + ' text: ' + element.text)
def get_el(self, selector):
if isinstance(selector, str):
return self.driver.find_element_by_css_selector(selector)
else:
return selector
def get_els(self, selector):
if isinstance(selector, str):
return self.driver.find_elements_by_css_selector(selector)
else:
return selector
def get_child_el(self, parent, selector):
try:
return parent.find_element_by_css_selector(selector)
except NoSuchElementException:
return None
def get_child_els(self, parent, selector):
return parent.find_elements_by_css_selector(selector)
def is_el_present(self, selector):
try:
self.driver.find_element_by_css_selector(selector)
return True
except NoSuchElementException:
return False
def verify_el_present(self, selector):
if not self.is_el_present(selector):
raise Exception('Element %s not found' % selector)
def is_el_visible(self, selector):
return self.get_el(selector).is_displayed()
def click_button(self, selector):
if self.driver.name == 'iPhone':
self.driver.execute_script('$("%s").trigger("tap")' % (selector))
else:
self.get_el(selector).click()
def enter_text_field(self, selector, text):
text_field = self.get_el(selector)
text_field.clear()
text_field.send_keys(text)
def select_checkbox(self, selector, name, deselect=False):
found_checkbox = False
checkboxes = self.get_els(selector)
for checkbox in checkboxes:
if checkbox.get_attribute('name') == name:
found_checkbox = True
if not deselect and not checkbox.is_selected():
checkbox.click()
if deselect and checkbox.is_selected():
checkbox.click()
if not found_checkbox:
raise Exception('Checkbox %s not found.' % (name))
def select_option(self, selector, value):
found_option = False
options = self.get_els(selector)
for option in options:
if option.get_attribute('value') == str(value):
found_option = True
option.click()
if not found_option:
raise Exception('Option %s not found' % (value))
def get_selected_option(self, selector):
options = self.get_els(selector)
for option in options:
if option.is_selected():
return option.get_attribute('value')
def is_option_selected(self, selector, value):
options = self.get_els(selector)
for option in options:
if option.is_selected() != (value == option.get_attribute('value')):
print(option.get_attribute('value'))
return False
return True
def is_text_equal(self, selector, text):
return self.get_el(selector).text == text
def verify_inputs_checked(self, selector, checked):
checkboxes = self.get_els(selector)
for checkbox in checkboxes:
name = checkbox.get_attribute('name')
if checkbox.is_selected() != (name in checked):
raise Exception('Input isnt checked as expected - %s' % (name))
def verify_option_selected(self, selector, value):
if not self.is_option_selected(selector, value):
raise Exception('Option isnt selected as expected')
def verify_radio_value(self, selector, value):
value = str(value)
radios = self.get_els(selector)
for radio in radios:
radio_value = radio.get_attribute('value')
if radio.is_selected() and radio_value != value:
raise Exception('Radio with value %s is checked and shouldnt be' % radio_value)
elif not radio.is_selected() and radio_value == value:
raise Exception('Radio with value %s isnt checked and should be' % radio_value)
def verify_text_field(self, selector, text):
text_field = self.get_el(selector)
value = text_field.get_attribute('value')
if value != text:
raise Exception('Text field contains %s, not %s' % (value, text))
def verify_text_value(self, selector, value):
text_field = self.get_el(selector)
if text_field.get_attribute('value') != value:
raise Exception('Value of %s not equal to "%s" - instead saw "%s"' % (selector, value, text_field.get_attribute('value')))
def verify_text_of_el(self, selector, text):
if not self.is_text_equal(selector, text):
raise Exception('Text of %s not equal to "%s" - instead saw "%s"' % (selector, text, self.get_el(selector).text))
def verify_text_in_els(self, selector, text):
els = self.get_els(selector)
found_text = False
for el in els:
if text in el.text:
found_text = True
if not found_text:
raise Exception('Didnt find text: %s' % (text))
def verify_text_not_in_els(self, selector, text):
els = self.get_els(selector)
found_text = False
for el in els:
if text in el.text:
found_text = True
if found_text:
raise Exception('Found text: %s' % (text))
def is_button_enabled(self, selector):
return (self.get_el(selector).get_attribute('disabled') == 'false')
def check_title(self, title):
return self.driver.title == title or self.driver.title == 'eatdifferent.com: ' + title
def wait_for(self, condition):
self.waiter.until(lambda driver: condition())
def check_num(self, selector, num):
els = self.get_els(selector)
return len(els) == num
def wait_for_num_els(self, selector, num):
try:
self.waiter.until(lambda driver: self.check_num(selector, num))
except TimeoutException:
raise Exception('Never saw %s number of els for %s' % (num, selector))
def wait_for_visible(self, selector):
try:
self.waiter.until(lambda driver: self.is_el_visible(selector))
except TimeoutException:
raise Exception('Never saw element %s become visible' % (selector))
def wait_for_hidden(self, selector):
try:
self.waiter.until(lambda driver: not self.is_el_visible(selector))
except TimeoutException:
raise Exception('Never saw element %s become hidden' % (selector))
def wait_for_button(self, selector):
try:
self.waiter.until(lambda driver: self.is_button_enabled(selector))
except TimeoutException:
raise Exception('Never saw button %s enabled' % (selector))
def wait_for_text(self, selector, text):
try:
self.waiter.until(lambda driver: self.is_text_equal(selector, text))
except TimeoutException:
raise Exception('Never saw text %s for %s' % (text, selector))
def wait_for_el(self, selector):
try:
self.waiter.until(lambda driver: self.is_el_present(selector))
except TimeoutException:
raise Exception('Never saw element %s' % (selector))
def wait_for_title(self, title):
try:
self.waiter.until(lambda driver: self.check_title(title))
except TimeoutException:
raise Exception('Never saw title change to %s' % (title))
def __init__(self, driver, waiter):
self.driver = driver
self.waiter = waiter | 38.03271 | 154 | 0.615678 | 8,057 | 0.989925 | 0 | 0 | 0 | 0 | 0 | 0 | 798 | 0.098046 |
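A small sketch of wiring the helper above to a Selenium driver; the URL and selectors are placeholders, chromedriver is assumed to be installed, and the import path mirrors the repository layout in the record header.
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from spa.tests.dom_helper import DomHelper  # assumed import path
driver = webdriver.Chrome()
dom = DomHelper(driver, WebDriverWait(driver, 10))
dom.open_page("https://example.com/login")        # placeholder URL
dom.enter_text_field("#email", "user@example.com")
dom.click_button("#submit")
dom.wait_for_el(".dashboard")
driver.quit()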
d7b71be599126f3487b3d6c940f7c879926c6044 | 1,491 | py | Python | gravityspytools/search/models.py | Gravity-Spy/gravityspytools | 23ef83e36ed934f7c39440bf43f4d5c7b7b4abb0 | [
"BSD-3-Clause"
] | 4 | 2019-03-11T12:32:24.000Z | 2020-12-01T06:31:39.000Z | gravityspytools/search/models.py | johnwick211/gravityspytools | 23ef83e36ed934f7c39440bf43f4d5c7b7b4abb0 | [
"BSD-3-Clause"
] | 19 | 2018-01-29T21:28:39.000Z | 2020-07-14T18:38:23.000Z | gravityspytools/search/models.py | johnwick211/gravityspytools | 23ef83e36ed934f7c39440bf43f4d5c7b7b4abb0 | [
"BSD-3-Clause"
] | 4 | 2018-02-02T16:47:16.000Z | 2020-12-01T06:31:49.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from django.contrib.postgres.fields import ArrayField
# Create your models here.
"""
class NewSearch(models.Model):
SINGLEVIEW = 'similarityindex'
MULTIVIEW = 'updated_similarity_index'
DATABASE_CHOICES = (
(MULTIVIEW, 'Multiview Model'),
(SINGLEVIEW, 'Single View Model'),
)
H1 = "\'H1\'"
H1L1 = "\'H1\', \'L1\'"
H1L1V1 = "\'H1\', \'L1\', \'V1\'"
L1 = "\'L1\'"
L1V1 = "\'L1\', \'V1\'"
V1 = "\'V1\'"
IFO_CHOICES = (
(H1L1, 'H1 L1'),
(H1, 'H1'),
(H1L1V1, 'H1 L1 V1'),
(L1, 'L1'),
(L1V1, 'L1 V1'),
(V1, 'V1'),
)
database = models.ChoiceField(choices=DATABASE_CHOICES,)
howmany = models.IntegerField(label='How many similar images would you like to return', max_value=500, min_value=1)
zooid = models.CharField(label = 'This is the Zooniverse assigned random ID of the image (an integer value)', max_length=10, required=False)
imageid = models.CharField(label='The GravitySpy uniqueid (this is the 10 character hash that uniquely identifies all gravity spy images)', max_length=10, required=False)
ifo = models.ChoiceField(choices=IFO_CHOICES,)
user = models.ForeignKey(User)
new_subjects = ArrayField(models.CharField(max_length=10), blank=True)
created_at = models.DateTimeField(auto_now_add=True)
"""
| 31.723404 | 174 | 0.646546 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,319 | 0.884641 |
d7b7548d8ddf5ce24d29bb3004f66eaa2e7d0d79 | 776 | py | Python | simple_menu/simple_encoder.py | mexclub/simple_menu | 9e37b2101c74f451270ceab0f50e19457562d20a | [
"MIT"
] | null | null | null | simple_menu/simple_encoder.py | mexclub/simple_menu | 9e37b2101c74f451270ceab0f50e19457562d20a | [
"MIT"
] | null | null | null | simple_menu/simple_encoder.py | mexclub/simple_menu | 9e37b2101c74f451270ceab0f50e19457562d20a | [
"MIT"
] | 3 | 2020-01-07T14:36:31.000Z | 2021-01-04T15:37:30.000Z | from machine import Pin
class simple_encoder():
def __init__(self, ra, rb, pin_irq):
self.ra = ra
self.rb = rb
self.counter = 0
self.ra.irq(trigger=pin_irq, handler=self.turn)
self.rb.irq(trigger=pin_irq, handler=self.turn)
def turn(self, pin):
changed = False
enc_turn = 0
while (not self.ra.value()) or (not self.rb.value()):
if not changed:
if self.ra.value() == pin.value():
enc_turn = 2
self.counter = self.counter + 1
if self.rb.value() == pin.value():
enc_turn = 1
self.counter = self.counter - 1
changed = True
return True
| 32.333333 | 62 | 0.487113 | 747 | 0.962629 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
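A MicroPython wiring sketch for the encoder class above; the pin numbers and trigger are only an example.
import time
from machine import Pin
from simple_encoder import simple_encoder  # assumed module name, per the repo layout above
ra = Pin(12, Pin.IN, Pin.PULL_UP)   # placeholder pins
rb = Pin(13, Pin.IN, Pin.PULL_UP)
enc = simple_encoder(ra, rb, Pin.IRQ_FALLING)
while True:
    print("position:", enc.counter)
    time.sleep(0.5)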
d7b9607848de2585415aac4ac43c5891e9257879 | 7,928 | py | Python | Final/prediction/nb1.py | NaguGowda/machin-learning- | dd04f44a06d6f83e58ed6eb4d69db09620040e49 | [
"Apache-2.0"
] | null | null | null | Final/prediction/nb1.py | NaguGowda/machin-learning- | dd04f44a06d6f83e58ed6eb4d69db09620040e49 | [
"Apache-2.0"
] | null | null | null | Final/prediction/nb1.py | NaguGowda/machin-learning- | dd04f44a06d6f83e58ed6eb4d69db09620040e49 | [
"Apache-2.0"
] | null | null | null | # Example of Naive Bayes implemented from Scratch in Python
import csv
import random
import math
import xgboost as xgb
import matplotlib.pyplot as plt
import numpy as np
def loadCsv(filename):
lines = csv.reader(open(filename, "r"))
dataset = list(lines)
for i in range(len(dataset)):
dataset[i] = [float(x) for x in dataset[i]]
return dataset
def loadDataset_ckd(filename, trainingSet=[]):
lines = csv.reader(open(filename, "r"))
dataset = list(lines)
#print(len(dataset),range(len(dataset)))
for x in range(len(dataset)):
for y in range(15):
dataset[x][y] = float(dataset[x][y])
trainingSet.append(dataset[x])
def loadDataset_ckd1(filename, testSet=[]):
lines1 = csv.reader(open(filename, "r"))
dataset1 = list(lines1)
#print(len(dataset1),range(len(dataset1)))
for x in range(len(dataset1)):
for y in range(15):
dataset1[x][y] = float(dataset1[x][y])
testSet.append(dataset1[x])
def loadDataset_ml(filename, trainingSet=[]):
lines = csv.reader(open(filename, "r"))
dataset = list(lines)
for x in range(len(dataset)):
for y in range(9):
dataset[x][y] = float(dataset[x][y])
trainingSet.append(dataset[x])
def loadDataset_hd(filename, trainingSet=[]):
lines = csv.reader(open(filename, "r"))
dataset = list(lines)
for x in range(len(dataset)):
for y in range(12):
dataset[x][y] = float(dataset[x][y])
trainingSet.append(dataset[x])
def loadDataset_ml1(filename, testSet=[]):
lines1 = csv.reader(open(filename, "r"))
dataset1 = list(lines1)
#print(len(dataset1),range(len(dataset1)))
for x in range(len(dataset1)):
for y in range(9):
dataset1[x][y] = float(dataset1[x][y])
testSet.append(dataset1[x])
def loadDataset_hd1(filename, testSet=[]):
lines1 = csv.reader(open(filename, "r"))
dataset1 = list(lines1)
#print(len(dataset1),range(len(dataset1)))
for x in range(len(dataset1)):
for y in range(12):
dataset1[x][y] = float(dataset1[x][y])
testSet.append(dataset1[x])
def splitDataset(dataset, splitRatio):
trainSize = int(len(dataset) * splitRatio)
trainSet = []
copy = list(dataset)
while len(trainSet) < trainSize:
index = random.randrange(len(copy))
trainSet.append(copy.pop(index))
return [trainSet, copy]
def separateByClass(dataset):
separated = {}
for i in range(len(dataset)):
vector = dataset[i]
if (vector[-1] not in separated):
separated[vector[-1]] = []
separated[vector[-1]].append(vector)
return separated
def mean(numbers):
return sum(numbers)/float(len(numbers))
def stdev(numbers):
avg = mean(numbers)
variance = sum([pow(x-avg,2) for x in numbers])/float(len(numbers)-1)
return math.sqrt(variance)
def summarize(dataset):
summaries = [(mean(attribute), stdev(attribute)) for attribute in zip(*dataset)]
del summaries[-1]
return summaries
def summarizeByClass(dataset):
separated = separateByClass(dataset)
summaries = {}
#print(separated)
for classValue, instances in separated.items():
#print(instances)
summaries[classValue] = summarize(instances)
return summaries
def calculateProbability(x, mean, stdev):
#print(x,mean,stdev)
if(x==0 and mean==0 and stdev==0):
x = 1
mean = 1
stdev = 1
#print(x,mean,stdev)
part2 = (2*math.pow(stdev,2))
if(part2==0) :
part2 = 0.1
#print(part2)
exponent = math.exp(-(math.pow(x-mean,2)/part2))
part3 = (math.sqrt(2*math.pi) * stdev)
if(part3==0) :
part3 = 0.1
fin = (1 / part3) * exponent
return fin
def calculateClassProbabilities(summaries, inputVector):
probabilities = {}
for classValue, classSummaries in summaries.items():
probabilities[classValue] = 1
for i in range(len(classSummaries)):
mean, stdev = classSummaries[i]
x = inputVector[i]
probabilities[classValue] *= calculateProbability(x, mean, stdev)
return probabilities
def predict(summaries, inputVector):
probabilities = calculateClassProbabilities(summaries, inputVector)
bestLabel, bestProb = None, -1
for classValue, probability in probabilities.items():
if bestLabel is None or probability > bestProb:
bestProb = probability
bestLabel = classValue
return bestLabel
def getPredictions(summaries, testSet):
predictions = []
for i in range(len(testSet)):
result = predict(summaries, testSet[i])
predictions.append(result)
return predictions
def getAccuracy(testSet, predictions):
correct = 0
for i in range(len(testSet)):
if testSet[i][-1] == predictions[i]:
correct += 1
return (correct/float(len(testSet))) * 100.0
def main():
print ('\n~~~~~~~~~~~');
#checking of presence of ckd disease
# prepare data
matched_count = 0 ;
total_datas = 0
trainingSet=[]
testSet=[]
loadDataset_ckd('dataset_ckd_train.csv', trainingSet)
total_datas = total_datas+int(repr(len(trainingSet)))
loadDataset_ckd1('dataset_ckd_test.csv', testSet)
print ('Train set of ckd: ',repr(len(trainingSet)))
#print ('Train set: ', trainingSet)
#print ('Test set: ', repr(len(testSet)))
print ('Input for CKD disease related parameters :\n ',testSet)
summaries = summarizeByClass(trainingSet)
matched_count = matched_count+int(repr(len(summaries)))
print('matches: ',repr(len(summaries)))
# test model
predictions = getPredictions(summaries, testSet)
#print('> predicted=' , predictions)
print('> disease presence =' , predictions )
accuracy = getAccuracy(testSet, predictions)
#print('Accuracy: {0}%').format(accuracy)
#print('Accuracy: ',accuracy)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print ('\n~~~~~~~~~~~');
#checking of presence of diabetes disease
trainingSet=[]
testSet=[]
loadDataset_ml('dataset_diabetes_train.csv', trainingSet)
total_datas = total_datas+int(repr(len(trainingSet)))
loadDataset_ml1('dataset_diabetes_test.csv', testSet)
print ('Train set of diabetes: ',repr(len(trainingSet)))
print ('Input for Diabetes disease related parameters :\n ',testSet)
#print(trainingSet)
#print(testSet)
# prepare model
summaries = summarizeByClass(trainingSet)
#print(summaries)
matched_count = matched_count+int(repr(len(summaries)))
print('matches: ',repr(len(summaries)))
# test model
predictions = getPredictions(summaries, testSet)
#print('> predicted=' , predictions)
print('> disease presence =' , predictions )
accuracy = getAccuracy(testSet, predictions)
#print('Accuracy: {0}%').format(accuracy)
#print('Accuracy: ',accuracy)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print ('\n~~~~~~~~~~~');
#checking of presence of heart disease
trainingSet=[]
testSet=[]
loadDataset_hd('dataset_heartdisease_train.csv', trainingSet)
total_datas = total_datas+int(repr(len(trainingSet)))
loadDataset_hd1('dataset_heartdisease_test.csv', testSet)
print ('Train set of heart disease: ',repr(len(trainingSet)))
print ('Input for heart disease related parameters :\n ',testSet)
summaries = summarizeByClass(trainingSet)
#print(summaries)
matched_count = matched_count+int(repr(len(summaries)))
print('matches: ',repr(len(summaries)))
# test model
predictions = getPredictions(summaries, testSet)
#print('> predicted=' , predictions)
print('> disease presence =' , predictions )
accuracy = getAccuracy(testSet, predictions)
#print('Accuracy: {0}%').format(accuracy)
#print('Accuracy: ',accuracy)
print('Total Datas',total_datas,'Matched Accuracy: ',matched_count)
main() | 31.839357 | 101 | 0.645686 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,778 | 0.224268 |
d7ba1f7895f8a28f6a1b180ed84e6be8b3b520b7 | 7,367 | py | Python | wingstructure/data/wing.py | helo9/wingstructure | ff82eb0b87e3b5ececff39895f959bfef468e7c3 | [
"MIT"
] | 7 | 2019-01-02T16:47:31.000Z | 2020-10-10T10:06:15.000Z | wingstructure/data/wing.py | helo9/wingstructure | ff82eb0b87e3b5ececff39895f959bfef468e7c3 | [
"MIT"
] | 9 | 2019-01-13T20:11:23.000Z | 2019-10-10T21:38:58.000Z | wingstructure/data/wing.py | helo9/wingstructure | ff82eb0b87e3b5ececff39895f959bfef468e7c3 | [
"MIT"
] | 1 | 2018-12-27T14:20:36.000Z | 2018-12-27T14:20:36.000Z | from collections import namedtuple
from copy import deepcopy
import numpy as np
# data type for 3D-Coordinates
Point = namedtuple('Point', 'x y z')
# monkey patch function
def serializesection(self):
data = dict(self._asdict())
data['pos'] = dict(self.pos._asdict())
return data
class _Wing:
"""
A data structure for multi-trapez wing definitions.
"""
_Section = namedtuple('Section', ['pos', 'chord', 'twist', 'airfoil'])
_Section.serialize = serializesection
def __init__(self, pos=(0.0, 0.0, 0.0)):
self.x, self.y, self.z = pos
self.sections = []
def append(self, pos=(0.0, 0.0, 0.0), chord=1.0, twist=0.0, airfoil=''):
self.sections.append(
self._Section(Point(*pos), chord, twist, airfoil)
)
def get_mac(self):
"""Calculate mean aerodynamic chord.
Returns
-------
pos: arraylike
leading edge position of mean aerodynamic chord
mac: float
mac length
Notes
-----
Implements formulas reported in http://dx.doi.org/10.1063/1.4951901
"""
pos = np.zeros(3)
area = 0.0
mac = 0.0
lastsec = None
for sec in self.sections:
if lastsec is None:
lastsec = sec
continue
# short aliases for usage in formulas
x1, x2 = lastsec.pos.x, sec.pos.x
y1, y2 = lastsec.pos.y, sec.pos.y
c1, c2 = lastsec.chord, sec.chord
# segment properties
S = (c1+c2)/2 * (y2-y1)
λ = c2 / c1
segmac = 2/3 * c1 * (λ**2 + λ + 1) / (λ + 1)
segx = x1 + (x2-x1) * (1+2*λ)/(3+3*λ)
segy = y1 + (y2-y1) * (1+2*λ)/(3+3*λ)
# sum up values weighted by segment area
pos += np.array([segx, segy, 0]) * S
mac += segmac * S
area += S
lastsec = sec
pos /= area
mac /= area
return pos, mac
@property
def span(self):
"""Get span of wing."""
return 2*max((sec.pos.y for sec in self.sections))
@property
def area(self):
"""Get wing area."""
span_positions = [sec.pos.y for sec in self.sections]
chord_lengths = [sec.chord for sec in self.sections]
area = np.trapz(chord_lengths, span_positions)
return 2*area
@property
def aspectratio(self):
"""Get aspect ratio."""
return self.span**2/self.area
@property
def mac(self):
"""Get mac length"""
return self.get_mac()[1]
class Wing(_Wing):
"""A object representing lift generating airplane parts.
Parameters
----------
pos: float
coordinate system offset
rot: float
"""
_ControlSurface = namedtuple('ControlSurface',
['pos1', 'pos2', 'depth1', 'depth2', 'cstype'])
def __init__(self, pos=(0.0, 0.0, 0.0)):
super().__init__(pos)
self.controlsurfaces = {}
def add_controlsurface(self, name, pos1, pos2, depth1, depth2, cstype):
"""Add controlsurface to Wing instance
Parameters
----------
name : str
identifier for control surface
pos1 : float
starting position (spanwise)
pos2 : float
end position (spanwise)
depth1 : float
start depth or chordwise position (depends on type)
depth2 : float
end depth or chordwise position (depends on type)
cstype : str
use one of the following type strings: flap, spoiler, airbrake
"""
self.controlsurfaces[name] = self._ControlSurface(
pos1, pos2, depth1, depth2, cstype)
@property
def chords(self):
return np.array([sec.chord for sec in self.sections])
@property
def xs(self):
return np.array([sec.pos.x for sec in self.sections])
@property
def ys(self):
return np.array([sec.pos.y for sec in self.sections])
@property
def twists(self):
return np.array([sec.twist for sec in self.sections])
@property
def airfoils(self):
return np.array([sec.airfoil for sec in self.sections])
def within_control(self, csname, y):
y = np.abs(y)
try:
cs = self.controlsurfaces[csname]
return (cs.pos1 <= y) & (y <= cs.pos2)
except KeyError:
raise KeyError('{} is not a control surface'.format(csname))
def within_airbrake(self, ys):
ys = np.abs(ys)
within_ab = np.full_like(ys, False, dtype=bool)
for cs in self.controlsurfaces.values():
if cs.cstype in ('airbrake', 'spoiler'):
within_tmp = (cs.pos1 <= ys) & (cs.pos2 >= ys)
within_ab = np.where(within_tmp, True, within_ab)
return within_ab
def serialize(self):
data = {
'pos': {'x': self.x, 'y': self.y, 'z': self.z},
'sections': [deepcopy(sec.serialize()) for sec in self.sections],
'controlsurfaces': {name: dict(cs._asdict()) for name, cs in self.controlsurfaces.items()}
}
return data
@classmethod
def load_from_file(cls, filename):
import yaml
with open(filename, 'r') as datfile:
wingdata = yaml.safe_load(datfile)
return cls.deserialize(wingdata['wing'])
@classmethod
def deserialize(cls, adict):
"""Create new Wing instance from dict
Parameters
----------
adict : dict
dictionary containing wing data
Returns
-------
Wing
instance object
"""
# create Wing instance
wing = cls(pos=Point(**adict['pos']))
# generate sections
for secdict in adict['sections']:
secdict_ = deepcopy(secdict)
secdict_['pos'] = Point(**secdict_['pos'])
wing.append(**secdict_)
# add control surfaces
try:
for name, csdict in adict['controlsurfaces'].items():
wing.add_controlsurface(name, **csdict)
except KeyError:
pass
return wing
def plot(self):
import matplotlib.pyplot as plt
# draw centerline
#plt.axvline(x=0, linestyle='-.')
# draw sections
x_positions = []
y_positions = []
chord_lengths = []
for section in self.sections:
x = section.pos.x+self.x
y = section.pos.y
chord = section.chord
plt.plot((y, y), (x, x+chord), 'r')
x_positions.append(x)
y_positions.append(y)
chord_lengths.append(chord)
y_positions = np.array(y_positions)
# draw leading edge
plt.plot(y_positions, np.array(x_positions), 'b' )
# draw trailing edge
plt.plot(y_positions, np.array(x_positions)+np.array(chord_lengths), 'b')
# format
plt.axis('equal')
plt.axis('off')
plt.gca().invert_yaxis()
plt.xlim(-max(y_positions)/100, max(y_positions)+1)
| 26.692029 | 102 | 0.529659 | 7,071 | 0.95878 | 0 | 0 | 2,083 | 0.282441 | 0 | 0 | 2,025 | 0.274576 |
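A short usage sketch for the Wing class above, building a two-section trapezoid wing and querying its derived properties; all numbers and airfoil names are arbitrary.
from wingstructure.data.wing import Wing  # import path per the repo layout above
wing = Wing()
wing.append(pos=(0.0, 0.0, 0.0), chord=1.0, airfoil='FX 61-184')
wing.append(pos=(0.2, 7.5, 0.0), chord=0.6, airfoil='FX 60-126')
print('area', wing.area)
print('span', wing.span)
print('aspect ratio', wing.aspectratio)
mac_pos, mac = wing.get_mac()
print('MAC', mac, 'located at', mac_pos)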
d7ba596dc0cf678dc2c3cf189ee8dd90496c1aa2 | 3,103 | py | Python | crossdock/problems/experiment/crossdock_15_25_15_5_5.py | krerkkiat/icpr-2019-public | f3023c009f3335ce58204a45c270cfeb6ef19367 | [
"BSD-3-Clause"
] | null | null | null | crossdock/problems/experiment/crossdock_15_25_15_5_5.py | krerkkiat/icpr-2019-public | f3023c009f3335ce58204a45c270cfeb6ef19367 | [
"BSD-3-Clause"
] | null | null | null | crossdock/problems/experiment/crossdock_15_25_15_5_5.py | krerkkiat/icpr-2019-public | f3023c009f3335ce58204a45c270cfeb6ef19367 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Cross-docking truck data.
This data is generated by a generate_dataset.py script.
Created: Feb 18, 2019 at 07:30:02 PM
Copyright (c) 2022, Krerkkiat Chusap
This source code is licensed under BSD 3-Clause "New" or "Revised" License (see LICENSE for details).
"""
from pathlib import Path
# Problem data.
name = Path(__file__).stem
inbound_gate_count = 5
outbound_gate_count = 5
# Parameters used to generate this data.
number_of_total_product_types = 15
product_per_truck_rate = 0.35
possible_inbound_total_product = [250, 340]
# Truck data.
_inbound_truck_raw_data = [
[0, 0, 0, 0, 0, 0, 184, 12, 0, 0, 3, 0, 51, 0, 0],
[76, 0, 0, 0, 0, 0, 0, 0, 41, 73, 42, 0, 20, 88, 0],
[0, 113, 74, 0, 83, 18, 0, 0, 0, 0, 30, 22, 0, 0, 0],
[0, 41, 15, 7, 0, 0, 44, 113, 0, 0, 0, 120, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 33, 211, 0, 6, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 58, 0, 0, 9, 38, 23, 0, 0, 93, 119, 0],
[69, 0, 0, 26, 0, 20, 0, 0, 205, 0, 0, 0, 6, 0, 14],
[0, 62, 82, 71, 0, 92, 2, 0, 0, 0, 0, 0, 31, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 54, 50, 0, 0, 22, 124, 0],
[0, 0, 0, 141, 0, 0, 0, 0, 27, 0, 3, 0, 113, 10, 46],
[0, 0, 0, 143, 0, 0, 53, 21, 0, 0, 0, 33, 0, 0, 0],
[0, 76, 32, 47, 0, 7, 0, 0, 0, 87, 0, 0, 0, 0, 91],
[0, 0, 42, 0, 0, 115, 47, 0, 0, 0, 0, 0, 0, 46, 0],
[9, 47, 0, 0, 0, 93, 4, 0, 46, 141, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 60, 0, 46, 0, 47, 39, 98, 50, 0, 0, 0],
]
_outbound_truck_raw_data = [
[0, 16, 11, 41, 5, 28, 11, 32, 12, 13, 2, 2, 16, 17, 5],
[9, 56, 11, 16, 9, 2, 2, 4, 99, 20, 9, 33, 1, 2, 15],
[1, 7, 8, 0, 1, 22, 19, 7, 5, 8, 0, 14, 50, 10, 13],
[2, 5, 4, 6, 1, 12, 4, 47, 2, 11, 2, 6, 4, 29, 0],
[7, 4, 6, 8, 58, 12, 5, 4, 1, 0, 3, 6, 14, 22, 5],
[4, 4, 16, 5, 0, 4, 1, 11, 2, 6, 2, 9, 3, 2, 1],
[15, 10, 24, 23, 7, 5, 3, 27, 33, 16, 10, 9, 44, 6, 10],
[5, 0, 5, 14, 1, 0, 35, 30, 36, 41, 16, 4, 4, 20, 1],
[4, 21, 12, 25, 2, 7, 13, 14, 8, 30, 2, 4, 6, 27, 6],
[22, 42, 1, 47, 8, 33, 0, 7, 13, 2, 20, 17, 50, 9, 4],
[10, 15, 3, 55, 17, 1, 23, 2, 11, 56, 9, 7, 0, 12, 8],
[14, 18, 1, 11, 11, 5, 19, 27, 34, 6, 1, 3, 4, 31, 0],
[1, 1, 45, 29, 11, 14, 62, 2, 5, 10, 1, 7, 13, 7, 23],
[4, 7, 26, 46, 15, 3, 1, 1, 13, 21, 1, 0, 4, 20, 1],
[4, 17, 7, 12, 11, 19, 7, 2, 4, 69, 3, 18, 3, 16, 16],
[4, 5, 8, 10, 8, 1, 28, 59, 2, 23, 7, 10, 11, 2, 1],
[18, 6, 12, 7, 1, 27, 2, 5, 4, 28, 7, 27, 2, 26, 7],
[6, 2, 10, 3, 13, 61, 2, 1, 52, 5, 1, 1, 45, 39, 5],
[1, 4, 4, 12, 5, 24, 16, 7, 16, 22, 15, 1, 4, 6, 2],
[1, 35, 2, 3, 1, 28, 55, 7, 16, 9, 4, 6, 9, 2, 8],
[0, 13, 2, 13, 3, 5, 1, 11, 2, 12, 18, 1, 25, 17, 1],
[4, 5, 5, 15, 8, 4, 31, 1, 12, 7, 18, 30, 3, 34, 2],
[4, 31, 4, 3, 2, 16, 10, 22, 51, 1, 18, 5, 2, 9, 12],
[2, 13, 13, 22, 0, 9, 30, 36, 9, 2, 6, 5, 19, 22, 3],
[12, 2, 5, 9, 3, 3, 33, 0, 16, 1, 1, 0, 0, 0, 2],
]
# Derived data.
inbound_truck_count = len(_inbound_truck_raw_data)
outbound_truck_count = len(_outbound_truck_raw_data)
total_truck_count = inbound_truck_count + outbound_truck_count
| 41.373333 | 100 | 0.477602 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 373 | 0.120206 |
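A small sketch of consuming the generated problem module above; the import path mirrors the repository layout in the record header.
from crossdock.problems.experiment import crossdock_15_25_15_5_5 as problem
print(problem.name, '-', problem.total_truck_count, 'trucks,', problem.inbound_gate_count, 'inbound gates')
inbound_loads = [sum(truck) for truck in problem._inbound_truck_raw_data]
print('pallets per inbound truck:', inbound_loads)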
d7ba6d2580da27c50c90b24f37ff13d8bc2e1d42 | 1,121 | py | Python | CIFAR10/train.py | aidezone/pytorch-learning | c9c5c185e4de71503002083e271c6b19321520c1 | [
"MIT"
] | null | null | null | CIFAR10/train.py | aidezone/pytorch-learning | c9c5c185e4de71503002083e271c6b19321520c1 | [
"MIT"
] | null | null | null | CIFAR10/train.py | aidezone/pytorch-learning | c9c5c185e4de71503002083e271c6b19321520c1 | [
"MIT"
] | null | null | null | import base
# Start the actual model training
import torch
import torch.nn as nn
# Instantiate the network
net = base.MyCustomNet()
# Define the number of training epochs over the dataset
max_epoch = 5
# Define the learning rate
learning_rate = 0.001
# Define the loss function and the optimizer
import torch.optim as optim
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9)
# Run the training loop
for epoch in range(max_epoch): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(base.trainloader, 0):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if i % 2000 == 1999: # print every 2000 mini-batches
print(f'[{epoch + 1}, {i + 1:5d}] loss: {running_loss / 2000:.3f}')
running_loss = 0.0
print('Finished Training')
PATH = './data/cifar_net.pth'
torch.save(net.state_dict(), PATH)
| 22.42 | 79 | 0.6405 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 458 | 0.376955 |
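A follow-up sketch that reloads the checkpoint saved above for evaluation; it assumes the companion base module also exposes a testloader next to trainloader.
import torch
import base
net = base.MyCustomNet()
net.load_state_dict(torch.load('./data/cifar_net.pth'))
net.eval()
correct = total = 0
with torch.no_grad():
    for images, labels in base.testloader:  # testloader is an assumption
        predicted = net(images).argmax(dim=1)
        correct += (predicted == labels).sum().item()
        total += labels.size(0)
print('test accuracy: {:.1f}%'.format(100 * correct / total))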
d7bae06c160ec85487c4a5b05e1c32a823c606b8 | 14,934 | py | Python | coord2vec/models/baselines/coord2vec_model.py | jonzarecki/coord2vec | 4f267fdd87af7b3d3558ca834b88e9ab7c309c18 | [
"Apache-2.0"
] | null | null | null | coord2vec/models/baselines/coord2vec_model.py | jonzarecki/coord2vec | 4f267fdd87af7b3d3558ca834b88e9ab7c309c18 | [
"Apache-2.0"
] | null | null | null | coord2vec/models/baselines/coord2vec_model.py | jonzarecki/coord2vec | 4f267fdd87af7b3d3558ca834b88e9ab7c309c18 | [
"Apache-2.0"
] | 1 | 2021-01-25T09:21:17.000Z | 2021-01-25T09:21:17.000Z | import os
import random
from typing import List, Tuple, Callable
import torch
from ignite.contrib.handlers import ProgressBar, LRScheduler
from ignite.handlers import ModelCheckpoint
from sklearn.base import BaseEstimator, TransformerMixin
from torch import nn
from torch import optim
from torch.nn.modules.loss import _Loss, L1Loss
from torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau, MultiStepLR
from torch.utils.data import DataLoader
from ignite.metrics import Metric, RunningAverage
from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator
from coord2vec.common.itertools import flatten
from coord2vec.common.mtl.metrics import EmbeddingData, DistanceCorrelation, RootMeanSquaredError
from coord2vec import config
from coord2vec.config import HALF_TILE_LENGTH, TENSORBOARD_DIR
from coord2vec.feature_extraction.features_builders import FeaturesBuilder
from coord2vec.image_extraction.tile_image import generate_static_maps, render_multi_channel
from coord2vec.image_extraction.tile_utils import build_tile_extent
from coord2vec.models.architectures import dual_fc_head, multihead_model, simple_cnn, simple_head
from coord2vec.models.baselines.tensorboard_utils import TrainExample, \
create_summary_writer, add_metrics_to_tensorboard, add_embedding_visualization, build_example_image_figure
from coord2vec.models.data_loading.tile_features_loader import TileFeaturesDataset
from coord2vec.models.losses import MultiheadLoss
from coord2vec.models.resnet import wide_resnet50_2, resnet18, resnet50, resnet34
class Coord2Vec(BaseEstimator, TransformerMixin):
"""
Wrapper for the coord2vec algorithm
Project's "main"
"""
def __init__(self, feature_builder: FeaturesBuilder, n_channels: int, losses: List[_Loss] = None,
losses_weights: List[float] = None, log_loss: bool = False, exponent_heads: bool = False,
cnn_model: Callable = resnet34, model_save_path: str = None,
embedding_dim: int = 128, multi_gpu: bool = False, cuda_device: int = 0, lr: float = 1e-4,
lr_steps: List[int] = None, lr_gamma: float = 0.1):
"""
Args:
feature_builder: the FeaturesBuilder that the target features were created with
n_channels: the number of channels in the input images
losses: a list of losses to use. must be same length of the number of features
losses_weights: weights to give the different losses. if None then equals weights of 1
log_loss: whether to use the log function on the loss before back propagation
embedding_dim: dimension of the embedding to create
multi_gpu: whether to use more than one GPU or not
cuda_device: if multi_gpu==False, choose the GPU to work on
lr: learning rate for the Adam optimizer
lr_steps: Training steps in which we apply a multiply by lr_gamma to the LR
lr_gamma: The multiplier we multiply the LR
"""
self.model_save_path = model_save_path
self.losses_weights = losses_weights
self.log_loss = log_loss
self.exponent_head = exponent_heads
self.embedding_dim = embedding_dim
self.cnn_model = cnn_model
self.n_channels = n_channels
self.multi_gpu = multi_gpu
if not multi_gpu:
self.device = torch.device(f'cuda:{cuda_device}' if torch.cuda.is_available() else 'cpu')
else:
self.device = torch.device(f'cuda' if torch.cuda.is_available() else 'cpu')
# self.device = 'cpu'
self.feature_names = feature_builder.features_names
self.n_features = len(self.feature_names)
# create L1 losses if not supplied
self.losses = [L1Loss() for _ in range(self.n_features)] if losses is None else losses
assert len(self.losses) == self.n_features, "Number of losses must be equal to number of features"
# create the model
self.model = self._build_model(cnn_model, self.n_channels, self.n_features)
if multi_gpu:
self.model = nn.DataParallel(self.model)
self.model.to(self.device)
self.optimizer = optim.Adam(self.model.parameters(), lr=lr)
self.step_scheduler = MultiStepLR(self.optimizer, milestones=lr_steps, gamma=lr_gamma)
def fit(self, train_dataset: TileFeaturesDataset,
val_dataset: TileFeaturesDataset = None,
epochs: int = 10,
batch_size: int = 10,
num_workers: int = 10,
evaluate_every: int = 300,
save_every: int = 1000):
"""
Args:
train_dataset: The dataset object for training data
val_dataset: The dataset object for validation data, optional
epochs: number of epochs to train the network
batch_size: batch size for the network
num_workers: number of workers for the network
evaluate_every: every how many steps to run evaluation
save_every: every how many steps to save the model
Returns:
a trained pytorch model
"""
# create data loader
train_data_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
num_workers=num_workers)
if val_dataset is not None:
val_data_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=True,
num_workers=num_workers)
else:
val_data_loader = None
# create the model
criterion = MultiheadLoss(self.losses, use_log=self.log_loss, weights=self.losses_weights).to(self.device)
# create tensorboard
writer = create_summary_writer(self.model, train_data_loader, log_dir=TENSORBOARD_DIR)
def multihead_loss_func(y_pred, y):
return criterion(y_pred[1], torch.split(y, 1, dim=1))[0]
def multihead_output_transform(x, y, y_pred, *args):
embedding, output = y_pred
y_pred_tensor = torch.stack(output).squeeze(2).transpose(0, 1)
y_tensor = y
data = x
with torch.no_grad():
loss, multi_losses = criterion(output, torch.split(y, 1, dim=1))
return data, embedding, loss, multi_losses, y_pred_tensor, y_tensor
eval_metrics = {'rmse': RootMeanSquaredError(), # 'corr': DistanceCorrelation(),
# 'embedding_data': EmbeddingData()
}
train_metrics = {'rmse': RootMeanSquaredError() # , 'corr': DistanceCorrelation()
}
trainer = create_supervised_trainer(self.model, self.optimizer, multihead_loss_func, device=self.device,
output_transform=multihead_output_transform)
for name, metric in train_metrics.items(): # Calculate metrics also on trainer
metric.attach(trainer, name)
evaluator = create_supervised_evaluator(self.model,
metrics=eval_metrics,
device=self.device,
output_transform=multihead_output_transform)
if self.model_save_path is not None:
# do we want to use it ? from Ignite
checkpoint_handler = ModelCheckpoint(self.model_save_path, 'checkpoint',
save_interval=save_every,
n_saved=10, require_empty=False, create_dir=True)
pbar = ProgressBar()
# RunningAverage(output_transform=lambda x: x[2])
pbar.attach(trainer)
scheduler = LRScheduler(self.step_scheduler)
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
trainer.add_event_handler(Events.EPOCH_COMPLETED, checkpoint_handler, {'mymodel': self.model})
@trainer.on(Events.EPOCH_STARTED)
def init_state_params(engine):
engine.state.plusplus_ex, engine.state.plusminus_ex = [None] * self.n_features, [None] * self.n_features
engine.state.minusminus_ex, engine.state.minusplus_ex = [None] * self.n_features, [None] * self.n_features
@trainer.on(Events.ITERATION_COMPLETED)
def log_training_loss(engine):
writer.add_scalar('General/LR', scheduler.get_param(), global_step=engine.state.iteration)
_, embedding, loss, multi_losses, y_pred_tensor, y_tensor = engine.state.output
images_batch, features_batch = engine.state.batch
plusplus_ex, plusminus_ex = engine.state.plusplus_ex, engine.state.plusminus_ex
minusminus_ex, minusplus_ex = engine.state.minusminus_ex, engine.state.minusplus_ex
writer.add_scalar('General/Train Loss', loss, global_step=engine.state.iteration)
feat_diff = (y_pred_tensor - y_tensor) # / y_tensor + 1
feat_sum = y_pred_tensor + y_tensor
for j in range(self.n_features):
writer.add_scalar(f'Multiple Losses/{self.feature_names[j]}', multi_losses[j],
global_step=engine.state.iteration)
for i in range(len(images_batch)):
itm_diff, itm_sum = feat_diff[i][j].item(), feat_sum[i][j].item()
itm_pred, itm_actual = y_pred_tensor[i][j].item(), y_tensor[i][j].item()
ex = TrainExample(images_batch[i], predicted=itm_pred, actual=itm_actual, sum=itm_sum,
diff=itm_diff)
if minusminus_ex[j] is None or minusminus_ex[j].sum > itm_sum:
engine.state.minusminus_ex[j] = ex
elif plusminus_ex[j] is None or plusminus_ex[j].diff < itm_diff:
engine.state.plusminus_ex[j] = ex
elif minusplus_ex[j] is None or minusplus_ex[j].diff > itm_diff:
engine.state.minusplus_ex[j] = ex
elif plusplus_ex[j] is None or plusplus_ex[j].sum < itm_sum:
engine.state.plusplus_ex[j] = ex
@trainer.on(Events.EPOCH_COMPLETED)
def log_training_results(engine):
global_step = engine.state.iteration
metrics = engine.state.metrics # already attached to the trainer engine to save
# can add more metrics here
add_metrics_to_tensorboard(metrics, writer, self.feature_names, global_step, log_str="train")
# plot min-max examples
plusplus_ex, plusminus_ex = engine.state.plusplus_ex, engine.state.plusminus_ex
minusminus_ex, minusplus_ex = engine.state.minusminus_ex, engine.state.minusplus_ex
for j in range(self.n_features):
if plusplus_ex[j] is None:
continue
writer.add_figure(tag=f"{self.feature_names[j]}/plusplus",
figure=build_example_image_figure(plusplus_ex[j]), global_step=global_step)
writer.add_figure(tag=f"{self.feature_names[j]}/plusminus",
figure=build_example_image_figure(plusminus_ex[j]), global_step=global_step)
writer.add_figure(tag=f"{self.feature_names[j]}/minusminus",
figure=build_example_image_figure(minusminus_ex[j]), global_step=global_step)
writer.add_figure(tag=f"{self.feature_names[j]}/minusplus",
figure=build_example_image_figure(minusplus_ex[j]), global_step=global_step)
@trainer.on(Events.ITERATION_COMPLETED)
def log_validation_results(engine):
global_step = engine.state.iteration
if global_step % evaluate_every == 0:
evaluator.run(val_data_loader)
metrics = evaluator.state.metrics
# can add more metrics here
add_metrics_to_tensorboard(metrics, writer, self.feature_names, global_step, log_str="validation")
# add_embedding_visualization(writer, metrics, global_step)
if global_step % save_every == 0:
self.save_trained_model(f"{self.model_save_path}/{global_step}_model.pth")
trainer.run(train_data_loader, max_epochs=epochs)
return self.model
def load_trained_model(self, path: str):
"""
load a trained model
Args:
path: path of the saved torch NN
Returns:
the trained model in 'path'
"""
checkpoint = torch.load(path)
self.model.load_state_dict(checkpoint['model_state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
self.embedding_dim = checkpoint['embedding_dim']
self.losses = checkpoint['losses']
self.model = self.model.to(self.device)
return self
def _model_to(self):
self.model = self.model.to(self.device)
# from apex import amp
# if self.amp:
# model, optimizer = amp.initialize(model.to('cuda'), optimizer, opt_level="O1")
def save_trained_model(self, path: str):
"""
save a trained model
Args:
path: path of the saved torch NN
"""
self.model = self.model.to('cpu')
os.makedirs(os.path.dirname(path), exist_ok=True)
torch.save({
'model_state_dict': self.model.state_dict(),
'optimizer_state_dict': self.optimizer.state_dict(),
'embedding_dim': self.embedding_dim,
'losses': self.losses,
}, path)
self.model = self.model.to(self.device)
def transform(self, coords: List[Tuple[float, float]]) -> torch.tensor:
"""
get the embedding of coordinates
Args:
coords: a list of tuple like (lat, long) to predict on
Returns:
A tensor of shape [n_coords, embedding_dim]
"""
# create tiles using the coords
s = generate_static_maps(config.tile_server_dns_noport, config.tile_server_ports)
images = []
for coord in coords:
ext = build_tile_extent(coord, radius_in_meters=HALF_TILE_LENGTH)
image = render_multi_channel(s, ext)
images.append(image)
images = torch.tensor(images).float().to(self.device)
# predict the embedding
embeddings, output = self.model(images)
return embeddings.to('cpu')
def _build_model(self, cnn_model, n_channels, n_heads):
model = cnn_model(n_channels, self.embedding_dim)
# model = simple_cnn(n_channels, self.embedding_dim)
heads = [simple_head(self.embedding_dim) for _ in range(n_heads)]
model = multihead_model(model, heads)
return model
| 47.56051 | 118 | 0.64169 | 13,371 | 0.895339 | 0 | 0 | 4,320 | 0.289273 | 0 | 0 | 3,262 | 0.218428 |
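# Editor's note (hedged usage sketch, not part of the dataset row above): the trainer class
# whose methods end here exposes save_trained_model / load_trained_model / transform. The
# class name and constructor arguments below are assumptions; only the method names and the
# checkpoint keys ('model_state_dict', 'optimizer_state_dict', 'embedding_dim', 'losses')
# come from the source.
#
#   trainer = EmbeddingTrainer(...)                          # hypothetical constructor
#   trainer.save_trained_model("checkpoints/final.pth")      # writes the checkpoint dict
#   trainer = EmbeddingTrainer(...).load_trained_model("checkpoints/final.pth")
#   embeddings = trainer.transform([(42.27, -71.81)])        # tensor of shape [n_coords, embedding_dim]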
d7bb7502a2e24be8eaf3bc4172023cbaaaa79d16 | 1,716 | py | Python | PythonExercicios/ex045.py | Caio-Moretti/115.Exercicios-Python | 7e66fb1f44ea3eb4ade63f37d843242ac42ade84 | [
"MIT"
] | null | null | null | PythonExercicios/ex045.py | Caio-Moretti/115.Exercicios-Python | 7e66fb1f44ea3eb4ade63f37d843242ac42ade84 | [
"MIT"
] | null | null | null | PythonExercicios/ex045.py | Caio-Moretti/115.Exercicios-Python | 7e66fb1f44ea3eb4ade63f37d843242ac42ade84 | [
"MIT"
] | null | null | null | from random import choice
from time import sleep
jokenpo = ['Pedra', 'Papel', 'Tesoura']
jokenposter_stainger = choice(jokenpo)
jogador = int(input('Qual a sua jogada?'
'\n1. Pedra'
'\n2. Papel'
'\n3. Tesoura'
'\nEscolha: '))
print('\nJO...')
sleep(1)
print('KEN...')
sleep(1)
print('PO!!!'
'\n ')
if jogador == 1:
print('Você: Pedra')
elif jogador == 2:
print('Você: Papel')
elif jogador == 3:
print('Você: Tesoura')
else:
    print('Escolha uma opção válida.')
    w = -1  # guard: without this, an invalid choice leaves w undefined and the checks below raise NameError
print('Jokenposter Stainger: {}'.format(jokenposter_stainger))
sleep(2)
print('--' * 20)
if jokenposter_stainger == 'Pedra' and jogador == 1:
print('Empate!')
w = 0
elif jokenposter_stainger == 'Pedra' and jogador == 2:
print('Você ganhou!')
w = 2
elif jokenposter_stainger == 'Pedra' and jogador == 3:
print('Você perdeu!')
w = 1
elif jokenposter_stainger == 'Papel' and jogador == 1:
print('Você perdeu!')
w = 1
elif jokenposter_stainger == 'Papel' and jogador == 2:
print('Empate!')
w = 0
elif jokenposter_stainger == 'Papel' and jogador == 3:
print('Você ganhou!')
w = 2
elif jokenposter_stainger == 'Tesoura' and jogador == 1:
print('Você ganhou!')
w = 2
elif jokenposter_stainger == 'Tesoura' and jogador == 2:
print('Você perdeu!')
w = 1
elif jokenposter_stainger == 'Tesoura' and jogador == 3:
print('Empate!')
w = 0
if w == 0:
print('Jokenposter Stainger: Vamo de novo! Ta com medinho?')
elif w == 1:
print('Jokenposter Stainger: OTÁRIO ')
elif w == 2:
print('Jokenposter Stainger: TAAAVA DEMORAANDO! Revanche!')
else:
print(' ')
print('--' * 20)
| 23.189189 | 64 | 0.597902 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 557 | 0.322152 |
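# Editor's note: a hedged, table-driven alternative to the if/elif chains in ex045.py above.
# Sketch only -- the function name and the 'beats' mapping are new; the encoding mirrors the
# original file (1=Pedra, 2=Papel, 3=Tesoura; result 0=draw, 1=player loses, 2=player wins).
def jokenpo_result(jogador: int, computador: str) -> int:
    nomes = {1: 'Pedra', 2: 'Papel', 3: 'Tesoura'}
    beats = {'Pedra': 'Tesoura', 'Papel': 'Pedra', 'Tesoura': 'Papel'}  # key beats value
    escolha = nomes.get(jogador)
    if escolha is None:
        return -1                      # invalid input
    if escolha == computador:
        return 0                       # draw
    return 2 if beats[escolha] == computador else 1

# e.g. jokenpo_result(2, choice(jokenpo)) using the same 'choice' and list as the original file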
d7bba15bb04519ffa2576bb1d237b14798175a4e | 102 | py | Python | demo_backend/text_generator/apps.py | WallaceLiu/Web-Full-Stack-Practice | 2551ab12d7b500387cc511f1cb7428aef4d59ab4 | [
"BSD-2-Clause"
] | 33 | 2019-03-04T12:56:47.000Z | 2021-11-27T02:29:45.000Z | demo_backend/text_generator/apps.py | WallaceLiu/Web-Full-Stack-Practice | 2551ab12d7b500387cc511f1cb7428aef4d59ab4 | [
"BSD-2-Clause"
] | 1 | 2021-06-10T21:39:42.000Z | 2021-06-10T21:39:42.000Z | demo_backend/text_generator/apps.py | WallaceLiu/Web-Full-Stack-Practice | 2551ab12d7b500387cc511f1cb7428aef4d59ab4 | [
"BSD-2-Clause"
] | 9 | 2019-03-04T13:15:51.000Z | 2021-08-24T01:24:34.000Z | from django.apps import AppConfig
class TextGeneratorConfig(AppConfig):
name = 'text_generator'
| 17 | 37 | 0.784314 | 65 | 0.637255 | 0 | 0 | 0 | 0 | 0 | 0 | 16 | 0.156863 |
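# Editor's note (hedged): for TextGeneratorConfig above to take effect, the app must appear in
# the project's settings module, which is not shown in this dump; the snippet below is only
# illustrative of the standard Django convention.
#
#   INSTALLED_APPS = [
#       ...,
#       'text_generator',                                  # Django 3.2+ picks up apps.py automatically
#       # or, explicitly: 'text_generator.apps.TextGeneratorConfig'
#   ]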
d7bd08d0f54f08c80ef0751f3fb01d94c3153c59 | 760 | py | Python | tools/find_protoc.py | Kill-Console/xresloader | dc11f4cffecb9f9d706c91a83d5b70a1d418cfc5 | [
"MIT"
] | 219 | 2015-05-21T03:27:46.000Z | 2022-03-22T08:39:41.000Z | tools/find_protoc.py | Kill-Console/xresloader | dc11f4cffecb9f9d706c91a83d5b70a1d418cfc5 | [
"MIT"
] | 9 | 2016-04-26T07:11:26.000Z | 2021-12-30T02:45:00.000Z | tools/find_protoc.py | Kill-Console/xresloader | dc11f4cffecb9f9d706c91a83d5b70a1d418cfc5 | [
"MIT"
] | 59 | 2015-05-19T01:29:28.000Z | 2022-03-22T08:39:43.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import os
import stat
protoc_exec = None
def find_protoc():
global protoc_exec
if protoc_exec is not None:
return protoc_exec
script_dir = os.path.dirname(os.path.realpath(__file__))
if sys.platform[0:5].lower() == "linux":
protoc_exec = os.path.join(script_dir, 'linux_x86_64', 'protoc')
elif sys.platform[0:6].lower() == "darwin":
protoc_exec = os.path.join(script_dir, 'macos_x86_64', 'protoc')
else:
protoc_exec = os.path.join(script_dir, 'windows_x86_64', 'protoc.exe')
os.chmod(protoc_exec, stat.S_IRWXU + stat.S_IRWXG + stat.S_IRWXO)
return protoc_exec
""" run as a executable """
if __name__ == "__main__":
print(find_protoc())
| 29.230769 | 78 | 0.665789 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 164 | 0.215789 |
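# Editor's note: a hedged usage sketch for find_protoc() above. The module import path, the
# .proto file name and the output directory are assumptions; only find_protoc() itself and the
# bundled per-platform protoc binaries come from the source file.
import subprocess

from find_protoc import find_protoc  # assumes tools/ is importable as in the original repo layout

def compile_proto(proto_file: str, out_dir: str = ".") -> None:
    """Generate Python bindings for a .proto file with the bundled protoc binary."""
    subprocess.check_call([find_protoc(), "--python_out={}".format(out_dir), proto_file])

# compile_proto("example.proto")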
d7bd3f72caaeb6260431ad1a59089ba8056f0635 | 43,253 | py | Python | inventory/views.py | yellowjaguar5/lnldb | dea7708f5e4e103ef6ef968c9f3a4deaa58861c5 | [
"MIT"
] | 5 | 2017-09-25T21:24:59.000Z | 2021-12-18T17:08:13.000Z | inventory/views.py | yellowjaguar5/lnldb | dea7708f5e4e103ef6ef968c9f3a4deaa58861c5 | [
"MIT"
] | 304 | 2015-03-24T17:44:22.000Z | 2022-03-29T14:09:41.000Z | inventory/views.py | yellowjaguar5/lnldb | dea7708f5e4e103ef6ef968c9f3a4deaa58861c5 | [
"MIT"
] | 10 | 2017-10-24T02:18:12.000Z | 2021-09-20T20:40:25.000Z | from io import BytesIO
import json
import re
import requests
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required, permission_required
from django.core.exceptions import PermissionDenied
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.db.models import Count
from django.forms.models import inlineformset_factory
from django.http import (HttpResponse, HttpResponseBadRequest, HttpResponseNotFound,
HttpResponseRedirect)
from django.shortcuts import get_object_or_404, render
from django.template.loader import render_to_string
from django.urls.base import reverse
from django.utils import timezone
from xhtml2pdf import pisa
from . import forms, models
from events.models import Location
from emails.generators import DefaultLNLEmailGenerator
from pdfs.views import link_callback
NUM_IN_PAGE = 25
@login_required
def view_all(request):
""" Lists all items in LNL's inventory (no longer maintained - read-only) """
if not request.user.has_perm('inventory.view_equipment'):
raise PermissionDenied
context = {}
inv = models.EquipmentClass.objects.order_by('name') \
.annotate(item_count=Count('items'))
categories = models.EquipmentCategory.objects.all()
paginator = Paginator(inv, NUM_IN_PAGE)
page = request.GET.get('page')
try:
context['inv'] = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
context['inv'] = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
context['inv'] = paginator.page(paginator.num_pages)
context['h2'] = "Inventory: Item List"
context['cats'] = categories
return render(request, 'inventory/list.html', context)
@login_required
def cat(request, category_id):
"""
List items by category
:param category_id: The primary key value of the equipment category
"""
if not request.user.has_perm('inventory.view_equipment'):
raise PermissionDenied
context = {}
category = get_object_or_404(models.EquipmentCategory, pk=category_id)
if 'exclusive' in request.GET and request.GET['exclusive']:
inv = models.EquipmentClass.objects.filter(category=category)
context['exclusive'] = True
else:
inv = models.EquipmentClass.objects.filter(category__in=category.get_descendants_inclusive)
context['exclusive'] = False
inv = inv.order_by('category__level', 'category__name', 'name') \
.annotate(item_count=Count('items'))
subcategories = models.EquipmentCategory.objects.all()
paginator = Paginator(inv, NUM_IN_PAGE)
page = request.GET.get('page')
try:
context['inv'] = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
context['inv'] = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
context['inv'] = paginator.page(paginator.num_pages)
context['h2'] = "Inventory: %s" % category.name
context['cat'] = category
context['cats'] = subcategories
return render(request, 'inventory/list.html', context)
# Inventory is currently read-only now that we are using Snipe
# @login_required
# def quick_bulk_add(request, type_id):
# if request.method != 'POST':
# return HttpResponseBadRequest('Invalid operation')
# if 'num_to_add' not in request.POST:
# return HttpResponseBadRequest('Missing parameters')
#
# try:
# num_to_add = int(request.POST['num_to_add'])
# except (ValueError, TypeError):
# return HttpResponseBadRequest('Bad parameters')
#
# try:
# e_type = models.EquipmentClass.objects.get(pk=int(type_id))
# except models.EquipmentClass.DoesNotExist:
# return HttpResponseNotFound()
#
# if not request.user.has_perm('inventory.add_equipmentitem', e_type):
# raise PermissionDenied
#
# models.EquipmentItem.objects.bulk_add_helper(e_type, num_to_add)
#
# messages.add_message(request, messages.SUCCESS,
# "%d items added and saved. Now editing." % num_to_add)
#
# return HttpResponseRedirect(reverse('inventory:bulk_edit',
# kwargs={'type_id': type_id}))
#
#
# @login_required
# def quick_bulk_edit(request, type_id):
# e_type = get_object_or_404(models.EquipmentClass, pk=int(type_id))
#
# if not request.user.has_perm('inventory.change_equipmentitem', e_type):
# raise PermissionDenied
#
# can_delete = request.user.has_perm('inventory.delete_equipmentitem', e_type)
# fs_factory = inlineformset_factory(models.EquipmentClass, models.EquipmentItem,
# form=forms.EquipmentItemForm,
# extra=0, can_delete=can_delete)
#
# if request.method == 'POST':
# formset = fs_factory(request.POST, request.FILES, instance=e_type)
# if formset.is_valid():
# formset.save()
# messages.add_message(request, messages.SUCCESS,
# "Items saved.")
# return HttpResponseRedirect(reverse('inventory:type_detail',
# kwargs={'type_id': type_id}))
# else:
# formset = fs_factory(instance=e_type)
# qs = models.EquipmentCategory.possible_locations()
# for form in formset:
# form.fields['home'].queryset = qs
# return render(request, "formset_grid.html", {
# 'msg': "Bulk inventory edit for '%s'" % e_type.name,
# "formset": formset,
# 'form_show_errors': True
# })
#
#
# @login_required
# def type_edit(request, type_id):
# try:
# e_type = models.EquipmentClass.objects.get(pk=int(type_id))
# except models.EquipmentClass.DoesNotExist:
# return HttpResponseNotFound()
#
# if not request.user.has_perm('inventory.change_equipmentclass', e_type):
# raise PermissionDenied
#
# if request.method == 'POST':
# form = forms.EquipmentClassForm(request.POST, request.FILES, instance=e_type)
# if form.is_valid():
# form.save()
# messages.add_message(request, messages.SUCCESS,
# "Equipment type saved.")
# return HttpResponseRedirect(reverse('inventory:type_detail',
# kwargs={'type_id': type_id}))
# else:
# form = forms.EquipmentClassForm(instance=e_type)
# return render(request, "form_crispy.html", {
# 'msg': "Edit '%s'" % e_type.name,
# "form": form,
# })
#
#
# @login_required
# def type_mk(request):
# if not request.user.has_perm('inventory.add_equipmentclass'):
# raise PermissionDenied
#
# category = request.GET.get('default_cat')
#
# if request.method == 'POST':
# form = forms.EquipmentClassForm(request.POST, request.FILES)
# if form.is_valid():
# obj = form.save()
# messages.add_message(request, messages.SUCCESS,
# "Equipment type added.")
# return HttpResponseRedirect(reverse('inventory:type_detail',
# kwargs={'type_id': obj.pk}))
# else:
# form = forms.EquipmentClassForm(initial={'category': category})
# return render(request, "form_crispy.html", {
# 'msg': "Create Equipment Type",
# "form": form,
# })
#
#
# @login_required
# def type_rm(request, type_id):
# obj = get_object_or_404(models.EquipmentClass, pk=int(type_id))
# return_page = reverse('inventory:cat', args=[obj.category.pk])
#
# if not request.user.has_perm('inventory.delete_equipmentclass', obj):
# raise PermissionDenied
#
# if request.method == 'POST':
# if obj.items.exists():
# return HttpResponseBadRequest("There are still items of this type")
# else:
# obj.delete()
# return HttpResponseRedirect(return_page)
# else:
# return HttpResponseBadRequest("Bad method")
#
#
# @login_required
# def cat_edit(request, category_id):
# category = get_object_or_404(models.EquipmentCategory, pk=category_id)
#
# if not request.user.has_perm('inventory.change_equipmentcategory', category):
# raise PermissionDenied
#
# if request.method == 'POST':
# form = forms.CategoryForm(request.POST, request.FILES, instance=category)
# if form.is_valid():
# form.save()
# messages.add_message(request, messages.SUCCESS,
# "Category saved.")
# return HttpResponseRedirect(reverse('inventory:cat',
# kwargs={'category_id': category_id}))
# else:
# form = forms.CategoryForm(instance=category)
# return render(request, "form_crispy.html", {
# 'msg': "Edit Category",
# "form": form,
# })
#
#
# @login_required
# def cat_mk(request):
# if not request.user.has_perm('inventory.add_equipmentcategory'):
# raise PermissionDenied
#
# parent = request.GET.get('parent')
#
# if request.method == 'POST':
# form = forms.CategoryForm(request.POST, request.FILES)
# if form.is_valid():
# obj = form.save()
# messages.add_message(request, messages.SUCCESS,
# "Category added.")
# return HttpResponseRedirect(reverse('inventory:cat',
# kwargs={'category_id': obj.pk}))
# else:
# form = forms.CategoryForm(initial={'parent': parent})
# return render(request, "form_crispy.html", {
# 'msg': "Create Category",
# "form": form,
# })
#
#
# @login_required
# def cat_rm(request, category_id):
# ecat = get_object_or_404(models.EquipmentCategory, pk=int(category_id))
# if ecat.parent:
# return_url = reverse('inventory:cat', args=[ecat.parent.pk])
# else:
# return_url = reverse('inventory:view_all')
#
# if not request.user.has_perm('inventory.delete_equipmentcategory', ecat):
# raise PermissionDenied
#
# if request.method == 'POST':
# if ecat.get_children().exists():
# return HttpResponseBadRequest("There are still subcategories of this type")
# elif ecat.equipmentclass_set.exists():
# return HttpResponseBadRequest("There are still items in this category")
# else:
# ecat.delete()
# return HttpResponseRedirect(return_url)
# else:
# return HttpResponseBadRequest("Bad method")
#
#
# @login_required
# def fast_mk(request):
# if not request.user.has_perm('inventory.add_equipmentitem'):
# raise PermissionDenied
#
# try:
# category = int(request.GET['default_cat'])
# except (ValueError, KeyError, TypeError):
# category = None
#
# if request.method == 'POST':
# form = forms.FastAdd(request.user, request.POST, request.FILES)
# if form.is_valid():
# obj = form.save()
# messages.add_message(request, messages.SUCCESS,
# "%d items added and saved. Now editing." % form.cleaned_data['num_to_add'])
# return HttpResponseRedirect(reverse('inventory:bulk_edit',
# kwargs={'type_id': obj.pk}))
# else:
# form = forms.FastAdd(request.user, initial={'item_cat': category})
# return render(request, "form_crispy.html", {
# 'msg': "Fast Add Item(s)",
# "form": form,
# })
@login_required
def type_detail(request, type_id):
""" Detail page for a group of items """
e = get_object_or_404(models.EquipmentClass, pk=type_id)
return render(request, 'inventory/type_detail.html', {
'breadcrumbs': e.breadcrumbs,
'equipment': e
})
@login_required
def item_detail(request, item_id):
""" Detail page for a specific item """
item = get_object_or_404(models.EquipmentItem, pk=item_id)
return render(request, 'inventory/item_detail.html', {
'breadcrumbs': item.breadcrumbs,
'item': item
})
# @login_required
# def item_edit(request, item_id):
# try:
# item = models.EquipmentItem.objects.get(pk=int(item_id))
# except models.EquipmentItem.DoesNotExist:
# return HttpResponseNotFound()
#
# if not request.user.has_perm('inventory.change_equipmentitem', item):
# raise PermissionDenied
#
# if request.method == 'POST':
# form = forms.EquipmentItemForm(request.POST, request.FILES, instance=item)
# if form.is_valid():
# form.save()
# messages.add_message(request, messages.SUCCESS,
# "Item saved.")
# return HttpResponseRedirect(reverse('inventory:item_detail',
# kwargs={'item_id': item_id}))
# else:
# form = forms.EquipmentItemForm(instance=item)
# return render(request, "form_crispy.html", {
# 'msg': "Edit '%s'" % str(item),
# "form": form,
# })
#
#
# @login_required
# def item_rm(request, item_id):
# obj = get_object_or_404(models.EquipmentItem, pk=int(item_id))
# return_page = reverse('inventory:type_detail', args=[obj.item_type.pk])
#
# if not request.user.has_perm('inventory.delete_equipmentitem', obj):
# raise PermissionDenied
#
# if request.method == 'POST':
# if obj.unsafe_to_delete:
# return HttpResponseBadRequest("There are still items of this type")
# else:
# obj.delete()
# return HttpResponseRedirect(return_page)
# else:
# return HttpResponseBadRequest("Bad method")
@login_required
@permission_required('inventory.view_equipment', raise_exception=True)
def snipe_checkout(request):
""" Equipment inventory checkout form. Communicates with Snipe via their API. """
if not settings.SNIPE_URL:
return HttpResponse('This page is unavailable because SNIPE_URL is not set.', status=501)
if not settings.SNIPE_API_KEY:
return HttpResponse('This page is unavailable because SNIPE_API_KEY is not set.', status=501)
# Get the list of users in the rental group from Snipe
error_message = 'Error communicating with Snipe. Did not check out anything.'
checkout_to_choices = []
response = requests.request('GET', '{}api/v1/users'.format(settings.SNIPE_URL), headers={
'authorization': 'Bearer {}'.format(settings.SNIPE_API_KEY),
'accept': 'application/json',
'content-type': 'application/json'
})
if response.status_code == 200:
try:
data = json.loads(response.text)
if data.get('status') == 'error':
return HttpResponse(error_message, status=502)
checkout_to_choices = [(user['id'], user['name']) for user in data['rows'] if 'rental' in ((group['name'] for group in user['groups']['rows']) if user['groups'] is not None else ())]
except ValueError:
return HttpResponse(error_message, status=502)
else:
return HttpResponse(error_message, status=502)
# Handle the form
error_message = 'Error communicating with Snipe. Some things may have been checked out while some were not. ' \
'Please go check Snipe.'
if request.method == 'POST':
receipt_info = {}
form = forms.SnipeCheckoutForm(checkout_to_choices, request.POST, request.FILES)
if form.is_valid():
success_count_assets = 0
success_count_accessories = 0
for tag in [tag for tag in re.split('[^a-zA-Z0-9]', form.cleaned_data['asset_tags']) if tag]:
match = re.match('LNLACC([0-9]+)', tag)
if match:
tag = match.group(1)
# This tag represents an accessory
response = requests.request('GET', '{}api/v1/accessories/{}'.format(settings.SNIPE_URL, tag),
headers={'authorization': 'Bearer {}'.format(settings.SNIPE_API_KEY),
'accept': 'application/json', 'content-type': 'application/json'})
if response.status_code == 200:
try:
data = json.loads(response.text)
if data.get('status') == 'error':
# No accessory with that ID exists in Snipe
messages.add_message(request, messages.ERROR, 'No such accessory with ID {}'.format(tag))
continue
accessory_name = data['name']
rental_price = float(data['order_number']) if data['order_number'] is not None else None
# Check out the accessory
response = requests.request('POST', '{}api/v1/accessories/{}/checkout'.format(settings.SNIPE_URL, tag), data=json.dumps({
'assigned_to': form.cleaned_data['checkout_to'],
}), headers={
'authorization': 'Bearer {}'.format(settings.SNIPE_API_KEY),
'accept': 'application/json',
'content-type': 'application/json',
})
if response.status_code == 200:
data = json.loads(response.text)
if data.get('status') == 'error':
# Snipe refused to check out the accessory (maybe they are all checked out)
messages.add_message(request, messages.ERROR, 'Unable to check out accessory {}. Snipe says: {}'.format(tag, data['messages']))
continue
# The accessory was successfully checked out
success_count_accessories += 1
if tag in receipt_info:
if receipt_info[tag]['name'] != accessory_name \
or receipt_info[tag]['rental_price'] != rental_price:
return HttpResponse(error_message, status=502)
receipt_info[tag]['quantity'] += 1
else:
receipt_info[tag] = {'name': accessory_name, 'rental_price': rental_price,
'quantity': 1}
else:
return HttpResponse(error_message, status=502)
except ValueError:
return HttpResponse(error_message, status=502)
else:
return HttpResponse(error_message, status=502)
else:
# This tag represents an asset
response = requests.request('GET', '{}api/v1/hardware/bytag/{}'.format(settings.SNIPE_URL, tag),
headers={'authorization': 'Bearer {}'.format(settings.SNIPE_API_KEY),
'accept': 'application/json', 'content-type': 'application/json'})
if response.status_code == 200:
try:
data = json.loads(response.text)
if data.get('status') == 'error':
# The asset tag does not exist in Snipe
messages.add_message(request, messages.ERROR, 'No such asset tag {}'.format(tag))
continue
asset_name = data['name']
if 'custom_fields' in data and 'Rental Price' in data['custom_fields'] and \
'value' in data['custom_fields']['Rental Price'] and data['custom_fields']['Rental Price']['value'] is not None:
rental_price = float(data['custom_fields']['Rental Price']['value'])
else:
rental_price = None
# Check out the asset
response = requests.request('POST', '{}api/v1/hardware/{}/checkout'.format(settings.SNIPE_URL, data['id']), data=json.dumps({
'checkout_to_type': 'user',
'assigned_user': form.cleaned_data['checkout_to'],
}), headers={
'authorization': 'Bearer {}'.format(settings.SNIPE_API_KEY),
'accept': 'application/json',
'content-type': 'application/json',
})
if response.status_code == 200:
data = json.loads(response.text)
if data.get('status') == 'error':
# Snipe refused to check out the asset (maybe it is already checked out)
messages.add_message(request, messages.ERROR, 'Unable to check out asset {} - {}. Snipe says: {}'.format(tag, asset_name, data['messages']))
continue
# The asset was successfully checked out
success_count_assets += 1
if tag in receipt_info:
return HttpResponse(error_message, status=502)
receipt_info[tag] = {'name': asset_name, 'rental_price': rental_price, 'quantity': 1}
else:
return HttpResponse(error_message, status=502)
except ValueError:
return HttpResponse(error_message, status=502)
else:
return HttpResponse(error_message, status=502)
if success_count_assets > 0 or success_count_accessories > 0:
messages.add_message(request, messages.SUCCESS, 'Successfully checked out {} assets and {} accessories'.format(success_count_assets, success_count_accessories))
rental_prices = [(None if asset_info['rental_price'] is None else asset_info['rental_price'] * asset_info['quantity']) for asset_info in receipt_info.values()]
total_rental_price = None if None in rental_prices else sum(rental_prices)
checkout_to_name = next((item[1] for item in checkout_to_choices if item[0] == form.cleaned_data['checkout_to']))
# Before returning the response, email a PDF receipt
html = render_to_string('pdf_templates/checkout_receipt.html', request=request, context={
'title': 'Checkout Receipt',
'receipt_info': receipt_info,
'num_assets': success_count_assets,
'num_accessories': success_count_accessories,
'total_rental_price': total_rental_price,
'checkout_to': checkout_to_name,
})
pdf_file = BytesIO()
pisa.CreatePDF(html, dest=pdf_file, link_callback=link_callback)
pdf_handle = pdf_file.getvalue()
filename = 'LNL-checkout-receipt-{}.pdf'.format(timezone.now().isoformat())
attachments = [{'file_handle': pdf_handle, 'name': filename}]
email = DefaultLNLEmailGenerator(subject='LNL Inventory Checkout Receipt', to_emails=(request.user.email, settings.EMAIL_TARGET_RENTALS), attachments=attachments,
body='A receipt for the rental checkout by {} to {} is attached.'.format(request.user, checkout_to_name))
email.send()
# Return the response
return render(request, 'inventory/checkout_receipt.html', {
'receipt_info': receipt_info,
'num_assets': success_count_assets,
'num_accessories': success_count_accessories,
'total_rental_price': total_rental_price,
'checkout_to': form.cleaned_data['checkout_to'],
'checkout_to_name': checkout_to_name,
})
else:
form = forms.SnipeCheckoutForm(checkout_to_choices, initial={'checkout_to': form.cleaned_data['checkout_to']})
else:
if 'checkout_to' in request.GET:
form = forms.SnipeCheckoutForm(checkout_to_choices, initial={'checkout_to': request.GET['checkout_to']})
else:
form = forms.SnipeCheckoutForm(checkout_to_choices)
return render(request, "form_crispy.html", {
'msg': 'Inventory checkout',
'form': form,
})
@login_required
@permission_required('inventory.view_equipment', raise_exception=True)
def snipe_checkin(request):
""" Equipment inventory checkin form. Communicates with Snipe via their API. """
if not settings.SNIPE_URL:
return HttpResponse('This page is unavailable because SNIPE_URL is not set.', status=501)
if not settings.SNIPE_API_KEY:
return HttpResponse('This page is unavailable because SNIPE_API_KEY is not set.', status=501)
# Get the list of users in the rental group from Snipe
error_message = 'Error communicating with Snipe. Did not check in anything.'
checkin_from_choices = []
response = requests.request('GET', '{}api/v1/users'.format(settings.SNIPE_URL), headers={
'authorization': 'Bearer {}'.format(settings.SNIPE_API_KEY),
'accept': 'application/json',
'content-type': 'application/json'
})
if response.status_code == 200:
try:
data = json.loads(response.text)
if data.get('status') == 'error':
return HttpResponse(error_message, status=502)
checkin_from_choices = [(user['id'], user['name']) for user in data['rows'] if 'rental' in ((group['name'] for group in user['groups']['rows']) if user['groups'] is not None else ())]
checkin_from_usernames = {user['id']: user['username'] for user in data['rows'] if 'rental' in ((group['name'] for group in user['groups']['rows']) if user['groups'] is not None else ())}
except ValueError:
return HttpResponse(error_message, status=502)
else:
return HttpResponse(error_message, status=502)
# Handle the form
error_message = 'Error communicating with Snipe. Some things may have been checked in while some were not. Please go check Snipe.'
if request.method == 'POST':
form = forms.SnipeCheckinForm(checkin_from_choices, request.POST, request.FILES)
if form.is_valid():
receipt_info = {}
receipt_info_extra = {}
checkin_from_name = next((item[1] for item in checkin_from_choices if item[0] == form.cleaned_data['checkin_from']))
checkin_from_username = checkin_from_usernames[form.cleaned_data['checkin_from']]
success_count_assets = 0
success_count_accessories = 0
extra_count_assets = 0
extra_count_accessories = 0
for tag in [tag for tag in re.split('[^a-zA-Z0-9]', form.cleaned_data['asset_tags']) if tag]:
match = re.match('LNLACC([0-9]+)', tag)
if match:
tag = match.group(1)
# This tag represents an accessory
response = requests.request('GET', '{}api/v1/accessories/{}'.format(settings.SNIPE_URL, tag), headers={
'authorization': 'Bearer {}'.format(settings.SNIPE_API_KEY),
'accept': 'application/json', 'content-type': 'application/json'
})
if response.status_code == 200:
try:
data = json.loads(response.text)
if data.get('status') == 'error':
# No accessory with that ID exists in Snipe
messages.add_message(request, messages.ERROR, 'No such accessory with ID {}'.format(tag))
continue
accessory_name = data['name']
rental_price = float(data['order_number']) if data['order_number'] is not None else None
# Get the list of checked out instances of the accessory
response = requests.request('GET', '{}api/v1/accessories/{}/checkedout'.format(settings.SNIPE_URL, tag), headers={
'authorization': 'Bearer {}'.format(settings.SNIPE_API_KEY),
'accept': 'application/json', 'content-type': 'application/json'
})
if response.status_code == 200:
data = json.loads(response.text)
if data.get('status') == 'error':
return HttpResponse(error_message, status=502)
accessory_instances = [a for a in data['rows'] if a['username'] == checkin_from_username]
if len(accessory_instances) == 0:
# There are no instances of that accessory checked out to the specified Snipe user
messages.add_message(request, messages.ERROR, 'No instance of {} checked out to {}'.format(accessory_name, checkin_from_name))
extra_count_accessories += 1
if tag in receipt_info_extra:
if receipt_info_extra[tag]['name'] != accessory_name \
or receipt_info_extra[tag]['rental_price'] != rental_price:
return HttpResponse(error_message, status=502)
receipt_info_extra[tag]['quantity'] += 1
else:
receipt_info_extra[tag] = {'name': accessory_name, 'rental_price': rental_price, 'quantity': 1}
continue
# Check in the accessory
response = requests.request('POST', '{}api/v1/accessories/{}/checkin'.format(settings.SNIPE_URL, accessory_instances[0]['assigned_pivot_id']), headers={
'authorization': 'Bearer {}'.format(settings.SNIPE_API_KEY),
'accept': 'application/json',
'content-type': 'application/json',
})
if response.status_code == 200:
data = json.loads(response.text)
if data.get('status') == 'error':
# Snipe refused to check in the accessory
messages.add_message(request, messages.ERROR, 'Unable to check in accessory {}. Snipe says: {}'.format(tag, data['messages']))
continue
# The accessory was successfully checked in
success_count_accessories += 1
if tag in receipt_info:
if receipt_info[tag]['name'] != accessory_name \
or receipt_info[tag]['rental_price'] != rental_price:
return HttpResponse(error_message, status=502)
receipt_info[tag]['quantity'] += 1
else:
receipt_info[tag] = {'name': accessory_name, 'rental_price': rental_price, 'quantity': 1}
else:
return HttpResponse(error_message, status=502)
else:
return HttpResponse(error_message, status=502)
except ValueError:
return HttpResponse(error_message, status=502)
else:
return HttpResponse(error_message, status=502)
else:
# This tag represents an asset
response = requests.request('GET', '{}api/v1/hardware/bytag/{}'.format(settings.SNIPE_URL, tag), headers={
'authorization': 'Bearer {}'.format(settings.SNIPE_API_KEY),
'accept': 'application/json', 'content-type': 'application/json'
})
if response.status_code == 200:
try:
data = json.loads(response.text)
if data.get('status') == 'error':
# The asset tag does not exist in Snipe
messages.add_message(request, messages.ERROR, 'No such asset tag {}'.format(tag))
continue
asset_name = data['name']
if 'custom_fields' in data and 'Rental Price' in data['custom_fields'] and \
'value' in data['custom_fields']['Rental Price'] and data['custom_fields']['Rental Price']['value'] is not None:
rental_price = float(data['custom_fields']['Rental Price']['value'])
else:
rental_price = None
if ('assigned_to' not in data
or data['assigned_to'] is None
or 'type' not in data['assigned_to']
or data['assigned_to']['type'] != 'user'
or 'id' not in data['assigned_to']
or data['assigned_to']['id'] != form.cleaned_data['checkin_from']):
# That asset is not checked out to the specified Snipe user
messages.add_message(request, messages.ERROR, 'Asset {} was never checked out to {}'.format(asset_name, checkin_from_name))
extra_count_assets += 1
if tag in receipt_info:
return HttpResponse(error_message, status=502)
receipt_info_extra[tag] = {'name': asset_name, 'rental_price': rental_price, 'quantity': 1}
continue
# Check in the asset
response = requests.request('POST', '{}api/v1/hardware/{}/checkin'.format(settings.SNIPE_URL, data['id']), headers={
'authorization': 'Bearer {}'.format(settings.SNIPE_API_KEY),
'accept': 'application/json',
'content-type': 'application/json',
})
if response.status_code == 200:
data = json.loads(response.text)
if data.get('status') == 'error':
# Snipe refused to check in the asset
messages.add_message(request, messages.ERROR, 'Unable to check in asset {} - {}. Snipe says: {}'.format(tag, asset_name, data['messages']))
continue
# The asset was successfully checked in
success_count_assets += 1
if tag in receipt_info:
return HttpResponse(error_message, status=502)
receipt_info[tag] = {'name': asset_name, 'rental_price': rental_price, 'quantity': 1}
else:
return HttpResponse(error_message, status=502)
except ValueError:
return HttpResponse(error_message, status=502)
else:
return HttpResponse(error_message, status=502)
if success_count_assets > 0 or success_count_accessories > 0:
messages.add_message(request, messages.SUCCESS, 'Successfully checked in {} assets and {} accessories'.format(success_count_assets, success_count_accessories))
rental_prices = [(None if asset_info['rental_price'] is None else asset_info['rental_price'] * asset_info['quantity']) for asset_info in receipt_info.values()]
extra_prices = [(None if asset_info['rental_price'] is None else asset_info['rental_price'] * asset_info['quantity']) for asset_info in receipt_info_extra.values()]
total_rental_price = None if None in rental_prices or None in extra_prices else sum(rental_prices) + sum(extra_prices)
# Before returning the response, email a PDF receipt
html = render_to_string('pdf_templates/checkin_receipt.html', request=request, context={
'title': 'Checkin Receipt',
'receipt_info': receipt_info,
'receipt_info_extra': receipt_info_extra,
'num_assets': success_count_assets,
'num_accessories': success_count_accessories,
'num_extra_assets': extra_count_assets,
'num_extra_accessories': extra_count_accessories,
'total_rental_price': total_rental_price,
'checkin_from': checkin_from_name,
})
pdf_file = BytesIO()
pisa.CreatePDF(html, dest=pdf_file, link_callback=link_callback)
pdf_handle = pdf_file.getvalue()
filename = 'LNL-checkin-receipt-{}.pdf'.format(timezone.now().isoformat())
attachments = [{'file_handle': pdf_handle, 'name': filename}]
email = DefaultLNLEmailGenerator(subject='LNL Inventory Checkin Receipt', to_emails=(request.user.email, settings.EMAIL_TARGET_RENTALS), attachments=attachments,
body='A receipt for the rental checkin by {} from {} is attached.'.format(request.user, checkin_from_name))
email.send()
# Return the response
return render(request, 'inventory/checkin_receipt.html', {
'receipt_info': receipt_info,
'receipt_info_extra': receipt_info_extra,
'num_assets': success_count_assets,
'num_accessories': success_count_accessories,
'num_extra_assets': extra_count_assets,
'num_extra_accessories': extra_count_accessories,
'total_rental_price': total_rental_price,
'checkin_from': form.cleaned_data['checkin_from'],
'checkin_from_name': checkin_from_name,
})
else:
form = forms.SnipeCheckinForm(checkin_from_choices, initial={'checkin_from': form.cleaned_data['checkin_from']})
else:
if 'checkin_from' in request.GET:
form = forms.SnipeCheckinForm(checkin_from_choices, initial={'checkin_from': request.GET['checkin_from']})
else:
form = forms.SnipeCheckinForm(checkin_from_choices)
return render(request, "form_crispy.html", {
'msg': 'Inventory checkin',
'form': form,
})
@login_required
@permission_required('inventory.view_equipment', raise_exception=True)
def snipe_credentials(request):
context = {
'title': 'Snipe Login Credentials',
'message': '<span style="font-size: 1.3em"><strong>Username:</strong> ' + settings.SNIPE_GENERAL_USER +
'<br><strong>Password:</strong> ' + settings.SNIPE_GENERAL_PASS + '</span><br><br>'
'<a class="btn btn-primary" href="https://lnl-rt.wpi.edu/snipe" target="_blank">Login Now</a>'
}
return render(request, 'default.html', context)
@login_required
def log_access(request, location=None, reason=None):
"""
Checkin form used by LNL members when accessing a storage location (contact tracing)
:param location: The name of the location (must match a location that contains equipment)
:param reason: Should be set to "OUT" if user is checking out of a location (None otherwise)
"""
context = {'NO_FOOT': True, 'NO_NAV': True, 'NO_API': True, 'LIGHT_THEME': True}
location = location.replace('-', ' ')
space = Location.objects.filter(holds_equipment=True, name__icontains=location).first()
if not space:
return HttpResponseNotFound("Invalid Location ID")
if request.method == 'POST':
form = forms.AccessForm(request.POST, location=space.name, reason=reason, initial={'users': [request.user]})
if form.is_valid():
record = form.save(commit=False)
record.location = space
record.save()
form.save_m2m()
if reason == "OUT":
messages.success(request, "Thank you! Come again soon!", extra_tags="success")
else:
messages.success(request, "Thank you! You are now signed in.", extra_tags="success")
return HttpResponseRedirect(reverse("home"))
else:
form = forms.AccessForm(location=space.name, reason=reason, initial={'users': [request.user]})
context['form'] = form
return render(request, 'form_crispy_static.html', context)
@login_required
@permission_required('inventory.view_access_logs', raise_exception=True)
def view_logs(request):
""" View contact tracing logs for LNL storage spaces """
headers = ['Timestamp', 'User', 'Location', 'Reason']
def get_timestamp(data):
return data.get('timestamp')
records = []
for record in models.AccessRecord.objects.all():
for user in record.users.all():
obj = {'timestamp': record.timestamp, 'user': user, 'location': record.location, 'reason': record.reason}
records.append(obj)
records.sort(key=get_timestamp, reverse=True)
paginator = Paginator(records, 50)
page_number = request.GET.get('page', 1)
current_page = paginator.get_page(page_number)
context = {'records': current_page, 'title': 'Access Log', 'headers': headers}
return render(request, 'access_log.html', context)
| 51.369359 | 199 | 0.565464 | 0 | 0 | 0 | 0 | 32,120 | 0.742607 | 0 | 0 | 18,340 | 0.424017 |
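# Editor's note: snipe_checkout and snipe_checkin above repeat the same requests.request(...)
# call plus JSON/status handling for every asset and accessory. A hedged refactoring sketch
# follows; the helper name snipe_api and its "return None on failure" contract are assumptions,
# while the URL scheme, headers and settings names are taken from the views above.
import json

import requests
from django.conf import settings

def snipe_api(method, path, payload=None):
    """Call the Snipe API and return the parsed JSON body, or None on any transport/HTTP error."""
    response = requests.request(
        method,
        '{}api/v1/{}'.format(settings.SNIPE_URL, path),
        data=json.dumps(payload) if payload is not None else None,
        headers={
            'authorization': 'Bearer {}'.format(settings.SNIPE_API_KEY),
            'accept': 'application/json',
            'content-type': 'application/json',
        },
    )
    if response.status_code != 200:
        return None
    try:
        return json.loads(response.text)
    except ValueError:
        return None

# usage mirroring the views above:
#   data = snipe_api('GET', 'hardware/bytag/{}'.format(tag))
#   if data is None or data.get('status') == 'error':
#       ... handle the failure exactly as the views do ...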
d7c1a3f606fca343046498c63a29e95730e0801b | 2,877 | py | Python | tests/unit_tests/db_engine_specs/test_athena.py | awchisholm/superset | a169b6071209c4f6681c95486127fc43884ff6d1 | [
"Apache-2.0"
] | 2 | 2021-12-21T15:57:16.000Z | 2022-01-31T02:22:02.000Z | tests/unit_tests/db_engine_specs/test_athena.py | awchisholm/superset | a169b6071209c4f6681c95486127fc43884ff6d1 | [
"Apache-2.0"
] | 8 | 2021-11-19T11:56:39.000Z | 2022-03-02T10:50:58.000Z | tests/unit_tests/db_engine_specs/test_athena.py | awchisholm/superset | a169b6071209c4f6681c95486127fc43884ff6d1 | [
"Apache-2.0"
] | 2 | 2021-12-21T13:41:18.000Z | 2021-12-26T22:16:43.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument, import-outside-toplevel, protected-access
import re
from datetime import datetime
from flask.ctx import AppContext
from superset.errors import ErrorLevel, SupersetError, SupersetErrorType
from tests.unit_tests.fixtures.common import dttm
SYNTAX_ERROR_REGEX = re.compile(
": mismatched input '(?P<syntax_error>.*?)'. Expecting: "
)
def test_convert_dttm(app_context: AppContext, dttm: datetime) -> None:
"""
Test that date objects are converted correctly.
"""
from superset.db_engine_specs.athena import AthenaEngineSpec
assert AthenaEngineSpec.convert_dttm("DATE", dttm) == "DATE '2019-01-02'"
assert (
AthenaEngineSpec.convert_dttm("TIMESTAMP", dttm)
== "TIMESTAMP '2019-01-02 03:04:05.678'"
)
def test_extract_errors(app_context: AppContext) -> None:
"""
Test that custom error messages are extracted correctly.
"""
from superset.db_engine_specs.athena import AthenaEngineSpec
msg = ": mismatched input 'fromm'. Expecting: "
result = AthenaEngineSpec.extract_errors(Exception(msg))
assert result == [
SupersetError(
message='Please check your query for syntax errors at or near "fromm". Then, try running your query again.',
error_type=SupersetErrorType.SYNTAX_ERROR,
level=ErrorLevel.ERROR,
extra={
"engine_name": "Amazon Athena",
"issue_codes": [
{
"code": 1030,
"message": "Issue 1030 - The query has a syntax error.",
}
],
},
)
]
def test_get_text_clause_with_colon(app_context: AppContext) -> None:
"""
Make sure text clauses don't escape the colon character
"""
from superset.db_engine_specs.athena import AthenaEngineSpec
query = (
"SELECT foo FROM tbl WHERE " "abc >= TIMESTAMP '2021-11-26T00\:00\:00.000000'"
)
text_clause = AthenaEngineSpec.get_text_clause(query)
assert text_clause.text == query
| 33.847059 | 120 | 0.680222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,498 | 0.520681 |
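# Editor's note: SYNTAX_ERROR_REGEX in the module above is defined but not exercised by the
# tests shown; a hedged illustration of its named group (the sample message text is made up).
import re

SYNTAX_ERROR_REGEX = re.compile(": mismatched input '(?P<syntax_error>.*?)'. Expecting: ")
match = SYNTAX_ERROR_REGEX.search(": mismatched input 'fromm'. Expecting: ")
if match:
    print(match.group("syntax_error"))  # -> fromm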
d7c1f88f81f11884d3d6e8d5e5c7d5256d7a55bf | 1,376 | py | Python | apps/accounts/tests/functional/transactions/test_confirm_email.py | victoraguilarc/xib.li | 11e61641d0b2bad148713488aa7b730c1f6cbb0c | [
"MIT"
] | 1 | 2020-05-08T09:29:08.000Z | 2020-05-08T09:29:08.000Z | apps/accounts/tests/functional/transactions/test_confirm_email.py | victoraguilarc/xib.li | 11e61641d0b2bad148713488aa7b730c1f6cbb0c | [
"MIT"
] | 1 | 2020-05-08T16:47:17.000Z | 2020-05-13T21:05:00.000Z | apps/accounts/tests/functional/transactions/test_confirm_email.py | victoraguilarc/xib.li | 11e61641d0b2bad148713488aa7b730c1f6cbb0c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# -*- coding: utf-8 -*-
import pytest
from django.test import RequestFactory
from django.urls import reverse
from doubles import allow, expect
from rest_framework import status
from apps.accounts.models.choices import ActionCategory
from apps.accounts.services.auth import AuthService
from apps.accounts.tests.factories.pending_action import PendingActionFactory
from apps.accounts.views.confirm_email import ConfirmEmailView
@pytest.mark.django_db
class ConfirmEmailTests:
@classmethod
def make_confirm_email_url(cls, token):
return reverse(
'accounts:confirm-email',
kwargs={'token': token}
)
def test_get_with_valid_token(self, api_client):
pending_action = PendingActionFactory(category=ActionCategory.CONFIRM_EMAIL.value)
allow(AuthService).confirm_email.and_return(True)
expect(AuthService).confirm_email.once()
response = api_client.get(self.make_confirm_email_url(pending_action.token))
assert response.status_code == status.HTTP_200_OK
def test_get_without_invalid_token(self, api_client):
allow(AuthService).confirm_email.and_return(True)
expect(AuthService).confirm_email.never()
response = api_client.get(self.make_confirm_email_url('invalid_token'))
assert response.status_code == status.HTTP_200_OK
| 32.761905 | 90 | 0.750727 | 897 | 0.65189 | 0 | 0 | 920 | 0.668605 | 0 | 0 | 92 | 0.06686 |
d7c2573fe1dc3783d90bd1abbefb46141b4afea9 | 15,240 | py | Python | data/test/test_queue.py | giuseppe/quay | a1b7e4b51974edfe86f66788621011eef2667e6a | [
"Apache-2.0"
] | 2,027 | 2019-11-12T18:05:48.000Z | 2022-03-31T22:25:04.000Z | data/test/test_queue.py | giuseppe/quay | a1b7e4b51974edfe86f66788621011eef2667e6a | [
"Apache-2.0"
] | 496 | 2019-11-12T18:13:37.000Z | 2022-03-31T10:43:45.000Z | data/test/test_queue.py | giuseppe/quay | a1b7e4b51974edfe86f66788621011eef2667e6a | [
"Apache-2.0"
] | 249 | 2019-11-12T18:02:27.000Z | 2022-03-22T12:19:19.000Z | import json
import time
import pytest
from contextlib import contextmanager
from datetime import datetime, timedelta
from functools import wraps
from data.database import QueueItem
from data.queue import (
WorkQueue,
MINIMUM_EXTENSION,
queue_items_locked,
queue_items_available,
queue_items_available_unlocked,
)
from test.fixtures import *
QUEUE_NAME = "testqueuename"
class AutoUpdatingQueue(object):
def __init__(self, queue_to_wrap):
self._queue = queue_to_wrap
def _wrapper(self, func):
@wraps(func)
def wrapper(*args, **kwargs):
to_return = func(*args, **kwargs)
self._queue.update_metrics()
return to_return
return wrapper
def __getattr__(self, attr_name):
method_or_attr = getattr(self._queue, attr_name)
if callable(method_or_attr):
return self._wrapper(method_or_attr)
else:
return method_or_attr
TEST_MESSAGE_1 = json.dumps({"data": 1})
TEST_MESSAGE_2 = json.dumps({"data": 2})
TEST_MESSAGES = [json.dumps({"data": str(i)}) for i in range(1, 101)]
@contextmanager
def fake_transaction(arg):
yield
@pytest.fixture()
def transaction_factory():
return fake_transaction
def gauge_value(g):
return g.collect()[0].samples[0].value
@pytest.fixture()
def queue(transaction_factory, initialized_db):
return AutoUpdatingQueue(WorkQueue(QUEUE_NAME, transaction_factory))
def test_get_single_item(queue, transaction_factory):
# Add a single item to the queue.
queue.put(["abc", "def"], TEST_MESSAGE_1, available_after=-1)
# Have two "instances" retrieve an item to claim. Since there is only one, both calls should
# return the same item.
now = datetime.utcnow()
first_item = queue._select_available_item(False, now)
second_item = queue._select_available_item(False, now)
assert first_item.id == second_item.id
assert first_item.state_id == second_item.state_id
# Have both "instances" now try to claim the item. Only one should succeed.
first_claimed = queue._attempt_to_claim_item(first_item, now, 300)
second_claimed = queue._attempt_to_claim_item(first_item, now, 300)
assert first_claimed
assert not second_claimed
# Ensure the item is no longer available.
assert queue.get() is None
# Ensure the item's state ID has changed.
assert first_item.state_id != QueueItem.get().state_id
def test_extend_processing(queue, transaction_factory):
# Add and retrieve a queue item.
queue.put(["abc", "def"], TEST_MESSAGE_1, available_after=-1)
queue_item = queue.get(processing_time=10)
assert queue_item is not None
existing_db_item = QueueItem.get(id=queue_item.id)
# Call extend processing with a timedelta less than the minimum and ensure its
# processing_expires and state_id do not change.
changed = queue.extend_processing(queue_item, 10 + MINIMUM_EXTENSION.total_seconds() - 1)
assert not changed
updated_db_item = QueueItem.get(id=queue_item.id)
assert existing_db_item.processing_expires == updated_db_item.processing_expires
assert existing_db_item.state_id == updated_db_item.state_id
# Call extend processing with a timedelta greater than the minimum and ensure its
# processing_expires and state_id are changed.
changed = queue.extend_processing(queue_item, 10 + MINIMUM_EXTENSION.total_seconds() + 1)
assert changed
updated_db_item = QueueItem.get(id=queue_item.id)
assert existing_db_item.processing_expires != updated_db_item.processing_expires
assert existing_db_item.state_id != updated_db_item.state_id
# Call extend processing with a timedelta less than the minimum but also with new data and
# ensure its processing_expires and state_id are changed.
changed = queue.extend_processing(
queue_item, 10 + MINIMUM_EXTENSION.total_seconds() - 1, updated_data="newbody"
)
assert changed
updated_db_item = QueueItem.get(id=queue_item.id)
assert existing_db_item.processing_expires != updated_db_item.processing_expires
assert existing_db_item.state_id != updated_db_item.state_id
assert updated_db_item.body == "newbody"
def test_same_canonical_names(queue, transaction_factory):
queue_items_locked.labels(queue._queue_name).set(0)
queue_items_available.labels(queue._queue_name).set(0)
queue_items_available_unlocked.labels(queue._queue_name).set(0)
id_1 = int(queue.put(["abc", "def"], TEST_MESSAGE_1, available_after=-1))
id_2 = int(queue.put(["abc", "def"], TEST_MESSAGE_2, available_after=-1))
assert id_1 + 1 == id_2
assert not queue._currently_processing
assert gauge_value(queue_items_locked) == 0
assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 1
one = queue.get(ordering_required=True)
assert one is not None
assert one.body == TEST_MESSAGE_1
assert queue._currently_processing
assert gauge_value(queue_items_locked) == 1
assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 1
two_fail = queue.get(ordering_required=True)
assert two_fail is None
assert gauge_value(queue_items_locked) == 1
assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 1
queue.complete(one)
assert not queue._currently_processing
assert gauge_value(queue_items_locked) == 0
assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 1
two = queue.get(ordering_required=True)
assert two is not None
assert queue._currently_processing
assert two.body == TEST_MESSAGE_2
assert gauge_value(queue_items_locked) == 1
assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 1
def test_different_canonical_names(queue, transaction_factory):
queue_items_locked.labels(queue._queue_name).set(0)
queue_items_available.labels(queue._queue_name).set(0)
queue_items_available_unlocked.labels(queue._queue_name).set(0)
queue.put(["abc", "def"], TEST_MESSAGE_1, available_after=-1)
queue.put(["abc", "ghi"], TEST_MESSAGE_2, available_after=-1)
assert gauge_value(queue_items_locked) == 0
assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 2
one = queue.get(ordering_required=True)
assert one is not None
assert one.body == TEST_MESSAGE_1
assert gauge_value(queue_items_locked) == 1
assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 2
two = queue.get(ordering_required=True)
assert two is not None
assert two.body == TEST_MESSAGE_2
assert gauge_value(queue_items_locked) == 2
assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 2
def test_canonical_name(queue, transaction_factory):
queue.put(["abc", "def"], TEST_MESSAGE_1, available_after=-1)
queue.put(["abc", "def", "ghi"], TEST_MESSAGE_1, available_after=-1)
one = queue.get(ordering_required=True)
assert QUEUE_NAME + "/abc/def/" != one
two = queue.get(ordering_required=True)
assert QUEUE_NAME + "/abc/def/ghi/" != two
def test_expiration(queue, transaction_factory):
queue_items_locked.labels(queue._queue_name).set(0)
queue_items_available.labels(queue._queue_name).set(0)
queue_items_available_unlocked.labels(queue._queue_name).set(0)
queue.put(["abc", "def"], TEST_MESSAGE_1, available_after=-1)
assert gauge_value(queue_items_locked) == 0
assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 1
one = queue.get(processing_time=0.5, ordering_required=True)
assert one is not None
assert gauge_value(queue_items_locked) == 1
assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 1
one_fail = queue.get(ordering_required=True)
assert one_fail is None
time.sleep(1)
queue.update_metrics()
assert gauge_value(queue_items_locked) == 0
assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 1
one_again = queue.get(ordering_required=True)
assert one_again is not None
assert gauge_value(queue_items_locked) == 1
assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 1
def test_alive(queue, transaction_factory):
# No queue item = not alive.
assert not queue.alive(["abc", "def"])
# Add a queue item.
queue.put(["abc", "def"], TEST_MESSAGE_1, available_after=-1)
assert queue.alive(["abc", "def"])
# Retrieve the queue item.
queue_item = queue.get()
assert queue_item is not None
assert queue.alive(["abc", "def"])
# Make sure it is running by trying to retrieve it again.
assert queue.get() is None
# Delete the queue item.
queue.complete(queue_item)
assert not queue.alive(["abc", "def"])
def test_specialized_queue(queue, transaction_factory):
queue.put(["abc", "def"], TEST_MESSAGE_1, available_after=-1)
queue.put(["def", "def"], TEST_MESSAGE_2, available_after=-1)
my_queue = AutoUpdatingQueue(WorkQueue(QUEUE_NAME, transaction_factory, ["def"]))
two = my_queue.get(ordering_required=True)
assert two is not None
assert two.body == TEST_MESSAGE_2
one_fail = my_queue.get(ordering_required=True)
assert one_fail is None
one = queue.get(ordering_required=True)
assert one is not None
assert one.body == TEST_MESSAGE_1
def test_random_queue_no_duplicates(queue, transaction_factory):
for msg in TEST_MESSAGES:
queue.put(["abc", "def"], msg, available_after=-1)
seen = set()
for _ in range(1, 101):
item = queue.get()
json_body = json.loads(item.body)
msg = str(json_body["data"])
assert msg not in seen
seen.add(msg)
for body in TEST_MESSAGES:
json_body = json.loads(body)
msg = str(json_body["data"])
assert msg in seen
def test_bulk_insert(queue, transaction_factory):
queue_items_locked.labels(queue._queue_name).set(0)
queue_items_available.labels(queue._queue_name).set(0)
queue_items_available_unlocked.labels(queue._queue_name).set(0)
with queue.batch_insert() as queue_put:
queue_put(["abc", "def"], TEST_MESSAGE_1, available_after=-1)
queue_put(["abc", "def"], TEST_MESSAGE_2, available_after=-1)
queue.update_metrics()
assert not queue._currently_processing
assert gauge_value(queue_items_locked) == 0
assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 1
with queue.batch_insert() as queue_put:
queue_put(["abd", "def"], TEST_MESSAGE_1, available_after=-1)
queue_put(["abd", "ghi"], TEST_MESSAGE_2, available_after=-1)
queue.update_metrics()
assert not queue._currently_processing
assert gauge_value(queue_items_locked) == 0
assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 3
def test_num_available_between(queue, transaction_factory):
now = datetime.utcnow()
queue.put(["abc", "def"], TEST_MESSAGE_1, available_after=-10)
queue.put(["abc", "ghi"], TEST_MESSAGE_2, available_after=-5)
# Partial results
count = queue.num_available_jobs_between(now - timedelta(seconds=8), now, ["abc"])
assert count == 1
# All results
count = queue.num_available_jobs_between(now - timedelta(seconds=20), now, ["/abc"])
assert count == 2
# No results
count = queue.num_available_jobs_between(now, now, "abc")
assert count == 0
def test_incomplete(queue, transaction_factory):
# Add an item.
queue.put(["somenamespace", "abc", "def"], TEST_MESSAGE_1, available_after=-10)
now = datetime.utcnow()
count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ["/somenamespace"])
assert count == 1
# Retrieve it.
item = queue.get()
assert item is not None
assert queue._currently_processing
# Mark it as incomplete.
queue.incomplete(item, retry_after=-1)
assert not queue._currently_processing
# Retrieve again to ensure it is once again available.
same_item = queue.get()
assert same_item is not None
assert queue._currently_processing
assert item.id == same_item.id
def test_complete(queue, transaction_factory):
# Add an item.
queue.put(["somenamespace", "abc", "def"], TEST_MESSAGE_1, available_after=-10)
now = datetime.utcnow()
count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ["/somenamespace"])
assert count == 1
# Retrieve it.
item = queue.get()
assert item is not None
assert queue._currently_processing
# Mark it as complete.
queue.complete(item)
assert not queue._currently_processing
def test_cancel(queue, transaction_factory):
# Add an item.
queue.put(["somenamespace", "abc", "def"], TEST_MESSAGE_1, available_after=-10)
queue.put(["somenamespace", "abc", "def"], TEST_MESSAGE_2, available_after=-5)
now = datetime.utcnow()
count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ["/somenamespace"])
assert count == 2
# Retrieve it.
item = queue.get()
assert item is not None
# Make sure we can cancel it.
assert queue.cancel(item.id)
now = datetime.utcnow()
count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ["/somenamespace"])
assert count == 1
# Make sure it is gone.
assert not queue.cancel(item.id)
def test_deleted_namespaced_items(queue, transaction_factory):
queue = AutoUpdatingQueue(WorkQueue(QUEUE_NAME, transaction_factory, has_namespace=True))
queue.put(["somenamespace", "abc", "def"], TEST_MESSAGE_1, available_after=-10)
queue.put(["somenamespace", "abc", "ghi"], TEST_MESSAGE_2, available_after=-5)
queue.put(["anothernamespace", "abc", "def"], TEST_MESSAGE_1, available_after=-10)
# Ensure we have 2 items under `somenamespace` and 1 item under `anothernamespace`.
now = datetime.utcnow()
count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ["/somenamespace"])
assert count == 2
count = queue.num_available_jobs_between(
now - timedelta(seconds=60), now, ["/anothernamespace"]
)
assert count == 1
# Delete all `somenamespace` items.
queue.delete_namespaced_items("somenamespace")
# Check the updated counts.
count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ["/somenamespace"])
assert count == 0
count = queue.num_available_jobs_between(
now - timedelta(seconds=60), now, ["/anothernamespace"]
)
assert count == 1
# Delete all `anothernamespace` items.
queue.delete_namespaced_items("anothernamespace")
# Check the updated counts.
count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ["/somenamespace"])
assert count == 0
count = queue.num_available_jobs_between(
now - timedelta(seconds=60), now, ["/anothernamespace"]
)
assert count == 0
| 34.247191 | 98 | 0.720997 | 567 | 0.037205 | 36 | 0.002362 | 428 | 0.028084 | 0 | 0 | 2,104 | 0.138058 |
d7c5b2db0f4c72202956af5eb6262f4c752182db | 24 | py | Python | fortifyapi/__init__.py | brownsec/fortifyapi | 18ce25eb0dd082dd3017f27162740cb29f7bda85 | [
"MIT"
] | null | null | null | fortifyapi/__init__.py | brownsec/fortifyapi | 18ce25eb0dd082dd3017f27162740cb29f7bda85 | [
"MIT"
] | 1 | 2021-06-25T15:18:20.000Z | 2021-06-25T15:18:20.000Z | fortifyapi/__init__.py | brownsec/fortifyapi | 18ce25eb0dd082dd3017f27162740cb29f7bda85 | [
"MIT"
] | null | null | null | __version__ = '1.0.19'
| 8 | 22 | 0.625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0.333333 |
d7c62fc471a3322202e63b6f1c8bacf3b6d8ab26 | 1,432 | py | Python | 18Host/TDT4110/PythonWorkspace/Oving5/Blackjack.py | MarcusTL12/School | f7302f2d390e99ad9d06004e15da032c05ec59e7 | [
"Apache-2.0"
] | null | null | null | 18Host/TDT4110/PythonWorkspace/Oving5/Blackjack.py | MarcusTL12/School | f7302f2d390e99ad9d06004e15da032c05ec59e7 | [
"Apache-2.0"
] | null | null | null | 18Host/TDT4110/PythonWorkspace/Oving5/Blackjack.py | MarcusTL12/School | f7302f2d390e99ad9d06004e15da032c05ec59e7 | [
"Apache-2.0"
] | null | null | null | import random
def shuffle(cards):
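    """Shuffle the deck in place by swapping each position with a randomly chosen one."""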
for i in range(len(cards)):
randindex = random.randrange(0, len(cards))
cards[randindex], cards[i] = cards[i], cards[randindex]
def run():
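    """Play one simplified round of blackjack against the dealer."""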
stack = [2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10, 'A'] * 4
shuffle(stack)
stack_top = 51
dealer_cards = []
your_cards = []
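    # Deal two cards each, alternating dealer/player from the top of the shuffled deck.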
dealer_cards.append(stack[stack_top])
stack_top -= 2
your_cards.append(stack[stack_top])
stack_top -= 2
dealer_cards.append(stack[stack_top])
stack_top -= 2
your_cards.append(stack[stack_top])
stack_top -= 2
print("Dealers cards are " + str(dealer_cards[0]) + " and ?")
done = False
ace_value = 11
ace_set = 'A' in your_cards
score = 0
dealers_score = 0
for i in your_cards:
score += i if i != 'A' else ace_value
for i in dealer_cards:
dealers_score += i if i != 'A' else ace_value
while not done:
print("Your score is " + str(score))
        done = input("Do you want another card? (Y/N) ").lower() == 'n'
if not done:
new_card = stack[stack_top]
stack_top -= 2
your_cards.append(new_card)
            score += new_card if new_card != 'A' else ace_value
if new_card == 'A' and not ace_set:
ace_set = True
if score > 21:
score -= 10
ace_value = 1
if score >= 21:
done = True
if score > 21:
print("You got " + str(score))
print("You lost!")
else:
print("Dealers score is: " + str(dealers_score))
print("You " + ("won!" if score >= dealers_score else "lost!"))
| 20.457143 | 66 | 0.629888 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 160 | 0.111732 |
d7c647bb1019176d7d7f0e17a3dee844575194ad | 11,017 | py | Python | tools/external_converter_v2/parser/operations/op_io.py | pangge/Anakin | f327267d1ee2038d92d8c704ec9f1a03cb800fc8 | [
"Apache-2.0"
] | 533 | 2018-05-18T06:14:04.000Z | 2022-03-23T11:46:30.000Z | tools/external_converter_v2/parser/operations/op_io.py | pangge/Anakin | f327267d1ee2038d92d8c704ec9f1a03cb800fc8 | [
"Apache-2.0"
] | 100 | 2018-05-26T08:32:48.000Z | 2022-03-17T03:26:25.000Z | tools/external_converter_v2/parser/operations/op_io.py | pangge/Anakin | f327267d1ee2038d92d8c704ec9f1a03cb800fc8 | [
"Apache-2.0"
] | 167 | 2018-05-18T06:14:35.000Z | 2022-02-14T01:44:20.000Z | #! /usr/bin/env python
# Copyright (c) 2017, Cuichaowen. All rights reserved.
# -*- coding: utf-8 -*-
# ops helper dictionary
class Dictionary(object):
"""
Dictionary for op param which needs to be combined
"""
def __init__(self):
self.__dict__ = {}
def set_attr(self, **kwargs):
"""
set dict from kwargs
"""
for key in kwargs.keys():
if type(kwargs[key]) == type(dict()):
for key_inner in kwargs[key].keys():
self.__dict__[key_inner] = kwargs[key][key_inner]
else:
self.__dict__[key] = kwargs[key]
return self
def __call__(self):
"""
call class function to generate dictionary param
"""
ret = {key: self.__dict__[key] for key in self.__dict__.keys()}
return ret
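

# Minimal usage sketch (the names below are illustrative, not part of this module):
#   example_param = Dictionary().set_attr(threshold=float(), top_k=int())
#   example_param()  # -> {'threshold': 0.0, 'top_k': 0}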
########### Object tracking and detection helpers (for adu Caffe layer types): op I/O definitions #############
# NMSSSDParameter
nms_param = Dictionary().set_attr(need_nms=bool(),
overlap_ratio=list(),
top_n=list(),
add_score=bool(),
max_candidate_n=list(),
use_soft_nms=list(),
nms_among_classes=bool(),
voting=list(),
vote_iou=list(),
nms_gpu_max_n_per_time=int())
# BBoxRegParameter
bbox_reg_param = Dictionary().set_attr(bbox_mean=list(),
bbox_std=list())
# GenerateAnchorParameter
gen_anchor_param = Dictionary().set_attr(base_size=float(),
ratios=list(),
scales=list(),
anchor_width=list(),
anchor_height=list(),
anchor_x1=list(),
anchor_y1=list(),
anchor_x2=list(),
anchor_y2=list(),
zero_anchor_center=bool())
# KPTSParameter
kpts_param = Dictionary().set_attr(kpts_exist_bottom_idx=int(),
kpts_reg_bottom_idx=int(),
kpts_reg_as_classify=bool(),
kpts_classify_width=int(),
kpts_classify_height=int(),
kpts_reg_norm_idx_st=int(),
kpts_st_for_each_class=list(),
kpts_ed_for_each_class=list(),
kpts_classify_pad_ratio=float())
# ATRSParameter
# enum NormType {
# NONE,
# WIDTH,
# HEIGHT,
# WIDTH_LOG,
# HEIGHT_LOG
# }
atrs_param = Dictionary().set_attr(atrs_reg_bottom_idx=int(),
atrs_reg_norm_idx_st=int(),
atrs_norm_type=str())
# FTRSParameter
ftrs_param = Dictionary().set_attr(ftrs_bottom_idx=int())
# SPMPParameter
spmp_param = Dictionary().set_attr(spmp_bottom_idx=int(),
spmp_class_aware=list(),
spmp_label_width=list(),
spmp_label_height=list(),
spmp_pad_ratio=list())
# Cam3dParameter
cam3d_param = Dictionary().set_attr(cam3d_bottom_idx=int())
# DetectionOutputSSDParameter
# enum MIN_SIZE_MODE {
# HEIGHT_AND_WIDTH,
# HEIGHT_OR_WIDTH
# }
detection_output_ssd_param = Dictionary().set_attr(nms=nms_param(),
threshold=list(),
channel_per_scale=int(),
class_name_list=str(),
num_class=int(),
refine_out_of_map_bbox=bool(),
class_indexes=list(),
heat_map_a=list(),
heat_map_b=list(),
threshold_objectness=float(),
proposal_min_sqrt_area=list(),
proposal_max_sqrt_area=list(),
bg_as_one_of_softmax=bool(),
use_target_type_rcnn=bool(),
im_width=float(),
im_height=float(),
rpn_proposal_output_score=bool(),
regress_agnostic=bool(),
gen_anchor=gen_anchor_param(),
allow_border=float(),
allow_border_ratio=float(),
bbox_size_add_one=bool(),
read_width_scale=float(),
read_height_scale=float(),
read_height_offset=int(),
min_size_h=float(),
min_size_w=float(),
min_size_mode="HEIGHT_AND_WIDTH",
kpts=kpts_param(),
atrs=atrs_param(),
ftrs=ftrs_param(),
spmp=spmp_param(),
cam3d=cam3d_param())
# DFMBPSROIPoolingParameter
dfmb_psroi_pooling_param = Dictionary().set_attr(heat_map_a=float(),
heat_map_b=float(),
pad_ratio=float(),
output_dim=int(),
trans_std=float(),
sample_per_part=int(),
group_height=int(),
group_width=int(),
pooled_height=int(),
pooled_width=int(),
part_height=int(),
part_width=int())
# ProposalImgScaleToCamCoordsParameter
#
# enum NormType {
# HEIGHT,
# HEIGHT_LOG
# }
#
# enum OrienType {
# PI,
# PI2
# }
proposal_img_scale_to_cam_coords_param = Dictionary().set_attr(num_class=int(),
sub_class_num_class=list(),
sub_class_bottom_idx=list(),
prj_h_norm_type=str(),
has_size3d_and_orien3d=bool(),
orien_type=str(),
cls_ids_zero_size3d_w=list(),
cls_ids_zero_size3d_l=list(),
cls_ids_zero_orien3d=list(),
cmp_pts_corner_3d=bool(),
cmp_pts_corner_2d=bool(),
ctr_2d_means=list(),
ctr_2d_stds=list(),
prj_h_means=list(),
prj_h_stds=list(),
real_h_means=list(),
real_h_stds=list(),
real_w_means=list(),
real_w_stds=list(),
real_l_means=list(),
real_l_stds=list(),
sin_means=list(),
sin_stds=list(),
cos_means=list(),
cos_stds=list(),
cam_info_idx_st_in_im_info=int(),
im_width_scale=float(),
im_height_scale=float(),
cords_offset_x=float(),
cords_offset_y=float(),
bbox_size_add_one=bool(),
rotate_coords_by_pitch=bool(),
#refine_coords_by_bbox=bool(),
#refine_min_dist=float(),
#refine_dist_for_height_ratio_one=float(),
#max_3d2d_height_ratio_for_min_dist=float(),
with_trunc_ratio=bool(),
regress_ph_rh_as_whole=bool(),
real_h_means_as_whole=list(),
real_h_stds_as_whole=list())
# RPNProposalSSD parameter
RPNProposalSSD_param = Dictionary().set_attr(detection_output_ssd=detection_output_ssd_param(),
bbox_reg=bbox_reg_param())
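# Note: set_attr flattens dict-valued arguments, so calling RPNProposalSSD_param()
# returns a single dict whose keys (e.g. 'threshold', 'bbox_mean') sit at the top level.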
| 54.004902 | 108 | 0.338023 | 726 | 0.065898 | 0 | 0 | 0 | 0 | 0 | 0 | 1,040 | 0.0944 |