Dataset columns (name: type, observed range; ⌀ marks nullable columns):
hexsha: string (length 40)
size: int64 (5 to 2.06M)
ext: string (10 classes)
lang: string (1 class)
max_stars_repo_path: string (length 3 to 248)
max_stars_repo_name: string (length 5 to 125)
max_stars_repo_head_hexsha: string (length 40 to 78)
max_stars_repo_licenses: list (length 1 to 10)
max_stars_count: int64 (1 to 191k) ⌀
max_stars_repo_stars_event_min_datetime: string (length 24) ⌀
max_stars_repo_stars_event_max_datetime: string (length 24) ⌀
max_issues_repo_path: string (length 3 to 248)
max_issues_repo_name: string (length 5 to 125)
max_issues_repo_head_hexsha: string (length 40 to 78)
max_issues_repo_licenses: list (length 1 to 10)
max_issues_count: int64 (1 to 67k) ⌀
max_issues_repo_issues_event_min_datetime: string (length 24) ⌀
max_issues_repo_issues_event_max_datetime: string (length 24) ⌀
max_forks_repo_path: string (length 3 to 248)
max_forks_repo_name: string (length 5 to 125)
max_forks_repo_head_hexsha: string (length 40 to 78)
max_forks_repo_licenses: list (length 1 to 10)
max_forks_count: int64 (1 to 105k) ⌀
max_forks_repo_forks_event_min_datetime: string (length 24) ⌀
max_forks_repo_forks_event_max_datetime: string (length 24) ⌀
content: string (length 5 to 2.06M)
avg_line_length: float64 (1 to 1.02M)
max_line_length: int64 (3 to 1.03M)
alphanum_fraction: float64 (0 to 1)
count_classes: int64 (0 to 1.6M)
score_classes: float64 (0 to 1)
count_generators: int64 (0 to 651k)
score_generators: float64 (0 to 1)
count_decorators: int64 (0 to 990k)
score_decorators: float64 (0 to 1)
count_async_functions: int64 (0 to 235k)
score_async_functions: float64 (0 to 1)
count_documentation: int64 (0 to 1.04M)
score_documentation: float64 (0 to 1)
Each record below lists these fields in order, pipe-separated, with the content field shown in full between the fork metadata and the per-file statistics.
b1a88bc7e1241c7e280f5c4ac943fa677100e8e2 | 7,651 | py | Python | utilities/tag-bumper.py | stackrox/collector | 4c3913176eb62636e32a8a56f889e611c638de73 | ["Apache-2.0"] | 1 | 2022-03-31T15:25:16.000Z | 2022-03-31T15:25:16.000Z | utilities/tag-bumper.py | stackrox/collector | 4c3913176eb62636e32a8a56f889e611c638de73 | ["Apache-2.0"] | 4 | 2022-03-31T16:16:00.000Z | 2022-03-31T23:24:33.000Z | utilities/tag-bumper.py | stackrox/collector | 4c3913176eb62636e32a8a56f889e611c638de73 | ["Apache-2.0"] | null | null | null |
#! /usr/bin/env python3
from sh import git, ErrorReturnCode
import argparse
import sys
import os
import atexit
import re
def exit_handler(repo):
"""
Rollback the repo to the branch passed as an argument.
Parameters:
repo: An sh.Command baked for git on the working repository.
"""
print('Rolling back to starting branch')
repo.checkout('-')
def validate_version(version: str):
"""
Validates the provided version is in the form 'M.m'.
Returns:
The same string provided as input if the format is valid.
Raises:
        ValueError: If the provided version does not match the expected pattern.
"""
    version_re = re.compile(r'^\d+\.\d+$')
if not version_re.match(version):
raise ValueError
return version
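# Illustrative examples:
#   validate_version('3.8')    -> returns '3.8'
#   validate_version('3.8.1')  -> raises ValueError (only major.minor is accepted)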
def get_repo_handle(path: str):
"""
Provides a sh.Command baked to run git commands on a repository.
Parameters:
        path: A path to a repository; if it is empty, the returned handle points to the directory this script lives in.
Returns:
An sh.Command ready to run git commands.
"""
if path != '':
return git.bake('--no-pager', C=path)
return git.bake('--no-pager', C=os.path.dirname(os.path.realpath(__file__)))
def get_release_branch(version: str) -> str:
"""
Helper function, simply formats the release branch for the provided version.
Parameters:
version: A string with a valid version.
Returns:
A string with the name of the corresponding release branch.
"""
return f'release/{version}.x'
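# Illustrative example: get_release_branch('3.8') returns 'release/3.8.x'.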
def fetch_all(repo):
"""
Fetches all branches and tags from all remotes configured in the repository.
Parameters:
repo: An sh.Command baked for git on the working repository.
"""
try:
repo.fetch('--all', '--tags')
except ErrorReturnCode as e:
print(f'Failed to fetch remote. {e}')
sys.exit(1)
def get_branch(repo, version: str) -> str:
"""
Validates the release branch exists and returns a string with its name.
Parameters:
repo: An sh.Command baked for git on the working repository.
version: A string with a valid version.
Returns:
A string with the name of the release branch.
"""
release_branch = get_release_branch(version)
try:
repo('rev-parse', '--verify', release_branch)
except ErrorReturnCode as e:
print(f'The branch {release_branch} does not exist. {e}')
sys.exit(1)
return release_branch
def checkout_release_branch(repo, version: str):
"""
Checks out the release branch for the provided version.
Parameters:
repo: An sh.Command baked for git on the working repository.
version: A string with a valid version.
"""
branch = get_branch(repo, version)
print(f'Checking out {branch}')
try:
repo.checkout(branch).wait()
except ErrorReturnCode as e:
print(f'Failed to checkout release branch {branch}. {e}')
sys.exit(1)
def find_tag_version(repo, version: str) -> str:
"""
Finds the latest tag for the provided version.
This is done by iterating over the tags in the repository, checking against the provided major and minor versions
and using the highest patch number found once the iteration is done.
Parameters:
repo: An sh.Command baked for git on the working repository.
version: The major and minor versions we want to create a new tag for in the format 'M.m'
Returns:
The new tag to be created.
"""
patch_version = -1
version_regex = re.compile(fr'^{re.escape(version)}\.(\d+)$')
for tag in repo.tag().splitlines():
matched = version_regex.match(tag)
if matched:
patch = int(matched[1])
if patch > patch_version:
patch_version = patch
if patch_version == -1:
        print(f'Failed to find an existing tag for {version}')
sys.exit(1)
return f'{version}.{patch_version + 1}'
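# Illustrative example: if the repository already has tags 3.8.0, 3.8.1 and 3.8.2,
# find_tag_version(repo, '3.8') returns '3.8.3'.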
def create_empty_commit(repo):
"""
Creates an empty commit on the current branch. Uses defaults for author, signature, etc.
"""
print('Creating empty commit.')
try:
repo.commit('--allow-empty', '-m', 'Empty commit')
except ErrorReturnCode as e:
print(f'Failed to create empty commit: {e}')
sys.exit(1)
def create_new_tag(repo, new_tag: str):
"""
Creates a new tag on the current commit.
Parameters:
        new_tag: The new tag to be created, e.g. 3.8.5
"""
print(f'Creating new tag: {new_tag}')
try:
        repo.tag(new_tag)
except ErrorReturnCode as e:
print(f'Failed to create new tag {new_tag}. {e}')
sys.exit(1)
def push_branch(repo):
"""
Executes a push on the current branch.
Parameters:
repo: An sh.Command baked for git on the working repository.
"""
print('Pushing release branch...')
try:
repo.push()
except ErrorReturnCode as e:
print(f'Failed to push empty commit to release branch. {e}')
sys.exit(1)
def push_tag(repo, new_tag: str, remote: str):
"""
Push a new tag to the provided remote.
Parameters:
repo: An sh.Command baked for git on the working repository.
        new_tag: The new tag to be pushed, e.g. 3.8.5
        remote: The remote in the repository the tag will be pushed to, e.g. origin
"""
print(f'Pushing {new_tag} to {remote}...')
try:
repo.push(remote, new_tag)
except ErrorReturnCode as e:
print(f'Failed to push tag {new_tag} to {remote}. {e}')
def main(version: str, dry_run: bool, push: bool, path: str, remote: str):
repo = get_repo_handle(path)
new_tag = find_tag_version(repo, version)
print(f'New tag to be created: {new_tag}')
if dry_run:
print('This is a dry run, no tag created.')
return
# Before doing anything else, ensure branch rolls back to current working one
atexit.register(exit_handler, repo)
fetch_all(repo)
checkout_release_branch(repo, version)
create_empty_commit(repo)
create_new_tag(repo, new_tag)
if not push:
print(f'Created empty commit and new tag {new_tag}')
print("Run")
print(f"git checkout {get_release_branch(version)} && git push && git push {remote} {new_tag}")
print("to publish them")
return
push_branch(repo)
    push_tag(repo, new_tag, remote)
if __name__ == "__main__":
description = """Creates a new patch tag with an empty commit.
Useful when we need to simply rebuild a collector image."""
parser = argparse.ArgumentParser(description=description,
formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('version', help='Version to bump in the format X.Y', type=validate_version)
parser.add_argument('-d', '--dry-run', help='Run all checks without actually modifying the repo',
default=False, action='store_true')
    parser.add_argument('-p', '--push', help="Push the newly created tag", default=False, action='store_true')
parser.add_argument('-C', '--cwd',
help='Path to the repository to run in, defaults to the directory this script is in',
default='')
    parser.add_argument('-r', '--remote', help="Remote repository to push tags to, defaults to 'origin'", default='origin')
args = parser.parse_args()
version = args.version
dry_run = args.dry_run
push = args.push
path = args.cwd
remote = args.remote
main(version, dry_run, push, path, remote)
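    # Illustrative invocations (assuming the script is executable):
    #   ./tag-bumper.py 3.8 --dry-run         # only print the tag that would be created
    #   ./tag-bumper.py 3.8 --push -r origin  # create the empty commit and tag, then push both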
| 29.091255 | 119 | 0.640439 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,247 | 0.555091 |
b1a911035784142a39959873000505c8b7d79b40 | 2,455 | py | Python | openshift/helper/openshift.py | flaper87/openshift-restclient-python | 13d5d86ca89035b9f596032e7a34f3cc33bf8f18 | ["Apache-2.0"] | null | null | null | openshift/helper/openshift.py | flaper87/openshift-restclient-python | 13d5d86ca89035b9f596032e7a34f3cc33bf8f18 | ["Apache-2.0"] | null | null | null | openshift/helper/openshift.py | flaper87/openshift-restclient-python | 13d5d86ca89035b9f596032e7a34f3cc33bf8f18 | ["Apache-2.0"] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import json
from kubernetes.client import models as k8s_models
from kubernetes.client import apis as k8s_apis
from kubernetes.client.rest import ApiException
from urllib3.exceptions import MaxRetryError
from . import VERSION_RX
from .. import config
from ..client import models as openshift_models
from ..client import apis as openshift_apis
from ..client import ApiClient, ConfigurationObject
from .base import BaseObjectHelper
from .exceptions import OpenShiftException
class OpenShiftObjectHelper(BaseObjectHelper):
@staticmethod
def client_from_config(config_file, context):
if not config_file:
return ApiClient(config=ConfigurationObject())
return config.new_client_from_config(config_file, context)
@classmethod
def available_apis(cls):
apis = ['OapiApi']
apis.extend([x for x in dir(openshift_apis) if VERSION_RX.search(x)])
apis.extend([x for x in dir(k8s_apis) if VERSION_RX.search(x)])
return apis
@staticmethod
def get_exception_class():
return OpenShiftException
@staticmethod
def model_class_from_name(model_name):
try:
return getattr(openshift_models, model_name)
except AttributeError:
return getattr(k8s_models, model_name)
@staticmethod
def api_class_from_name(api_name):
try:
return getattr(openshift_apis, api_name)
except AttributeError:
return getattr(k8s_apis, api_name)
def create_project(self, metadata, display_name=None, description=None):
""" Creating a project requires using the project_request endpoint. """
# TODO: handle admin-level project creation
w, stream = self._create_stream(None)
try:
proj_req = openshift_models.V1ProjectRequest(metadata=metadata, display_name=display_name, description=description)
openshift_apis.OapiApi(self.api_client).create_project_request(proj_req)
except ApiException as exc:
msg = json.loads(exc.body).get('message', exc.reason) if exc.body.startswith('{') else exc.body
raise OpenShiftException(msg, status=exc.status)
except MaxRetryError as ex:
raise OpenShiftException(str(ex.reason))
self._read_stream(w, stream, metadata.name)
return self._wait_for_response(metadata.name, None, 'create')
| 35.071429 | 127 | 0.712424 | 1,914 | 0.779633 | 0 | 0 | 937 | 0.38167 | 0 | 0 | 166 | 0.067617 |
b1a93d370fc62aa987aa9250ab1bac4da3444f9c | 35 | py | Python | tests/__init__.py | jsta/nhdpy | 38f52a68907e4d838715c77b18e61450eb775c72 | ["MIT"] | null | null | null | tests/__init__.py | jsta/nhdpy | 38f52a68907e4d838715c77b18e61450eb775c72 | ["MIT"] | 8 | 2020-11-12T16:42:23.000Z | 2021-03-04T19:00:09.000Z | tests/__init__.py | jsta/nhdpy | 38f52a68907e4d838715c77b18e61450eb775c72 | ["MIT"] | null | null | null |
"""Unit test package for nhdpy."""
| 17.5 | 34 | 0.657143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.971429 |
b1a94a4b34655e087c8464bf5e2ca43f8d328eaa | 10,423 | py | Python | ltcl/modules/lvae_nonlinear.py | anonymous-authors-iclr2022-481/ltcl | 0d8902228fa6c37f875bb60c4d16988462a9655a | ["MIT"] | 8 | 2021-10-16T08:35:37.000Z | 2022-02-10T09:25:50.000Z | leap/modules/lvae_nonlinear.py | weirayao/leap | 8d10b8413d02d3be49d5c02a13a0aa60a741d8da | ["MIT"] | null | null | null | leap/modules/lvae_nonlinear.py | weirayao/leap | 8d10b8413d02d3be49d5c02a13a0aa60a741d8da | ["MIT"] | 1 | 2021-11-30T04:06:43.000Z | 2021-11-30T04:06:43.000Z |
"""Temporal VAE with gaussian margial and laplacian transition prior"""
import torch
import numpy as np
import ipdb as pdb
import torch.nn as nn
import pytorch_lightning as pl
import torch.distributions as D
from torch.nn import functional as F
from .components.beta import BetaVAE_MLP
from .metrics.correlation import compute_mcc
from .components.base import GroupLinearLayer
from .components.transforms import ComponentWiseSpline
def reconstruction_loss(x, x_recon, distribution):
batch_size = x.size(0)
assert batch_size != 0
if distribution == 'bernoulli':
recon_loss = F.binary_cross_entropy_with_logits(
x_recon, x, size_average=False).div(batch_size)
elif distribution == 'gaussian':
recon_loss = F.mse_loss(x_recon, x, size_average=False).div(batch_size)
elif distribution == 'sigmoid':
x_recon = F.sigmoid(x_recon)
recon_loss = F.mse_loss(x_recon, x, size_average=False).div(batch_size)
return recon_loss
def compute_cross_ent_normal(mu, logvar):
return 0.5 * (mu**2 + torch.exp(logvar)) + np.log(np.sqrt(2 * np.pi))
def compute_ent_normal(logvar):
return 0.5 * (logvar + np.log(2 * np.pi * np.e))
def compute_sparsity(mu, normed=True):
# assume couples, compute normalized sparsity
diff = mu[::2] - mu[1::2]
if normed:
norm = torch.norm(diff, dim=1, keepdim=True)
norm[norm == 0] = 1 # keep those that are same, dont divide by 0
diff = diff / norm
return torch.mean(torch.abs(diff))
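# Illustrative reading: rows of `mu` are treated as couples (0, 1), (2, 3), ...;
# each pairwise difference is optionally L2-normalised per couple before the mean
# absolute difference is returned.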
class AfflineVAESynthetic(pl.LightningModule):
def __init__(
self,
input_dim,
lag=1,
beta=1,
alpha=1,
lr=1e-4,
z_dim=10,
gamma=10,
rate_prior=1,
hidden_dim=128,
diagonal=False,
decoder_dist='gaussian',
nonlinear_type='gaussian'
):
'''Import Beta-VAE as encoder/decoder'''
super().__init__()
self.net = BetaVAE_MLP(input_dim=input_dim,
z_dim=z_dim,
hidden_dim=hidden_dim)
self.trans_func = GroupLinearLayer(din=z_dim,
dout=z_dim,
num_blocks=lag,
diagonal=diagonal)
self.spline = ComponentWiseSpline(input_dim=z_dim,
bound=5,
count_bins=8,
order="linear")
self.f1 = nn.Sequential(nn.Linear(z_dim, z_dim),
nn.LeakyReLU(0.2))
self.f2 = nn.Sequential(nn.Linear(z_dim, z_dim),
nn.LeakyReLU(0.2))
self.coff = nn.Linear(z_dim, z_dim)
# self.spline.load_state_dict(torch.load("/home/yuewen/spline.pth"))
self.lr = lr
self.lag = lag
self.beta = beta
self.z_dim = z_dim
self.alpha = alpha
self.gamma = gamma
self.input_dim = input_dim
self.rate_prior = rate_prior
self.decoder_dist = decoder_dist
self.nonlinear_type = nonlinear_type
self.b = nn.Parameter(0.01 * torch.randn(1, z_dim))
# base distribution for calculation of log prob under the model
self.register_buffer('base_dist_var', torch.eye(self.z_dim))
self.register_buffer('base_dist_mean', torch.zeros(self.z_dim))
@property
def base_dist(self):
return D.MultivariateNormal(self.base_dist_mean, self.base_dist_var)
def forward(self, batch):
xt, xt_ = batch["xt"], batch["xt_"]
batch_size, _, _ = xt.shape
x = torch.cat((xt, xt_), dim=1)
x = x.view(-1, self.input_dim)
return self.net(x)
def compute_cross_ent_laplace(self, mean, logvar, rate_prior):
var = torch.exp(logvar)
sigma = torch.sqrt(var)
ce = - torch.log(rate_prior / 2) + rate_prior * sigma *\
np.sqrt(2 / np.pi) * torch.exp(- mean**2 / (2 * var)) -\
rate_prior * mean * (
1 - 2 * self.normal_dist.cdf(mean / sigma))
return ce
def training_step(self, batch, batch_idx):
xt, xt_ = batch["xt"], batch["xt_"]
batch_size, _, _ = xt.shape
x = torch.cat((xt, xt_), dim=1)
x = x.view(-1, self.input_dim)
x_recon, mu, logvar, z = self.net(x)
# Normal VAE loss: recon_loss + kld_loss
recon_loss = reconstruction_loss(x, x_recon, self.decoder_dist)
mu = mu.view(batch_size, -1, self.z_dim)
logvar = logvar.view(batch_size, -1, self.z_dim)
z = z.view(batch_size, -1, self.z_dim)
mut, mut_ = mu[:,:-1,:], mu[:,-1:,:]
logvart, logvart_ = logvar[:,:-1,:], logvar[:,-1:,:]
zt, zt_ = z[:,:-1,:], z[:,-1:,:]
        # Past KLD divergence
p1 = D.Normal(torch.zeros_like(mut), torch.ones_like(logvart))
q1 = D.Normal(mut, torch.exp(logvart / 2))
log_qz_normal = q1.log_prob(zt)
log_pz_normal = p1.log_prob(zt)
kld_normal = log_qz_normal - log_pz_normal
kld_normal = torch.sum(torch.sum(kld_normal,dim=-1),dim=-1).mean()
'''
have question on this part...
'''
# Current KLD divergence
if self.nonlinear_type == "gaussian":
zt_mid = self.f1(zt)
noise_term = nn.Parameter(torch.randn(zt.shape).cuda())
zt_mid = zt_mid + self.coff(noise_term)
zt_bar = self.f2(zt_mid)
recon_zt_ = reconstruction_loss(zt_, zt_bar, self.decoder_dist)
else:
zt_mid = self.f1(zt)
noise_term = nn.Parameter(torch.distributions.laplace.Laplace(0,1).rsample(zt.shape).cuda())
zt_mid = zt_mid + self.coff(noise_term)
zt_bar = self.f2(zt_mid)
recon_zt_ = reconstruction_loss(zt_, zt_bar, self.decoder_dist)
coff = torch.abs(self.coff.weight).mean()
# # Current KLD divergence
# ut = self.trans_func(zt)
# ut = torch.sum(ut, dim=1) + self.b
# epst_ = zt_.squeeze() + ut
# et_, logabsdet = self.spline(epst_)
# log_pz_laplace = self.base_dist.log_prob(et_) + logabsdet
# q_laplace = D.Normal(mut_, torch.exp(logvart_ / 2))
# log_qz_laplace = q_laplace.log_prob(zt_)
# kld_laplace = torch.sum(torch.sum(log_qz_laplace,dim=-1),dim=-1) - log_pz_laplace
# kld_laplace = kld_laplace.mean()
loss = (self.lag+1) * recon_loss + self.beta * kld_normal + self.gamma * recon_zt_ + self.alpha * coff
# loss = (self.lag+1) * recon_loss + self.beta * kld_normal
zt_recon = mu[:,-1,:].T.detach().cpu().numpy()
zt_true = batch["yt_"].squeeze().T.detach().cpu().numpy()
mcc = compute_mcc(zt_recon, zt_true, "Pearson")
self.log("train_mcc", mcc)
self.log("train_coff", coff)
self.log("train_elbo_loss", loss)
self.log("train_recon_zt_", recon_zt_)
self.log("train_recon_loss", recon_loss)
self.log("train_kld_normal", kld_normal)
return loss
def validation_step(self, batch, batch_idx):
xt, xt_ = batch["xt"], batch["xt_"]
batch_size, _, _ = xt.shape
x = torch.cat((xt, xt_), dim=1)
x = x.view(-1, self.input_dim)
x_recon, mu, logvar, z = self.net(x)
# Normal VAE loss: recon_loss + kld_loss
recon_loss = reconstruction_loss(x, x_recon, self.decoder_dist)
mu = mu.view(batch_size, -1, self.z_dim)
logvar = logvar.view(batch_size, -1, self.z_dim)
z = z.view(batch_size, -1, self.z_dim)
mut, mut_ = mu[:,:-1,:], mu[:,-1:,:]
logvart, logvart_ = logvar[:,:-1,:], logvar[:,-1:,:]
zt, zt_ = z[:,:-1,:], z[:,-1:,:]
        # Past KLD divergence
p1 = D.Normal(torch.zeros_like(mut), torch.ones_like(logvart))
q1 = D.Normal(mut, torch.exp(logvart / 2))
log_qz_normal = q1.log_prob(zt)
log_pz_normal = p1.log_prob(zt)
kld_normal = log_qz_normal - log_pz_normal
kld_normal = torch.sum(torch.sum(kld_normal,dim=-1),dim=-1).mean()
# Current KLD divergence
if self.nonlinear_type == "gaussian":
zt_mid = self.f1(zt)
noise_term = nn.Parameter(torch.randn(zt.shape).cuda())
zt_mid = zt_mid + self.coff(noise_term)
zt_bar = self.f2(zt_mid)
recon_zt_ = reconstruction_loss(zt_, zt_bar, self.decoder_dist)
else:
zt_mid = self.f1(zt)
noise_term = nn.Parameter(torch.distributions.laplace.Laplace(0,1).rsample(zt.shape).cuda())
zt_mid = zt_mid + self.coff(noise_term)
zt_bar = self.f2(zt_mid)
recon_zt_ = reconstruction_loss(zt_, zt_bar, self.decoder_dist)
coff = torch.abs(self.coff.weight).mean()
loss = (self.lag+1) * recon_loss + self.beta * kld_normal + self.gamma * recon_zt_ + self.alpha * coff
# loss = (self.lag+1) * recon_loss + self.beta * kld_normal
zt_recon = mu[:,-1,:].T.detach().cpu().numpy()
zt_true = batch["yt_"].squeeze().T.detach().cpu().numpy()
mcc = compute_mcc(zt_recon, zt_true, "Pearson")
self.log("val_mcc", mcc)
self.log("val_coff", coff)
self.log("val_elbo_loss", loss)
self.log("val_recon_zt_", recon_zt_)
self.log("val_recon_loss", recon_loss)
self.log("val_kld_normal", kld_normal)
return loss
def sample(self, xt):
batch_size = xt.shape[0]
e = torch.randn(batch_size, self.z_dim).to(xt.device)
eps, _ = self.spline.inverse(e)
return eps
    def reconstruct(self, batch):
        return self.forward(batch)[0]
def configure_optimizers(self):
optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, self.parameters()), lr=self.lr)
        # A scheduler is optional, but can help in flows to get the last bpd improvement
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 1, gamma=0.99)
return [optimizer], [scheduler]
| 40.399225 | 111 | 0.570469 | 8,849 | 0.848988 | 0 | 0 | 113 | 0.010841 | 0 | 0 | 1,549 | 0.148614 |
b1a94cda8b0a8f59129a19a7e19f329084618c94 | 7,196 | py | Python | cargame/camera.py | jocelynthiojaya/Self-Learning-Cars | 5dbd47f4f34155cf50cd6c6a6daef70449f96398 | ["Apache-2.0"] | null | null | null | cargame/camera.py | jocelynthiojaya/Self-Learning-Cars | 5dbd47f4f34155cf50cd6c6a6daef70449f96398 | ["Apache-2.0"] | null | null | null | cargame/camera.py | jocelynthiojaya/Self-Learning-Cars | 5dbd47f4f34155cf50cd6c6a6daef70449f96398 | ["Apache-2.0"] | null | null | null |
import arcade
from cargame.globals import conf
from cargame import util
# This math is for getting the ratio from zoom. I honestly
# don't know what it is called, i just constructed it by hand
# Long form is 1 - (x - 1) / 2
zoom_multiplexer = lambda x : (3 - x)/2
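# Illustrative values: zoom_multiplexer(1) == 1.0, zoom_multiplexer(2) == 0.5,
# zoom_multiplexer(3) == 0.0.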
# TODO: Implement anchor
class Camera:
def __init__(self, left_bound, bottom_bound, right_bound, top_bound):
""" Set every camera variables
s_width: Screen width
s_height: Screen height
"""
self.x = 0
self.y = 0
self.right = conf["screen_width"]
self.top = conf["screen_height"]
# Camera bounds
self.left_bound = left_bound
self.bottom_bound = bottom_bound
self.right_bound = right_bound
self.top_bound = top_bound
# The zoom of the main canvas
self.zoom = 1
# Whether zoom is enabled.
self.can_zoom = True
# Marker when camera port will be updated this frame
self.moved = False
def on_start_update(self):
""" Will be run at the beginning of them main update function """
self.moved = False
def handle_border(self):
""" Handles if the camera went out of bounds """
bound_left = self.x < self.left_bound
bound_right = self.right > self.right_bound
if bound_left or bound_right:
x_diff = self.left_bound - self.x if bound_left else self.right_bound - self.right
self.x += x_diff
self.right += x_diff
bound_bot = self.y < self.bottom_bound
bound_top = self.top > self.top_bound
if bound_bot or bound_top:
y_diff = self.bottom_bound - self.y if bound_bot else self.top_bound - self.top
self.y += y_diff
self.top += y_diff
def update_camera_pos(self, x=None, y=None, zoom=None):
"""
Updates the position according to the x, y, and zoom
"""
# Mark camera as moved this frame
self.moved = True
# Move and do maths
zoom_mult = zoom_multiplexer(self.zoom)
        if x is not None:
self.right = x + conf["screen_width"] * zoom_mult
self.x = x
        if y is not None:
self.top = y + conf["screen_height"] * zoom_mult
self.y = y
self.handle_border()
# print("Port size: ({}, {}) zoom: {}".format(self.right - self.x, self.top - self.y, self.zoom))
def update_zoom(self, zoom, anchor_x, anchor_y):
""" Updates the zoom of the main canvas """
# Check first whether zoom is enabled
if not self.can_zoom: return
# Mark camera as moved
self.moved = True
# Clamp the zoom
zoom = util.clamp(zoom, -5.0, 2.95)
# Calculate zoom increment
zoom_inc = self.zoom - zoom
# Get the linear interpolation so that the zoom is
# focused on the anchor
x_lerp = util.invlerp(0, conf["screen_width"], anchor_x)
y_lerp = util.invlerp(0, conf["screen_height"], anchor_y)
# print("x: {} y: {} right: {} top: {}".format(self.x, self.y, self.right, self.top))
# print("xlerp: {} ylerp: {}".format(x_lerp, y_lerp))
# Camera view ports
lp = self.x - (x_lerp * conf["screen_width"] * zoom_inc) / 2
bp = self.y - (y_lerp * conf["screen_height"] * zoom_inc) / 2
rp = self.right + ((1-x_lerp) * conf["screen_width"] * zoom_inc) / 2
tp = self.top + ((1-y_lerp) * conf["screen_height"] * zoom_inc) / 2
# If camera view port is within the bounds, do the zoom.
if (rp - lp) < (self.right_bound - self.left_bound) and (tp - bp) < (self.top_bound - self.bottom_bound):
# Calculate the camera maths here
self.x = lp
self.y = bp
self.right = rp
self.top = tp
self.zoom = round(zoom, 3)
self.handle_border()
# print("x: {} y: {} right: {} top: {}".format(self.x, self.y, self.right, self.top))
# print("Port size: ({}, {}) zoom: {}".format(self.right - self.x, self.top - self.y, self.zoom))
def move_camera_pos(self, dx, dy):
""" Moves the camera by appending the variables to
the individual coordinates.
"""
self.update_camera_pos(self.x + dx, self.y + dy)
def update_viewport(self):
""" Updates the camera by updating
the viewport of arcade
"""
arcade.set_viewport(self.x, self.right, self.y, self.top)
def handle_pan(self, dx, dy):
""" Handles the camera pan from data gotten from
mouse drag """
# Here, we adjust the pan speed according to the level of zoom too.
zoom_mult = zoom_multiplexer(self.zoom)
self.move_camera_pos(-dx * zoom_mult, -dy * zoom_mult)
def handle_zoom(self, mouse_x, mouse_y, scroll_y):
""" Handles the camera scroll from data gotten from
mouse scroll """
# Must adjust according to where the pointer is.
self.update_zoom(self.zoom + scroll_y * 0.05, mouse_x, mouse_y)
def get_viewport(self):
""" Gets the size of the viewport """
return (self.right - self.x, self.top - self.y)
def reset_zoom(self):
""" Reset the zoom of the camera to 1x """
self.update_zoom(1, conf["screen_width"]/2, conf["screen_height"]/2)
def set_can_zoom(self, state):
self.can_zoom = state
class Grid():
grid_size = 128
def __init__(self, camera):
"""
Detects the camera movement
"""
self.grid_lines = []
self.camera: Camera = camera
self.recreate_grid()
def update(self):
""" Update """
def recreate_grid(self):
""" Recreate the grid from the ground up
This will recreate the grids with an offset based on the camera position.
Therefore, grids will be only drawn in the place of the camera, not outside."""
# Reset the grid lines
self.grid_lines = []
# Recreate the vertical lines
viewport = self.camera.get_viewport()
for i in range(int(viewport[0]) // Grid.grid_size + 2):
self.grid_lines.append([self.camera.x + Grid.grid_size * i - (self.camera.x % Grid.grid_size), self.camera.y + -Grid.grid_size])
self.grid_lines.append([self.camera.x + Grid.grid_size * i - (self.camera.x % Grid.grid_size), self.camera.y + viewport[1] + Grid.grid_size])
# Horizontal lines
for i in range(int(viewport[1]) // Grid.grid_size + 2):
self.grid_lines.append([self.camera.x + -Grid.grid_size, self.camera.y + Grid.grid_size * i - (self.camera.y % Grid.grid_size)])
self.grid_lines.append([self.camera.x + viewport[0] + Grid.grid_size, self.camera.y + Grid.grid_size * i - (self.camera.y % Grid.grid_size)])
def draw_grid(self):
""" Draws the grid based on the configuration """
# Only update grid when camera is moved.
if self.camera.moved:
# Recreate every line grid
self.recreate_grid()
arcade.draw_lines(self.grid_lines, (235, 235, 235))
| 34.932039 | 153 | 0.591857 | 6,903 | 0.959283 | 0 | 0 | 0 | 0 | 0 | 0 | 2,540 | 0.352974 |
b1ac9e7af9abde201568a2b9eff7f851241bb02a | 168 | py | Python | configs/tsmnet/tsmnet_r50-d1_769x769_40k_cityscapes_video.py | labdeeman7/TRDP_temporal_stability_semantic_segmentation | efe0f13c2ed4e203d1caa41810e39e09152b508e | ["Apache-2.0"] | null | null | null | configs/tsmnet/tsmnet_r50-d1_769x769_40k_cityscapes_video.py | labdeeman7/TRDP_temporal_stability_semantic_segmentation | efe0f13c2ed4e203d1caa41810e39e09152b508e | ["Apache-2.0"] | null | null | null | configs/tsmnet/tsmnet_r50-d1_769x769_40k_cityscapes_video.py | labdeeman7/TRDP_temporal_stability_semantic_segmentation | efe0f13c2ed4e203d1caa41810e39e09152b508e | ["Apache-2.0"] | null | null | null |
_base_ = [
'../_base_/models/tsm_r50-d8.py', '../_base_/datasets/cityscapes_769x769.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
]
| 42 | 81 | 0.684524 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 141 | 0.839286 |
b1aca6b126eaf2078a24e5384b735f4060abd7a2 | 1,883 | py | Python | abc/104/c_retry2.py | 515hikaru/solutions | 9fb3e4600f9a97b78211a5736c98354d4cbebc38 | ["MIT"] | null | null | null | abc/104/c_retry2.py | 515hikaru/solutions | 9fb3e4600f9a97b78211a5736c98354d4cbebc38 | ["MIT"] | 9 | 2019-12-29T17:57:39.000Z | 2020-02-16T16:36:04.000Z | abc104/c_retry2.py | 515hikaru/abc-sandbox | 6445dd9d6583bd48a285d6e5693173529933da51 | ["MIT"] | null | null | null |
from itertools import combinations
def exclude_combi_idx(combis, scores):
a = [score[1] for score in combis]
v = []
for score in scores:
if score[1] in a:
continue
v.append(score)
return v
def set_all_solve(combi):
current = 0
num = 0
for item in combi:
num += item[0]
current += item[2]
return num, current
def track_residue(num, score, current, target):
for i in range(num):
current += score
if current >= target:
return i + 1
return None
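# Illustrative example: track_residue(5, 100, 250, 400) returns 2, because two more
# 100-point solves lift the total from 250 to 450 >= 400.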
def main():
D, G = [int(c) for c in input().split()]
items = []
for idx in range(1, 1+D):
num, bonasu = [int(c) for c in input().split()]
items.append((idx * 100, num, bonasu))
scores = [(num, idx,idx * num + bonasu) for idx, num, bonasu in items]
max_prob = sum([num for num,_, _ in scores])
min_prob = max_prob
for i in range(0, D+1):
combis = list(combinations(scores, i))
# print('i = {}, combinations = {}'.format(i, combis))
for combi in combis:
s = 0
prob = 0
# print('combi = {}'.format(combi))
tmp, tmp2 = set_all_solve(combi)
prob += tmp
s += tmp2
# print('all solves num = {} score = {}'.format(prob, s))
if s >= G:
if prob < min_prob:
min_prob = prob
continue
else:
                v = exclude_combi_idx(combi, scores)[-1]  # only the entry with the largest value is needed
res = track_residue(v[0], v[1], s, G)
if res is None:
continue
if res + prob < min_prob:
# print('track solve: num = {}, score = {}'.format(res + prob, s))
min_prob = res + prob
print(min_prob)
if __name__ == '__main__':
main()
| 29.888889 | 86 | 0.495486 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 257 | 0.134908 |
b1acfa5bf6bd71ea82cf922fd4900527c2980874 | 4,418 | py | Python | merlin/celery.py | robinson96/merlin | 962b97ac037465f0fe285ceee6b77e554d8a29fe | ["MIT"] | null | null | null | merlin/celery.py | robinson96/merlin | 962b97ac037465f0fe285ceee6b77e554d8a29fe | ["MIT"] | null | null | null | merlin/celery.py | robinson96/merlin | 962b97ac037465f0fe285ceee6b77e554d8a29fe | ["MIT"] | null | null | null |
###############################################################################
# Copyright (c) 2019, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory
# Written by the Merlin dev team, listed in the CONTRIBUTORS file.
# <merlin@llnl.gov>
#
# LLNL-CODE-797170
# All rights reserved.
# This file is part of Merlin, Version: 1.5.0.
#
# For details, see https://github.com/LLNL/merlin.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###############################################################################
"""Updated celery configuration."""
from __future__ import (
absolute_import,
print_function,
)
import logging
import os
import billiard
import psutil
from celery import Celery
from celery.signals import worker_process_init
import merlin.common.security.encrypt_backend_traffic
from merlin.config import (
broker,
results_backend,
)
from merlin.log_formatter import FORMATS
from merlin.router import route_for_task
LOG = logging.getLogger(__name__)
broker_ssl = True
results_ssl = False
try:
BROKER_URI = broker.get_connection_string()
LOG.info(f"broker: {broker.get_connection_string(include_password=False)}")
broker_ssl = broker.get_ssl_config()
LOG.info(f"broker_ssl = {broker_ssl}")
RESULTS_BACKEND_URI = results_backend.get_connection_string()
results_ssl = results_backend.get_ssl_config(celery_check=True)
LOG.info(
f"results: {results_backend.get_connection_string(include_password=False)}"
)
LOG.info(f"results: redis_backed_use_ssl = {results_ssl}")
except ValueError:
# These variables won't be set if running with '--local'.
BROKER_URI = None
RESULTS_BACKEND_URI = None
app = Celery(
"merlin",
broker=BROKER_URI,
backend=RESULTS_BACKEND_URI,
broker_use_ssl=broker_ssl,
redis_backend_use_ssl=results_ssl,
)
app.conf.update(
task_serializer="pickle", accept_content=["pickle"], result_serializer="pickle"
)
app.autodiscover_tasks(["merlin.common"])
app.conf.update(
task_acks_late=True,
task_reject_on_worker_lost=True,
task_publish_retry_policy={
"interval_start": 10,
"interval_step": 10,
"interval_max": 60,
},
redis_max_connections=100000,
)
# Set a two-hour visibility timeout: a task must be acknowledged within that window
# before it becomes available to grab again.
app.conf.broker_transport_options = {"visibility_timeout": 7200, "max_connections": 100}
app.conf.update(broker_pool_limit=0)
# Task routing: call our default queue merlin
app.conf.task_routes = (route_for_task,)
app.conf.task_default_queue = "merlin"
# Log formatting
app.conf.worker_log_color = True
app.conf.worker_log_format = FORMATS["DEFAULT"]
app.conf.worker_task_log_format = FORMATS["WORKER"]
@worker_process_init.connect()
def setup(**kwargs):
"""
Set affinity for the worker on startup (works on toss3 nodes)
:param `**kwargs`: keyword arguments
"""
if "CELERY_AFFINITY" in os.environ and int(os.environ["CELERY_AFFINITY"]) > 1:
# Number of cpus between workers.
cpu_skip = int(os.environ["CELERY_AFFINITY"])
npu = psutil.cpu_count()
p = psutil.Process()
current = billiard.current_process()
prefork_id = current._identity[0] - 1 # range 0:nworkers-1
cpu_slot = (prefork_id * cpu_skip) % npu
p.cpu_affinity(list(range(cpu_slot, cpu_slot + cpu_skip)))
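# Illustrative example: with CELERY_AFFINITY=4 on a 16-CPU node, prefork worker 0 is
# pinned to CPUs 0-3, worker 1 to CPUs 4-7, and so on, wrapping around modulo the CPU count.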
| 33.218045 | 88 | 0.715708 | 0 | 0 | 0 | 0 | 645 | 0.145994 | 0 | 0 | 2,369 | 0.536215 |
b1ad3f5981efc006ce7e36a91015794cd61586bc | 648 | py | Python | webapi/apps/web/management/mockup.py | NovaSBE-DSKC/retention-evaluation | 5b68b9282f0b5479a9dc5238faef68067c76b861 | ["MIT"] | null | null | null | webapi/apps/web/management/mockup.py | NovaSBE-DSKC/retention-evaluation | 5b68b9282f0b5479a9dc5238faef68067c76b861 | ["MIT"] | null | null | null | webapi/apps/web/management/mockup.py | NovaSBE-DSKC/retention-evaluation | 5b68b9282f0b5479a9dc5238faef68067c76b861 | ["MIT"] | null | null | null |
import random
import pandas as pd
import json
def get_random_name():
names = ["Miguel", "Susana", " Pedro", "Mateus", "Ângelo", "Beatriz", "Ana", " Maria", "Carlos", "José"]
return names[random.randint(0, len(names) - 1)]
def add_random_names(data):
for x in data:
x["name"] = get_random_name()
return data
def get_mockup_data(grade='9'):
# read file
df = pd.read_csv("mockup_data/dummy_" + grade + "grade.csv")
# dataframe to json
result = df.to_json(orient="values")
result = json.loads(result)
# add columns
columns = list(df.columns)
result.insert(0, columns)
return result
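# Illustrative result shape: with a CSV containing columns "a" and "b", get_mockup_data('9')
# returns [["a", "b"], [1, 2], [3, 4], ...]: the header row followed by the row values.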
| 20.903226 | 108 | 0.634259 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 169 | 0.26 |
b1ad704b385cea93f718a905833492ee873ae1bf | 1,332 | py | Python | migrations/versions/e91e2508f055_.py | ifat-mohit/flask-microblog | f4f5f0df600779caecbe442d30a7ecc517ad515f | ["MIT"] | 1 | 2021-02-13T23:47:46.000Z | 2021-02-13T23:47:46.000Z | migrations/versions/e91e2508f055_.py | ifat-mohit/flask-microblog | f4f5f0df600779caecbe442d30a7ecc517ad515f | ["MIT"] | 2 | 2021-02-14T17:04:53.000Z | 2021-06-02T00:35:49.000Z | migrations/versions/e91e2508f055_.py | mohidex/flask-microblog | f4f5f0df600779caecbe442d30a7ecc517ad515f | ["MIT"] | 1 | 2020-04-07T11:56:22.000Z | 2020-04-07T11:56:22.000Z |
"""empty message
Revision ID: e91e2508f055
Revises: a064e677a1f1
Create Date: 2019-11-04 22:59:00.701304
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e91e2508f055'
down_revision = 'a064e677a1f1'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('comment',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('comment', sa.String(length=255), nullable=True),
sa.Column('commneted_on', sa.Integer(), nullable=True),
sa.Column('commented_by', sa.Integer(), nullable=True),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['commented_by'], ['user.id'], ),
sa.ForeignKeyConstraint(['commneted_on'], ['post.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_comment_timestamp'), 'comment', ['timestamp'], unique=False)
op.add_column('post', sa.Column('title', sa.String(length=140), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('post', 'title')
op.drop_index(op.f('ix_comment_timestamp'), table_name='comment')
op.drop_table('comment')
# ### end Alembic commands ###
| 31.714286 | 89 | 0.682432 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 579 | 0.434685 |
490a5d4dee030077442db885609423fe0007703e | 758 | py | Python | cli/cli_cloudformation.py | reneses/cloud-cli | 1f765cfb67cb9ffde1633fffe0da11893fb1503f | ["MIT"] | null | null | null | cli/cli_cloudformation.py | reneses/cloud-cli | 1f765cfb67cb9ffde1633fffe0da11893fb1503f | ["MIT"] | null | null | null | cli/cli_cloudformation.py | reneses/cloud-cli | 1f765cfb67cb9ffde1633fffe0da11893fb1503f | ["MIT"] | null | null | null |
from menu import Menu, MenuEntry
from logic.cloudformation import CloudFormation
class CloudFormationCli:
"""
Menu for the AWS CloudFormation operations
"""
def __init__(self):
"""
Run the menu
"""
# Init the logic handler
self.cloudformation = CloudFormation()
# Init the menu
        Menu('Amazon Web Services (AWS) CloudFormation', [
MenuEntry('Go back', None),
MenuEntry('Generate web bucket', self.generate_web_bucket),
]).run()
def generate_web_bucket(self):
"""
Generate a web bucket
"""
print '# Generating web bucket'
self.cloudformation.generate_web_bucket()
print 'Web bucket generated'
| 25.266667 | 71 | 0.604222 | 675 | 0.890501 | 0 | 0 | 0 | 0 | 0 | 0 | 304 | 0.401055 |
490a7e4e927bf1f9002b7ce41d2b092342ed19da | 3,107 | py | Python | bot/models/__init__.py | masterbpro/radio-archive | c612cd845d969a6577a3facbdd8183048f8db2de | ["MIT"] | null | null | null | bot/models/__init__.py | masterbpro/radio-archive | c612cd845d969a6577a3facbdd8183048f8db2de | ["MIT"] | null | null | null | bot/models/__init__.py | masterbpro/radio-archive | c612cd845d969a6577a3facbdd8183048f8db2de | ["MIT"] | null | null | null |
from datetime import datetime, timedelta
from peewee import SqliteDatabase, Model, PrimaryKeyField, IntegerField, CharField, BooleanField, DateTimeField
from bot.data.config import STATIC_DIR
from bot.utils.logging import logger
db = SqliteDatabase(f"{STATIC_DIR}/db.sqlite3")
class User(Model):
"""
    Class describing the fields of the user table
"""
id = PrimaryKeyField(null=False, unique=True)
user_id = IntegerField(null=False, unique=True)
full_name = CharField(null=False, max_length=255)
username = CharField(null=True, max_length=128)
is_subscribe = BooleanField(null=False, default=False)
    created = DateTimeField(default=datetime.now)  # pass the callable so the timestamp is evaluated at row creation, not at import time
def add_user(self, user_id: int, full_name: str, username: str) -> bool:
"""
        Add a user to the database.
        :param user_id: The user's Telegram ID
        :param username: The username
        :param full_name: The account's full name
        :return:
"""
try:
return self.create(user_id=user_id,
full_name=full_name,
username=username)
except Exception as addUserError:
print(addUserError)
def get_user(self, user_id: int) -> [Model, bool]:
"""
        Check whether a user exists in the database.
        :param user_id: The user's Telegram ID
        :return: The user record if found, otherwise False
"""
res = self.get_or_none(User.user_id == user_id)
        if res:  # user found
return res
return False
class Meta:
database = db
class Archive(Model):
"""
    Model for storing archive recordings
"""
id = PrimaryKeyField(null=False, unique=True)
start_date = DateTimeField(null=False)
finish_date = DateTimeField(null=False)
file_id = CharField(null=False, max_length=50)
class Meta:
database = db
def get_archive(self, hour, day, month, year):
"""
        Get an archive entry for the given hour, day, month and year.
:param hour:
:param day:
:param month:
:param year:
:return:
"""
archive_date = datetime.strptime(f"{year}/{month}/{day}-{hour}", "%Y/%m/%d-%H").strftime("%Y-%m-%d %H")
return self.get_or_none(Archive.start_date >= archive_date)
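    # Illustrative example: hour=14, day=7, month=3, year=2022 produces archive_date
    # "2022-03-07 14", and get_or_none returns a row whose start_date is at or after
    # that value (or None if there is no such row).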
def add_archive(self, start_date, file_id):
"""
        Add an archive recording to the database.
:param start_date:
:param file_id:
:return:
"""
check = self.get_or_none(Archive.start_date == start_date)
if check:
check.file_id = file_id
check.save()
logger.info(f"Update archive [{start_date}] with file [{file_id}]")
return self.get(Archive.start_date == start_date)
return self.create(
start_date=start_date,
finish_date=start_date + timedelta(hours=1),
file_id=file_id
)
User.create_table(safe=True)
Archive.create_table(safe=True)
user = User()
archive = Archive()
| 30.762376 | 111 | 0.620856 | 3,054 | 0.888566 | 0 | 0 | 0 | 0 | 0 | 0 | 1,303 | 0.37911 |
490c3a09e90ac7741bc5df730d26dac2764368fc | 40,373 | py | Python | TrainingExtensions/tensorflow/src/python/aimet_tensorflow/utils/op/fusedbatchnorm.py | quic-ykota/aimet | c897bd4c360e3a0fb7a329c6bb98b569f66bace1 | ["BSD-3-Clause"] | 945 | 2020-04-30T02:23:55.000Z | 2022-03-31T08:44:32.000Z | TrainingExtensions/tensorflow/src/python/aimet_tensorflow/utils/op/fusedbatchnorm.py | seaun163/aimet | de94e5522e0c9250fb422d064b77ef9ecc70f239 | ["BSD-3-Clause"] | 563 | 2020-05-01T03:07:22.000Z | 2022-03-30T05:35:58.000Z | TrainingExtensions/tensorflow/src/python/aimet_tensorflow/utils/op/fusedbatchnorm.py | seaun163/aimet | de94e5522e0c9250fb422d064b77ef9ecc70f239 | ["BSD-3-Clause"] | 186 | 2020-04-30T00:55:26.000Z | 2022-03-30T09:54:51.000Z |
# /usr/bin/env python3.5
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2019-2020, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
""" utilities for fused batchnorm op """
from typing import Union
import numpy as np
import tensorflow as tf
from tensorflow.contrib import graph_editor as ge
from aimet_common.utils import AimetLogger
from aimet_tensorflow.utils import constants
logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Utils)
_BN_STRUCTURE_ERROR_MSG = "BN op doesn't have the expected structure"
class BNUtils:
""" Batch Norm/ fused Batch Norm op related utils"""
@staticmethod
def skip_bn_op(sess: tf.compat.v1.Session, bn_op: tf.Operation, in_tensor: tf.Tensor, out_tensor: tf.Tensor):
"""
Skip given bn op specified (fused batch norm op).
Note: supports only Fused bn op types.
:param sess: Tensorflow session
:param bn_op: Batchnorm op to be skipped
:param in_tensor: Input tensor to the batchnorm op
:param out_tensor: Output tensor of the batchnorm op
"""
if in_tensor is None or out_tensor is None:
logger.error("Error, input and output tensors must be provided for skipping the op")
assert False
else:
with sess.graph.as_default():
if bn_op.type in ['FusedBatchNormV3', 'FusedBatchNorm']:
ge.detach_outputs(in_tensor.op)
ge.reroute_ts(in_tensor, out_tensor)
BNUtils.remove_bn_op_from_update_ops(sess, bn_op)
else:
logger.error("Error, Unknown BN op")
assert False
@staticmethod
def _get_tensor_read_var_op_trainable_bn_op(input_tensor: tf.Tensor) -> tf.Tensor:
"""
Generic helper to find a read op tensor associated with input tensor that can be evaluated, when the bn op is
marked trainable.
:param input_tensor: Input tensor to find corresponding read op tensor that can be evaluated
:return: read var op type tensor as tf.Tensor type.
"""
logger.debug('Fetching params from trainable BN op type')
assert input_tensor.op.inputs[0].op.inputs is not None
# inputs of 0 is beta tensor , get readVarOp associated with it
var_tensor = input_tensor.op.inputs[0].op.inputs[0]
assert var_tensor.op.outputs is not None
assert len(var_tensor.consumers()) >= 3
tensor_consumers = var_tensor.consumers()
var_read_tensor = None
# get read variable op tensor from these consumers
# do not pick the one with _1 , it is not fetch-able
for consumer in tensor_consumers:
if consumer.type == 'ReadVariableOp' and 'ReadVariableOp_1' not in consumer.name:
assert consumer.outputs is not None
var_read_tensor = consumer.outputs[0]
break
assert var_read_tensor is not None
return var_read_tensor
@staticmethod
def get_beta_read_op(bn_op: tf.Operation) -> tf.Operation:
"""
Get beta read op from BN op specified.
:param bn_op: bn_op obtained from connected graph using get_modules (is mul_1 op inside BN scope)
:return: beta read op
"""
if bn_op.type in ['Mul']:
# For regular BN
# mul_1 -> add_1 <-- sub <-- beta_read
assert len(bn_op.outputs) >= 1, _BN_STRUCTURE_ERROR_MSG
add_1 = bn_op.outputs[0].consumers()[0]
assert len(add_1.inputs) >= 2, _BN_STRUCTURE_ERROR_MSG
sub = add_1.inputs[1].op
assert len(sub.inputs) >= 1, _BN_STRUCTURE_ERROR_MSG
beta_read = sub.inputs[0].op
elif bn_op.type in ['FusedBatchNormV3', 'FusedBatchNorm']:
assert len(bn_op.inputs) == 5
beta_read = bn_op.inputs[constants.BN_OP_PARAM_INDICES['beta']].op
if beta_read.type == 'Switch': # tf slim bn using training tensor form
beta_read = beta_read.inputs[0].op
assert 'read' in beta_read.name
else:
logger.error("Error, unknown BN op")
assert False
assert beta_read.type in ['ReadVariableOp', 'Identity'] # Will be identity for tf slim BNs
return beta_read
@staticmethod
def _get_beta_read_var_op_tensor_using_structure(bn_op: tf.Operation) -> tf.Tensor:
"""
Get beta readVariableOp tensor from BN op specified.
:param bn_op: FusedBatchNorm as tf.Operation
:return: tensor associated with bn op beta readVariableOp type, as tf.Tensor
"""
assert bn_op.type in ['FusedBatchNormV3', 'FusedBatchNorm', 'Mul']
beta_read_tensor = BNUtils.get_beta_read_op(bn_op).outputs[0]
assert beta_read_tensor is not None
if beta_read_tensor.op.inputs[0].op.type == 'Switch':
logger.debug('Fetching params from trainable BN op type')
beta_read_tensor = BNUtils._get_tensor_read_var_op_trainable_bn_op(beta_read_tensor)
return beta_read_tensor
@staticmethod
def get_beta_read_var_op_tensor(graph: tf.Graph, bn_op: tf.Operation) -> tf.Tensor:
"""
Get beta readVariableOp tensor from BN op specified.
:param graph: TensorFlow graph
:param bn_op: FusedBatchNorm as tf.Operation
:return: tensor associated with bn op beta readVariableOp type, as tf.Tensor
"""
try:
# try name based tensor look up for Keras layers
beta_read_tensor = BNUtils._get_bn_param_tensor_using_name(graph, bn_op,
constants.BNOpParamType.beta)
except KeyError:
# if we can't find the tensor name, use structure match
# to figure out the read tensor for param
beta_read_tensor = BNUtils._get_beta_read_var_op_tensor_using_structure(bn_op)
return beta_read_tensor
@staticmethod
def get_beta_as_numpy_data(sess: tf.compat.v1.Session, bn_op: tf.Operation) -> np.ndarray:
"""
Get beta param from BN op specified.
:param sess: tensorflow session
:param bn_op: bn_op as tf.Operation
:return: beta tensor as numpy data
"""
beta_tensor = BNUtils.get_beta_read_var_op_tensor(sess.graph, bn_op)
with sess.graph.as_default():
numpy_data = sess.run(beta_tensor)
return numpy_data
@staticmethod
def get_gamma_as_read_op(bn_op: tf.Operation) -> tf.Operation:
"""
Get gamma read op from BN op specified.
:param bn_op: bn_op obtained from connected graph using get_modules (is mul_1 op inside BN scope)
:return: gamma read op
"""
if bn_op.type in ['Mul']:
# For regular BN
# mul_1 <-- mul <-- gamma_read <-- gamma_tensor
assert len(bn_op.inputs) >= 2, _BN_STRUCTURE_ERROR_MSG
mul = bn_op.inputs[1].op
assert len(mul.inputs) >= 2, _BN_STRUCTURE_ERROR_MSG
gamma_read = mul.inputs[1].op
elif bn_op.type in ['FusedBatchNormV3', 'FusedBatchNorm']:
assert len(bn_op.inputs) == 5
gamma_read = bn_op.inputs[constants.BN_OP_PARAM_INDICES['gamma']].op
if gamma_read.type == 'Switch': # tf slim bn using training tensor form
gamma_read = gamma_read.inputs[0].op
assert 'read' in gamma_read.name or gamma_read.type == 'Const'
else:
logger.error("Error, unknown BN op")
assert False
assert gamma_read.type in ['ReadVariableOp', 'Identity', 'Const'] # Will be identity for tf slim BNs
return gamma_read
@staticmethod
def _get_gamma_read_var_op_tensor_using_structure(bn_op: tf.Operation) -> tf.Tensor:
"""
Get the gamma read var op tensor associated with the batchnorm op.
:param bn_op: Batchnorm op to get gamma read var op tensor from
:return: Gamma read var op tensor associated with bn_op
"""
assert bn_op.type in ['FusedBatchNormV3', 'FusedBatchNorm', 'Mul']
gamma_read_tensor = BNUtils.get_gamma_as_read_op(bn_op).outputs[0]
assert gamma_read_tensor is not None
if gamma_read_tensor.op.inputs and gamma_read_tensor.op.inputs[0].op.type == 'Switch':
logger.debug('Fetching params from trainable BN op type')
gamma_read_tensor = BNUtils._get_tensor_read_var_op_trainable_bn_op(gamma_read_tensor)
return gamma_read_tensor
@staticmethod
def get_gamma_read_var_op_tensor(graph: tf.Graph, bn_op: tf.Operation) -> tf.Tensor:
"""
Get the gamma read var op tensor associated with the batchnorm op.
:param graph: TensorFlow graph
:param bn_op: Batchnorm op to get gamma read var op tensor from
:return: Gamma read var op tensor associated with bn_op
"""
try:
# try name based tensor look up for Keras layers
gamma_read_tensor = BNUtils._get_bn_param_tensor_using_name(graph, bn_op,
constants.BNOpParamType.gamma)
except KeyError:
# if we can't find the tensor name, use structure match
# to figure out the read tensor for param
gamma_read_tensor = BNUtils._get_gamma_read_var_op_tensor_using_structure(bn_op)
return gamma_read_tensor
@staticmethod
def get_gamma_as_numpy_data(sess: tf.compat.v1.Session, bn_op: tf.Operation) -> np.ndarray:
"""
Get gamma param from BN op specified.
:param sess: tensorflow session
:param bn_op: bn_op obtained from connected graph using get_modules (is mul_1 op inside BN scope)
:return: gamma as numpy data
"""
gamma_tensor = BNUtils.get_gamma_read_var_op_tensor(sess.graph, bn_op)
with sess.graph.as_default():
numpy_data = sess.run(gamma_tensor)
return numpy_data
@staticmethod
def _bn_op_var_struct_1(bn_op: tf.Operation) -> Union[tf.Operation, None]:
"""
Return moving_variance op corresponding to batchnorm with training tensor.
:param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
:return: Read operation for moving_variance
"""
try:
mul_op = bn_op.inputs[1].op
assert mul_op.type == 'Mul'
rsqrt_op = mul_op.inputs[0].op
assert rsqrt_op.type == 'Rsqrt'
add_op = rsqrt_op.inputs[0].op
assert add_op.type == 'AddV2'
merge_op = add_op.inputs[0].op
assert merge_op.type == 'Merge'
read_op = merge_op.inputs[0].op
assert read_op.type in ['ReadVariableOp']
return read_op
except: # pylint: disable=bare-except
return None
@staticmethod
def _bn_op_var_struct_2(bn_op: tf.Operation) -> Union[tf.Operation, None]:
"""
Return moving_variance op corresponding to batchnorm with training=True.
:param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
:return: Read operation for moving_variance
"""
try:
mul_op = bn_op.inputs[1].op
assert mul_op.type == 'Mul'
rsqrt_op = mul_op.inputs[0].op
assert rsqrt_op.type == 'Rsqrt'
add_op = rsqrt_op.inputs[0].op
assert add_op.type == 'AddV2'
squeeze_1_op = add_op.inputs[0].op
assert squeeze_1_op.type == 'Squeeze'
sub_op = squeeze_1_op.outputs[0].consumers()[0]
assert sub_op.type == 'Sub'
read_op = sub_op.inputs[0].op
assert read_op.type in ['ReadVariableOp']
return read_op
except: # pylint: disable=bare-except
return None
@staticmethod
def _bn_op_var_struct_3(bn_op: tf.Operation) -> Union[tf.Operation, None]:
"""
Return moving_variance op corresponding to batchnorm with training=False.
:param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
:return: Read operation for moving_variance
"""
try:
mul_op = bn_op.inputs[1].op
assert mul_op.type == 'Mul'
rsqrt_op = mul_op.inputs[0].op
assert rsqrt_op.type == 'Rsqrt'
add_op = rsqrt_op.inputs[0].op
assert add_op.type == 'AddV2'
read_op = add_op.inputs[0].op
assert read_op.type in ['ReadVariableOp']
return read_op
except: # pylint: disable=bare-except
return None
@staticmethod
def get_moving_variance_as_read_op(bn_op: tf.Operation) -> Union[tf.Operation, None]:
"""
Get moving variance read op from BN op specified.
:param bn_op: bn_op obtained from connected graph using get_modules a mul_1 op inside BN scope.
:return: moving variance as read op
"""
# register handlers for different structures
bn_op_struct_for_variance_handlers = [BNUtils._bn_op_var_struct_1,
BNUtils._bn_op_var_struct_2,
BNUtils._bn_op_var_struct_3]
if bn_op.type in ['FusedBatchNormV3', 'FusedBatchNorm']:
assert len(bn_op.inputs) == 5
moving_var_read = bn_op.inputs[constants.BN_OP_PARAM_INDICES['movingvariance']].op
if moving_var_read.type == 'Switch': # tf slim bn using training tensor form
moving_var_read = moving_var_read.inputs[0].op
assert 'read' in moving_var_read.name
elif bn_op.type in ['Mul']:
# For regular BN
moving_var_read = None
# try all handlers available
for handler in bn_op_struct_for_variance_handlers:
if moving_var_read is None:
moving_var_read = handler(bn_op)
else:
break
assert moving_var_read is not None, _BN_STRUCTURE_ERROR_MSG
else:
logger.error("Error, unknown BN op")
assert False
if moving_var_read.type == 'Identity':
assert len(moving_var_read.inputs) == 1, _BN_STRUCTURE_ERROR_MSG
assert moving_var_read.type in ['ReadVariableOp', 'Const', 'Identity']
return moving_var_read
@staticmethod
def _get_moving_variance_read_var_op_tensor_using_structure(bn_op: tf.Operation) -> tf.Tensor:
"""
Get moving variance readVariableOp tensor from BN op specified.
:param bn_op: FusedBatchNorm as tf.Operation
:return: tensor associated with bn op moving variance readVariableOp type, as tf.Tensor
"""
# only support fused BN
assert bn_op.type in ['FusedBatchNormV3', 'FusedBatchNorm', 'Mul']
moving_var_read_tensor = BNUtils.get_moving_variance_as_read_op(bn_op).outputs[0]
assert moving_var_read_tensor is not None
if moving_var_read_tensor.op.type == 'Const':
logger.debug("BN op has const type op for moving variance")
# get the sub_1 op associated with moving variance read op
            assert len(bn_op.outputs) > 2
moving_avg_1_sub_1 = bn_op.outputs[2].consumers()[0]
all_inputs = moving_avg_1_sub_1.inputs
# among inputs figure out the read var op type that can be "evaluated"
for input_t in all_inputs:
if input_t.op.type == 'ReadVariableOp':
moving_var_read_tensor = input_t
elif input_t.op.type == 'Identity' and 'read:0' in input_t.name: # tf slim form
moving_var_read_tensor = input_t
elif moving_var_read_tensor.op.inputs[0].op.type == 'Switch':
logger.debug("Fetch moving var from a trainable BN op structure")
moving_var_read_tensor = BNUtils._get_tensor_read_var_op_trainable_bn_op(moving_var_read_tensor)
return moving_var_read_tensor
@staticmethod
def get_moving_variance_read_var_op_tensor(graph: tf.Graph, bn_op: tf.Operation) -> tf.Tensor:
"""
Get moving variance readVariableOp tensor from BN op specified.
:param graph: TensorFlow graph
:param bn_op: FusedBatchNorm as tf.Operation
:return: tensor associated with bn op moving variance readVariableOp type, as tf.Tensor
"""
try:
# try name based tensor look up for Keras layers
moving_var_read_tensor = BNUtils._get_bn_param_tensor_using_name(graph, bn_op,
constants.BNOpParamType.moving_variance)
except KeyError:
# if we can't find the tensor name, use structure match
# to figure out the read tensor for param
moving_var_read_tensor = BNUtils._get_moving_variance_read_var_op_tensor_using_structure(bn_op)
return moving_var_read_tensor
@staticmethod
def get_moving_variance_as_numpy_data(sess: tf.compat.v1.Session, bn_op: tf.Operation) -> np.ndarray:
"""
Get moving variance param from BN op specified.
:param sess: tensorflow session
        :param bn_op: bn_op obtained from connected graph using get_modules (a mul_1 op inside the BN scope).
:return: moving variance as numpy data
"""
moving_var_tensor = BNUtils.get_moving_variance_read_var_op_tensor(sess.graph, bn_op)
with sess.graph.as_default():
numpy_data = sess.run(moving_var_tensor)
return numpy_data
@staticmethod
def _bn_op_mean_struct_1(bn_op: tf.Operation) -> Union[tf.Operation, None]:
"""
Return moving_mean op corresponding to batchnorm with training tensor.
        :param bn_op: bn_op obtained from connected graph using get_modules (a mul_1 op inside the BN scope).
:return: Read operation for moving_mean
"""
try:
mul_op = bn_op.inputs[1].op
assert mul_op.type == 'Mul'
mul_2_op = mul_op.outputs[0].consumers()[1]
assert mul_2_op.type == 'Mul'
merge_op = mul_2_op.inputs[0].op
assert merge_op.type == 'Merge'
read_op = merge_op.inputs[0].op
assert read_op.type in ['ReadVariableOp']
return read_op
except: # pylint: disable=bare-except
return None
@staticmethod
def _bn_op_mean_struct_2(bn_op: tf.Operation) -> Union[tf.Operation, None]:
"""
Return moving_mean op corresponding to batchnorm with training=True.
        :param bn_op: bn_op obtained from connected graph using get_modules (a mul_1 op inside the BN scope).
:return: Read operation for moving_mean
"""
try:
mul_op = bn_op.inputs[1].op
assert mul_op.type == 'Mul'
mul_2_op = mul_op.outputs[0].consumers()[1]
assert mul_2_op.type == 'Mul'
squeeze_op = mul_2_op.inputs[0].op
assert squeeze_op.type == 'Squeeze'
sub_op = squeeze_op.outputs[0].consumers()[0]
assert sub_op.type == 'Sub'
read_op = sub_op.inputs[0].op
assert read_op.type in ['ReadVariableOp']
return read_op
except: # pylint: disable=bare-except
return None
@staticmethod
def _bn_op_mean_struct_3(bn_op: tf.Operation) -> Union[tf.Operation, None]:
"""
Return moving_mean op corresponding to batchnorm with training=False.
        :param bn_op: bn_op obtained from connected graph using get_modules (a mul_1 op inside the BN scope).
:return: Read operation for moving_mean
"""
try:
mul_op = bn_op.inputs[1].op
assert mul_op.type == 'Mul'
mul_2_op = mul_op.outputs[0].consumers()[1]
assert mul_2_op.type == 'Mul'
read_op = mul_2_op.inputs[0].op
assert read_op.type in ['ReadVariableOp']
return read_op
except: # pylint: disable=bare-except
return None
@staticmethod
def get_moving_mean_as_read_op(bn_op: tf.Operation) -> Union[tf.Operation, None]:
"""
Get moving mean read op from BN op specified.
        :param bn_op: bn_op obtained from connected graph using get_modules (a mul_1 op inside the BN scope).
:return: moving mean read op
"""
if bn_op.type in ['FusedBatchNormV3', 'FusedBatchNorm']:
assert len(bn_op.inputs) == 5
moving_mean_read = bn_op.inputs[constants.BN_OP_PARAM_INDICES['movingmean']].op
if moving_mean_read.type == 'Switch': # tf slim bn using training tensor form
moving_mean_read = moving_mean_read.inputs[0].op
assert 'read' in moving_mean_read.name
elif bn_op.type in ['Mul']:
# For regular BN
# mul_1 << - mul --> mul_2 <-- cond/merge <-- switch2 <-- moving mean read < moving mean tensor
# inputs[1] is mul .op.inputs[1] is gamma:read op whose input is gamma tensor as variable v2
# register handlers for different structures
bn_op_struct_for_mean_handlers = [BNUtils._bn_op_mean_struct_1,
BNUtils._bn_op_mean_struct_2,
BNUtils._bn_op_mean_struct_3]
moving_mean_read = None
# try all handlers available
for handler in bn_op_struct_for_mean_handlers:
if moving_mean_read is None:
moving_mean_read = handler(bn_op)
else:
break
assert moving_mean_read is not None, _BN_STRUCTURE_ERROR_MSG
else:
logger.error("Error, unknown BN op")
assert False
if moving_mean_read.type == 'Identity':
assert len(moving_mean_read.inputs) == 1, _BN_STRUCTURE_ERROR_MSG
assert moving_mean_read.type in ['ReadVariableOp', 'Const', 'Identity']
return moving_mean_read
@staticmethod
def _get_moving_mean_read_var_op_tensor_using_structure(bn_op: tf.Operation) -> tf.Tensor:
"""
Get moving mean readVariableOp tensor from BN op specified.
:param bn_op: FusedBatchNorm as tf.Operation
:return: tensor associated with bn op moving mean readVariableOp type, as tf.Tensor
"""
# only support fused BN
assert bn_op.type in ['FusedBatchNormV3', 'FusedBatchNorm', 'Mul']
moving_mean_read_tensor = BNUtils.get_moving_mean_as_read_op(bn_op).outputs[0]
assert moving_mean_read_tensor is not None
if moving_mean_read_tensor.op.type == 'Const':
            logger.debug("BN op has const type op for moving mean")
# get the read var type from bn op
# get the sub_1 op associated with moving mean read op
assert len(bn_op.outputs) > 1
moving_avg_sub_1 = bn_op.outputs[1].consumers()[0]
all_inputs = moving_avg_sub_1.inputs
# among inputs figure out the read var op type that can be "evaluated"
for input_t in all_inputs:
if input_t.op.type == 'ReadVariableOp':
moving_mean_read_tensor = input_t
elif input_t.op.type == 'Identity' and 'read:0' in input_t.name: # tf slim form
moving_mean_read_tensor = input_t
elif moving_mean_read_tensor.op.inputs[0].op.type == 'Switch':
            logger.debug("Fetch moving mean from a trainable BN op structure")
moving_mean_read_tensor = BNUtils._get_tensor_read_var_op_trainable_bn_op(moving_mean_read_tensor)
return moving_mean_read_tensor
@staticmethod
def get_moving_mean_read_var_op_tensor(graph: tf.Graph, bn_op: tf.Operation) -> tf.Tensor:
"""
Get moving mean readVariableOp tensor from BN op specified.
:param graph: TensorFlow graph
:param bn_op: FusedBatchNorm as tf.Operation
:return: tensor associated with bn op moving mean readVariableOp type, as tf.Tensor
"""
try:
# try name based tensor look up for Keras layers
moving_mean_read_tensor = BNUtils._get_bn_param_tensor_using_name(graph, bn_op,
constants.BNOpParamType.moving_mean)
except KeyError:
# if we can't find the tensor name, use structure match
# to figure out the read tensor for param
moving_mean_read_tensor = BNUtils._get_moving_mean_read_var_op_tensor_using_structure(bn_op)
return moving_mean_read_tensor
@staticmethod
def get_moving_mean_as_numpy_data(sess: tf.compat.v1.Session, bn_op: tf.Operation) -> np.ndarray:
"""
Get moving mean param from BN op specified.
:param sess: tensorflow session
        :param bn_op: bn_op obtained from connected graph using get_modules (a mul_1 op inside the BN scope).
:return: moving mean as numpy data
"""
moving_mean_tensor = BNUtils.get_moving_mean_read_var_op_tensor(sess.graph, bn_op)
with sess.graph.as_default():
numpy_data = sess.run(moving_mean_tensor)
return numpy_data
@staticmethod
def get_epsilon(bn_op: tf.Operation) -> float:
"""
Returns epsilon extracted from given bn op.
        :param bn_op: bn_op obtained from connected graph using get_modules (a mul_1 op inside the BN scope).
:return: epsilon value
"""
if bn_op.type in ['Mul']:
assert len(bn_op.inputs) >= 2, _BN_STRUCTURE_ERROR_MSG
mul = bn_op.inputs[1].op
assert len(mul.inputs) >= 1, _BN_STRUCTURE_ERROR_MSG
rsqrt = mul.inputs[0].op
assert len(rsqrt.inputs) >= 1, _BN_STRUCTURE_ERROR_MSG
add = rsqrt.inputs[0].op
assert len(add.inputs) >= 2, _BN_STRUCTURE_ERROR_MSG
epsilon = add.inputs[1].op
numpy_epsilon = epsilon.get_attr('value').float_val[0]
elif bn_op.type in ['FusedBatchNormV3', 'FusedBatchNorm']:
# epsilon can be derived as attribute value
numpy_epsilon = bn_op.get_attr("epsilon")
else:
logger.error("Error, unknown BN op")
assert False
return numpy_epsilon
@staticmethod
def get_assign_moving_avg_op(bn_op: tf.Operation) -> Union[tf.Operation, None]:
"""
Get assign_moving_avg op corresponding with the bn_op, if it exists.
:param bn_op: Batchnorm op to search for corresponding assign_moving_avg op
:return: assign_moving_op corresponding with the bn op, or None if it does not exist.
"""
assert bn_op.type in ['FusedBatchNormV3', 'FusedBatchNorm']
assert len(bn_op.outputs) == 6 or len(bn_op.outputs) == 5
if bn_op.outputs[1].consumers():
child_op = bn_op.outputs[1].consumers()[0]
if child_op.type == 'Merge':
sub_op = child_op.outputs[0].consumers()[0]
else:
sub_op = child_op
assert sub_op.type == 'Sub'
mul_op = sub_op.outputs[0].consumers()[0]
assert mul_op.type == 'Mul'
assign_moving_avg_op = mul_op.outputs[0].consumers()[0]
assert assign_moving_avg_op.type in ['AssignSub', 'AssignSubVariableOp']
return assign_moving_avg_op
return None
@staticmethod
def get_assign_moving_avg_1_op(bn_op: tf.Operation) -> Union[tf.Operation, None]:
"""
Get assign_moving_avg_1 op corresponding with the bn_op, if it exists.
:param bn_op: Batchnorm op to search for corresponding assign_moving_avg_1 op
:return: assign_moving_avg_1 corresponding with the bn op, or None if it does not exist.
"""
assert bn_op.type in ['FusedBatchNormV3', 'FusedBatchNorm']
assert len(bn_op.outputs) == 6 or len(bn_op.outputs) == 5
if bn_op.outputs[2].consumers():
child_op = bn_op.outputs[2].consumers()[0]
if child_op.type == 'Merge':
sub_op = child_op.outputs[0].consumers()[0]
else:
sub_op = child_op
assert sub_op.type == 'Sub'
mul_op = sub_op.outputs[0].consumers()[0]
assert mul_op.type == 'Mul'
assign_moving_avg_op = mul_op.outputs[0].consumers()[0]
assert assign_moving_avg_op.type in ['AssignSub', 'AssignSubVariableOp']
return assign_moving_avg_op
return None
@staticmethod
def remove_bn_op_from_update_ops(sess: tf.compat.v1.Session, bn_op: tf.Operation):
"""
Remove batchnorm assign_moving_avg and assign_moving_avg_1 ops from update ops.
:param sess: tf.compat.v1.Session
:param bn_op: BatchNorm operation whose assign_moving_avg and assign_moving_avg_1 ops should be removed.
"""
with sess.graph.as_default():
update_ops = tf.compat.v1.get_collection_ref(tf.compat.v1.GraphKeys.UPDATE_OPS)
assign_moving_avg_op = BNUtils.get_assign_moving_avg_op(bn_op)
assign_moving_avg_op_1 = BNUtils.get_assign_moving_avg_1_op(bn_op)
if assign_moving_avg_op and assign_moving_avg_op in update_ops:
update_ops.remove(assign_moving_avg_op)
logger.debug('Removed %s from update ops', assign_moving_avg_op.name)
if assign_moving_avg_op_1 and assign_moving_avg_op_1 in update_ops:
update_ops.remove(assign_moving_avg_op_1)
logger.debug('Removed %s from update ops', assign_moving_avg_op_1.name)
@staticmethod
def _get_bn_param_tensor_using_name(graph: tf.Graph, bn_op: tf.Operation, param_type: constants.BNOpParamType):
"""
Helper to get BN op param read tensor.
:param graph: TensorFlow graph
:param bn_op: BN op from which param read tensor is to be extracted
:param param_type: param type for which param tensor is to be extracted, as constants.BNOpParamType (supported
types are beta, gamma, moving_mean or moving_variance)
:return: param read tensor
"""
if param_type not in vars(constants.BNOpParamType).values():
            assert 0, 'Error, _get_bn_param_tensor_using_name() invalid param type requested'
# name of the fused bn contains bn_name/FusedBatchNormV3 or
# bn_name/cond/FusedBatchNormV3_1
# we need only the bn_name to make param tensor names
op_name = bn_op.name.split('/')[0]
param_tensor_name = op_name + constants.BN_OP_PARAM_NAME_SUFFIX[param_type]
param_tensor = graph.get_tensor_by_name(param_tensor_name)
return param_tensor
@staticmethod
def _bn_op_momentum_struct_1(bn_op: tf.Operation) -> Union[float, None]:
"""
Return momentum value corresponding to batchnorm with training tensor.
        :param bn_op: bn_op obtained from connected graph using get_modules (a mul_1 op inside the BN scope).
:return: momentum value
"""
try:
mul_op = bn_op.inputs[1].op
assert mul_op.type == 'Mul'
mul_2_op = mul_op.outputs[0].consumers()[1]
assert mul_2_op.type == 'Mul'
merge_op = mul_2_op.inputs[0].op
assert merge_op.type == 'Merge'
switch_1_op = merge_op.outputs[0].consumers()[0]
assert switch_1_op.type == 'Switch'
sub_op = switch_1_op.outputs[1].consumers()[0]
assert sub_op.type == 'Sub'
assign_moving_avg_mul_op = sub_op.outputs[0].consumers()[0]
assert assign_moving_avg_mul_op.type == 'Mul'
decay_op = assign_moving_avg_mul_op.inputs[1].op
assert decay_op.type == 'Const'
decay = decay_op.get_attr('value').float_val[0]
return 1 - decay
except: # pylint: disable=bare-except
return None
@staticmethod
def _bn_op_momentum_struct_2(bn_op: tf.Operation) -> Union[float, None]:
"""
Return momentum value corresponding to batchnorm with training=True.
        :param bn_op: bn_op obtained from connected graph using get_modules (a mul_1 op inside the BN scope).
:return: momentum value
"""
try:
mul_op = bn_op.inputs[1].op
assert mul_op.type == 'Mul'
mul_2_op = mul_op.outputs[0].consumers()[1]
assert mul_2_op.type == 'Mul'
squeeze_op = mul_2_op.inputs[0].op
assert squeeze_op.type == 'Squeeze'
sub_op = squeeze_op.outputs[0].consumers()[0]
assert sub_op.type == 'Sub'
assign_moving_avg_mul_op = sub_op.outputs[0].consumers()[0]
assert assign_moving_avg_mul_op.type == 'Mul'
decay_op = assign_moving_avg_mul_op.inputs[1].op
assert decay_op.type == 'Const'
decay = decay_op.get_attr('value').float_val[0]
return 1 - decay
except: # pylint: disable=bare-except
return None
@staticmethod
def _fused_bn_op_momentum_struct_1(bn_op: tf.Operation) -> Union[float, None]:
"""
Return momentum value corresponding to fused batchnorm with training tensor.
        :param bn_op: bn_op obtained from connected graph using get_modules (a mul_1 op inside the BN scope).
:return: momentum value
"""
try:
merge_1_op = bn_op.outputs[1].consumers()[0]
assert merge_1_op.type == 'Merge'
sub_op = merge_1_op.outputs[0].consumers()[0]
assert sub_op.type == 'Sub'
mul_op = sub_op.outputs[0].consumers()[0]
assert mul_op.type == 'Mul'
sub_2_op = mul_op.inputs[1].op
assert sub_2_op.type == 'Sub'
merge_op = sub_2_op.inputs[1].op
assert merge_op.type == 'Merge'
decay_op = merge_op.inputs[1].op
assert decay_op.type == 'Const'
decay = decay_op.get_attr('value').float_val[0]
return decay
except: # pylint: disable=bare-except
return None
@staticmethod
def _fused_bn_op_momentum_struct_2(bn_op: tf.Operation) -> Union[float, None]:
"""
Return momentum value corresponding to fused batchnorm with training=True.
        :param bn_op: bn_op obtained from connected graph using get_modules (a mul_1 op inside the BN scope).
:return: momentum value
"""
try:
sub_op = bn_op.outputs[1].consumers()[0]
assert sub_op.type == 'Sub'
mul_op = sub_op.outputs[0].consumers()[0]
assert mul_op.type == 'Mul'
sub_2_op = mul_op.inputs[1].op
assert sub_2_op.type == 'Sub'
decay_op = sub_2_op.inputs[1].op
assert decay_op.type == 'Const'
decay = decay_op.get_attr('value').float_val[0]
return decay
except: # pylint: disable=bare-except
return None
@staticmethod
def get_momentum(bn_op: tf.Operation) -> float:
"""
Returns momentum extracted from given bn op. If bn op is training=False mode, momentum will be none.
        :param bn_op: bn_op obtained from connected graph using get_modules (a mul_1 op inside the BN scope).
:return: momentum value
"""
# register handlers for different structures
bn_op_struct_for_momentum_handlers = [BNUtils._bn_op_momentum_struct_1,
BNUtils._bn_op_momentum_struct_2]
fused_bn_op_struct_for_momentum_handlers = [BNUtils._fused_bn_op_momentum_struct_1,
BNUtils._fused_bn_op_momentum_struct_2]
decay = None
if bn_op.type in ['Mul']:
# try all handlers available
for handler in bn_op_struct_for_momentum_handlers:
if decay is None:
decay = handler(bn_op)
else:
break
elif bn_op.type in ['FusedBatchNormV3', 'FusedBatchNorm']:
# try all handlers available
for handler in fused_bn_op_struct_for_momentum_handlers:
if decay is None:
decay = handler(bn_op)
else:
break
else:
logger.error("Error, unknown BN op")
assert False
return decay
@staticmethod
def get_training(bn_op: tf.Operation) -> Union[None, bool, tf.Tensor]:
"""
Returns either a boolean of whether the BN op training mode is True or False, or the is_training tensor
feeding into the BN op if it is using a tensor to determine the mode dynamically.
:param bn_op: bn_op obtained in the connected graph
:return: True or False for training mode, or tf.Tensor that determines the mode dynamically.
"""
assert bn_op.type in ['FusedBatchNormV3', 'FusedBatchNorm', 'Mul']
if bn_op.type == 'FusedBatchNormV3' or bn_op.type == 'FusedBatchNorm':
if 'FusedBatchNormV3_1' in bn_op.name:
switch_op = bn_op.inputs[0].op
pred_id_op = switch_op.inputs[1].op
training = pred_id_op.inputs[0]
else:
training = bn_op.get_attr('is_training')
return training
# Non fused batchnorm case
mul_op = bn_op.inputs[1].op
assert mul_op.type == 'Mul'
rsqrt_op = mul_op.inputs[0].op
assert rsqrt_op.type == 'Rsqrt'
add_op = rsqrt_op.inputs[0].op
assert add_op.type == 'AddV2'
add_input_op = add_op.inputs[0].op
if add_input_op.type == 'Squeeze':
return True
if add_input_op.type == 'ReadVariableOp':
return False
if add_input_op.type == 'Merge':
switch_op = add_input_op.inputs[1].op
assert switch_op.type == 'Switch'
pred_id_op = switch_op.inputs[1].op
assert pred_id_op.type == 'Identity'
return pred_id_op.inputs[0]
logger.error('Error, unknown BN structure')
return None
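if __name__ == '__main__':
    # --- Editor's illustrative sketch; not part of the original module. ---
    # A minimal, hedged example of driving the helpers above: build a small
    # tf.compat.v1 graph that contains a fused batch-norm op, then query it
    # with BNUtils. The toy graph below is an assumption made purely for
    # illustration; real usage would operate on a model's existing graph.
    tf.compat.v1.disable_eager_execution()
    tf.compat.v1.reset_default_graph()
    inp = tf.compat.v1.placeholder(tf.float32, shape=(None, 8, 8, 3))
    _ = tf.compat.v1.layers.batch_normalization(inp, fused=True, training=False)
    with tf.compat.v1.Session() as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        for op in sess.graph.get_operations():
            if op.type in ('FusedBatchNorm', 'FusedBatchNormV3'):
                print(op.name,
                      'epsilon =', BNUtils.get_epsilon(op),
                      'is_training =', BNUtils.get_training(op))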
| 43.552319
| 118
| 0.626112
| 38,088
| 0.943403
| 0
| 0
| 37,807
| 0.936443
| 0
| 0
| 15,564
| 0.385505
|
490d54319f77117f33898d0f301f950c860478c3
| 4,878
|
py
|
Python
|
flaskex.py
|
DesuDeluxe/simple_rest_api
|
c9bed666269882adae97db974c29f9f8e406ce80
|
[
"MIT"
] | null | null | null |
flaskex.py
|
DesuDeluxe/simple_rest_api
|
c9bed666269882adae97db974c29f9f8e406ce80
|
[
"MIT"
] | null | null | null |
flaskex.py
|
DesuDeluxe/simple_rest_api
|
c9bed666269882adae97db974c29f9f8e406ce80
|
[
"MIT"
] | null | null | null |
import os
from flask import Flask, Response, render_template, redirect
from flask_restful import reqparse, request, abort, Api, Resource, fields, marshal_with
from flask_sqlalchemy import SQLAlchemy
import sqlite3
app = Flask(__name__)
p_dir = os.path.dirname(os.path.abspath(__file__))
db_file = "sqlite:///{}".format(os.path.join(p_dir, "notes.db"))
app.config['SQLALCHEMY_DATABASE_URI'] = db_file
api = Api(app)
db = SQLAlchemy(app)
def copy_data(note, to):
note = to(title = note.title, note_id = note.id, content = note.content, created_date = note.created_date, modified_date = note.modified_date)
db.session.add(note)
return note
def find_and_abort_if_doesnt_exist(number):
note = DB_Notes.query.filter_by(id=number).first()
if note is None:
abort(404, message="Note number {} doesn't exist".format(number))
else:
return note
parser = reqparse.RequestParser(bundle_errors=True)
#parser.add_argument('id', required=False,help='No id provided')
parser.add_argument('title', required=True, help='No title provided')
parser.add_argument('content', required=True, help='No content provided')
parserPut = reqparse.RequestParser(bundle_errors=True)
parserPut.add_argument('content', required=True, help='No content provided')
## sqlalchemy classes to be mapped to db
class DB_BaseColumns(object):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(200))
content = db.Column(db.String(800))
class DB_Notes(DB_BaseColumns,db.Model):
created_date = db.Column(db.DateTime, default=db.func.now())
modified_date = db.Column(db.DateTime, default=db.func.now(), onupdate=db.func.now())
class DB_NotesHistory(DB_BaseColumns,db.Model):
note_id = db.Column(db.Integer)
created_date = db.Column(db.DateTime)
modified_date = db.Column(db.DateTime)
class DB_NotesDeleted(DB_BaseColumns, db.Model):
note_id = db.Column(db.Integer)
created_date = db.Column(db.DateTime)
modified_date = db.Column(db.DateTime)
deletion_date = db.Column(db.DateTime, default=db.func.now(), onupdate=db.func.now())
## fields needed for json output
note_fields = {
'id': fields.Integer,
'title': fields.String,
'content': fields.String,
'created_date': fields.DateTime,
'modified_date': fields.DateTime
}
noteH_fields = dict(note_fields)
noteH_fields.update({
'note_id': fields.Integer,
} )
noteD_fields = dict(noteH_fields)
noteD_fields.update({
'deletion_date': fields.DateTime,
} )
class Home(Resource):
def get(self):
return Response(render_template('home.html', Notes = DB_Notes.query.all(), NotesHistory = DB_NotesHistory.query.all(), NotesDeleted = DB_NotesDeleted.query.all()), mimetype='text/html')
##flask classes for routing
class Note(Resource):
@marshal_with(note_fields)
def get(self, number):
return find_and_abort_if_doesnt_exist(number), 200
def delete(self, number):
note = find_and_abort_if_doesnt_exist(number)
copy_data(note,DB_NotesHistory)
copy_data(note,DB_NotesDeleted)
db.session.delete(note)
db.session.commit()
#return redirect("/"), 204
return "", 204
def put(self, number):
args = parserPut.parse_args()
note = find_and_abort_if_doesnt_exist(number)
noteH = copy_data(note,DB_NotesHistory)
note.content = args['content']
noteH.modified_date = db.func.now()
db.session.commit()
return args['content'], 201
class NotesList(Resource):
@marshal_with(note_fields)
def get(self):
return DB_Notes.query.all(), 200
def post(self):
args = parser.parse_args()
note = DB_Notes(title = args['title'], content = args['content'])
db.session.add(note)
db.session.commit()
        return "", 201
class NotesHistory(Resource):
@marshal_with(noteH_fields)
def get(self, number):
note = DB_NotesHistory.query.filter_by(note_id=number).order_by(DB_NotesHistory.modified_date.desc()).all()
        if not note:
abort(404, message="History of note number {} doesn't exist".format(number))
return note, 200
#return Response([note.title, "\n",note.content, "\n", note.created_date.strftime('%m/%d/%Y'), "\n", note.modified_date.strftime('%m/%d/%Y')])
class NotesDeleted(Resource):
@marshal_with(noteD_fields)
def get(self):
note = DB_NotesDeleted.query.all()
        if not note:
abort(404, message="No deleted notes")
return note, 200
##setup the Api resource routing
api.add_resource(Home, '/')
api.add_resource(Note, '/note/<int:number>')
api.add_resource(NotesHistory, '/note/<int:number>/history')
api.add_resource(NotesList, '/notes')
api.add_resource(NotesDeleted, '/deleted')
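# --- Editor's illustrative sketch; not part of the original file. ---
# A hedged example of exercising the resources above without starting the
# server, using Flask's built-in test client. The demo payloads and the
# db.create_all() call are assumptions made purely for illustration.
def _demo_requests():  # pragma: no cover
    with app.app_context():
        db.create_all()
    client = app.test_client()
    client.post('/notes', data={'title': 'demo', 'content': 'hello'})
    print(client.get('/notes').get_json())
    client.put('/note/1', data={'content': 'updated'})
    client.delete('/note/1')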
if __name__ == '__main__':
app.run(debug=False)
| 32.304636
| 193
| 0.693727
| 2,790
| 0.571956
| 0
| 0
| 848
| 0.173842
| 0
| 0
| 797
| 0.163387
|
491193c73d24c3c74876c7aa66287f19f2f09a60
| 6,203
|
py
|
Python
|
backend/server/device_legacy/routes.py
|
kristof-g/TempHum-Supervisor-Sys
|
aa7343c5dab5941b905333fd0172b688f8b4896f
|
[
"MIT"
] | null | null | null |
backend/server/device_legacy/routes.py
|
kristof-g/TempHum-Supervisor-Sys
|
aa7343c5dab5941b905333fd0172b688f8b4896f
|
[
"MIT"
] | null | null | null |
backend/server/device_legacy/routes.py
|
kristof-g/TempHum-Supervisor-Sys
|
aa7343c5dab5941b905333fd0172b688f8b4896f
|
[
"MIT"
] | null | null | null |
import sys
import os
import json
import csv
from time import strftime
from datetime import timedelta, date, datetime
from decimal import Decimal
from flask import Blueprint, render_template, redirect, request, url_for, flash
import server.configuration as cfg
from server.postalservice import checkTemp
from server.helpers import LoginRequired, pwIsValid, resource_path
from server.models import SzenzorAdatok
app = sys.modules['__main__']
device_bp = Blueprint('device_bp', __name__, template_folder='templates')
@device_bp.route('/allomas/<id>', methods=['GET'])
@LoginRequired
def allomas(id):
print(id)
if os.listdir(resource_path("logs/{}/".format(id))):
if request.method == 'GET' and request.args.get('sdate') and request.args.get('edate'):
start_date = datetime.strptime(request.args.get('sdate'), '%Y-%m-%d')
end_date = datetime.strptime(request.args.get('edate'), '%Y-%m-%d')
if start_date > datetime.now():
flash("Hiba a bevitt adatokban!", category='danger')
start_date = date.today() - timedelta(days=0)
end_date = date.today() + timedelta(days=1)
elif start_date == end_date:
start_date = start_date
end_date = start_date + timedelta(days=1)
elif start_date > end_date:
flash("Hiba a bevitt adatokban! a záró dátum korábbi, mint a kezdő dátum.", category='danger')
start_date = date.today() - timedelta(days=0)
end_date = date.today() + timedelta(days=1)
else:
            # Past 24h as default
start_date = date.today() - timedelta(days=0)
end_date = date.today() + timedelta(days=1)
path = os.path.join(app.GLOBAL_CONFIG['SERVER']['WORKDIR'], "allomasok.json")
allomasok = json.load(open(path, encoding="utf-8"))
jelenlegiAllomas = allomasok[id]
print('DATES: [{}]-[{}]'.format(start_date, end_date))
adatok = SzenzorAdatok(start_date, end_date, id)
        fajlnev = None
        try:
fajlnev = adatok.generateCsv()
except Exception as error:
flash(error)
print(adatok.nev)
        latest = None
        try:
latest = SzenzorAdatok(date.today() - timedelta(days=0), date.today() + timedelta(days=1), id)
latest.adatok = latest.adatok[::-1]
latest = latest.adatok[0]
            # Latest data
latest['homerseklet'] = round(Decimal(str(latest['homerseklet']).replace(",", ".")), 1)
latest['paratartalom'] = round(Decimal(str(latest['paratartalom']).replace(",", ".")), 1)
except Exception as error:
pass
ctx = {
"jallomas": app.GLOBAL_STATION_CONFIG[id],
"id": id,
"mero_nev": jelenlegiAllomas['allomasnev'],
"datumok": {"ma": date.today() - timedelta(days=0), "holnap": date.today() + timedelta(days=1),
"hetmulva": date.today() - timedelta(days=7), "honapmulva": date.today() - timedelta(days=30)},
"stat": adatok.stat,
"latest": latest,
"adatok": adatok,
"fajlnev": fajlnev,
"sdate": start_date.strftime("%Y-%m-%d"),
"edate": end_date.strftime("%Y-%m-%d")
}
return render_template("layout.html", ctx=ctx)
else:
flash("Ezen az állomáson még nincs felvett adat", category="warning")
return redirect(url_for('allomasok'))
@device_bp.route('/log/<id>', methods=['GET'])
def log(id):
if request.method == 'GET':
print("[SERVER] GET REQUEST FROM: {}".format(request.remote_addr))
app.GLOBAL_STATION_CONFIG[id]['ip'] = str(request.remote_addr)
cfg.save_allomas()
homerseklet = request.args.get('homerseklet')
        # Check the temperature against its configured limits:
checkTemp(homerseklet, id)
paratartalom = request.args.get('paratartalom')
currDate = strftime("%Y/%m/%d")
currTime = strftime("%H:%M:%S")
try:
dir = os.path.dirname(__file__)
filename = os.path.join(dir, strftime("logs/{}/%Y/%m/%d.csv".format(id)))
os.makedirs(os.path.dirname(filename), exist_ok=True)
ofile = open(filename, "a")
writer = csv.writer(ofile, delimiter=';', lineterminator='\n')
writer.writerow([currDate] + [currTime] + [homerseklet] + [paratartalom])
ofile.close()
return "Siker!"
except Exception as error:
return str(error)
else:
return "Not Get"
@device_bp.route('/deletestation', methods=['POST'])
@LoginRequired
def deletestation():
password_candidate = request.form['password']
if request.method == 'POST' and pwIsValid(password_candidate, app.GLOBAL_CONFIG['HozzaferesiKulcs']):
app.GLOBAL_STATION_CONFIG.pop(request.form['id'], None)
flash("Sikeresen törölve a " + str(request.form['id']) + " állomás!", category='success')
cfg.save_allomas()
return redirect(url_for('allomasok'))
else:
flash("Helytelen kulcs! Hozzáférés megtagadva!", category='danger')
return redirect(url_for('allomasok'))
@device_bp.route('/addnewstation')
@LoginRequired
def newstation():
if request.method == 'GET':
dict = {
request.args.get('id'): {"allomasnev": request.args.get('megnev'), "allomashely": request.args.get('hely')}}
print("bevitt adatok:")
print(dict)
app.GLOBAL_STATION_CONFIG[request.args.get('id')] = {"allomasnev": request.args.get('megnev'),
"allomashely": request.args.get('hely'), "ip": "0.0.0.0",
"minT": float(request.args.get('mint')),
"maxT": float(request.args.get('maxt'))}
dir = os.path.dirname(__file__)
path = os.path.join(dir, strftime("logs/{}/".format(request.args.get('id'))))
os.makedirs(os.path.dirname(path), exist_ok=True)
flash("Sikeresen hozzáadva!", category='success')
cfg.save_allomas()
return redirect(url_for('allomasok'))
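# --- Editor's illustrative sketch; not part of the original module. ---
# A hedged example of how a sensor node might report one reading to the
# /log/<id> route above. The host, station id and readings are assumptions
# made purely for illustration.
def _demo_log_request():  # pragma: no cover
    import requests
    requests.get("http://127.0.0.1:5000/log/station1",
                 params={"homerseklet": "21,5", "paratartalom": "40,2"})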
| 44.307143
| 120
| 0.59036
| 0
| 0
| 0
| 0
| 5,727
| 0.919557
| 0
| 0
| 1,217
| 0.195408
|
4912467ee29fbe811c78fea1ef046cb9707fcd7e
| 2,507
|
py
|
Python
|
gdsfactory/components/resistance_sheet.py
|
simbilod/gdsfactory
|
4d76db32674c3edb4d16260e3177ee29ef9ce11d
|
[
"MIT"
] | null | null | null |
gdsfactory/components/resistance_sheet.py
|
simbilod/gdsfactory
|
4d76db32674c3edb4d16260e3177ee29ef9ce11d
|
[
"MIT"
] | null | null | null |
gdsfactory/components/resistance_sheet.py
|
simbilod/gdsfactory
|
4d76db32674c3edb4d16260e3177ee29ef9ce11d
|
[
"MIT"
] | null | null | null |
from functools import partial
from gdsfactory.cell import cell
from gdsfactory.component import Component
from gdsfactory.components.compass import compass
from gdsfactory.components.via_stack import via_stack_slab_npp_m3
from gdsfactory.types import ComponentSpec, Floats, LayerSpecs, Optional
pad_via_stack_slab_npp = partial(via_stack_slab_npp_m3, size=(80, 80))
@cell
def resistance_sheet(
width: float = 10,
layers: LayerSpecs = ("SLAB90", "NPP"),
layer_offsets: Floats = (0, 0.2),
pad: ComponentSpec = pad_via_stack_slab_npp,
pad_pitch: float = 100.0,
ohms_per_square: Optional[float] = None,
port_orientation1: int = 180,
port_orientation2: int = 0,
) -> Component:
"""Returns Sheet resistance.
keeps connectivity for pads and first layer in layers
Args:
width: in um.
layers: for the middle part.
layer_offsets: from edge, positive: over, negative: inclusion.
pad: function to create a pad.
pad_pitch: in um.
ohms_per_square: optional sheet resistance to compute info.resistance.
port_orientation1: in degrees.
port_orientation2: in degrees.
"""
c = Component()
pad = pad()
length = pad_pitch - pad.get_setting("size")[0]
pad1 = c << pad
pad2 = c << pad
r0 = c << compass(
size=(length + layer_offsets[0], width + layer_offsets[0]), layer=layers[0]
)
for layer, offset in zip(layers[1:], layer_offsets[1:]):
c << compass(size=(length + 2 * offset, width + 2 * offset), layer=layer)
pad1.connect("e3", r0.ports["e1"])
pad2.connect("e1", r0.ports["e3"])
c.info["resistance"] = ohms_per_square * width * length if ohms_per_square else None
c.add_port(
"pad1",
port_type="vertical_dc",
midpoint=pad1.center,
layer=list(layers)[-1],
width=width,
orientation=port_orientation1,
)
c.add_port(
"pad2",
port_type="vertical_dc",
midpoint=pad2.center,
layer=list(layers)[-1],
width=width,
orientation=port_orientation2,
)
return c
if __name__ == "__main__":
# import gdsfactory as gf
# sweep = [resistance_sheet(width=width, layers=((1,0), (1,1))) for width in [1, 10, 100]]
# c = gf.pack(sweep)[0]
c = resistance_sheet(width=40)
c.show()
# import gdsfactory as gf
# sweep_resistance = list(map(resistance_sheet, (5, 10, 80)))
# c = gf.grid(sweep_resistance)
# c.show()
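    # --- Editor's illustrative sketch; not part of the original file. ---
    # Hedged example: also request the resistance estimate stored in info
    # (the sheet-resistance value below is an assumption for illustration).
    r = resistance_sheet(width=20, ohms_per_square=0.1)
    print(r.info["resistance"])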
| 28.816092
| 94
| 0.643797
| 0
| 0
| 0
| 0
| 1,757
| 0.700838
| 0
| 0
| 818
| 0.326286
|
4912d26a22acac060d471e8872438c7e944e8077
| 17,008
|
py
|
Python
|
cogdl/trainers/sampled_trainer.py
|
zhangdan0602/cogdl
|
35a338f29066e4b1a5d7f46217f09ebceaf13106
|
[
"MIT"
] | null | null | null |
cogdl/trainers/sampled_trainer.py
|
zhangdan0602/cogdl
|
35a338f29066e4b1a5d7f46217f09ebceaf13106
|
[
"MIT"
] | null | null | null |
cogdl/trainers/sampled_trainer.py
|
zhangdan0602/cogdl
|
35a338f29066e4b1a5d7f46217f09ebceaf13106
|
[
"MIT"
] | null | null | null |
from abc import abstractmethod
import argparse
import copy
import numpy as np
import torch
from tqdm import tqdm
from cogdl.data import Dataset
from cogdl.data.sampler import (
SAINTSampler,
NeighborSampler,
ClusteredLoader,
)
from cogdl.models.supervised_model import SupervisedModel
from cogdl.trainers.base_trainer import BaseTrainer
from . import register_trainer
class SampledTrainer(BaseTrainer):
@staticmethod
def add_args(parser):
# fmt: off
parser.add_argument("--num-workers", type=int, default=4)
parser.add_argument("--eval-step", type=int, default=3)
parser.add_argument("--batch-size", type=int, default=128)
parser.add_argument("--no-self-loop", action="store_true")
# fmt: on
@abstractmethod
def fit(self, model: SupervisedModel, dataset: Dataset):
raise NotImplementedError
@abstractmethod
def _train_step(self):
raise NotImplementedError
@abstractmethod
def _test_step(self, split="val"):
raise NotImplementedError
def __init__(self, args):
super(SampledTrainer, self).__init__(args)
self.device = "cpu" if not torch.cuda.is_available() or args.cpu else args.device_id[0]
self.patience = args.patience
self.max_epoch = args.max_epoch
self.lr = args.lr
self.weight_decay = args.weight_decay
self.loss_fn, self.evaluator = None, None
self.data, self.train_loader, self.optimizer = None, None, None
self.eval_step = args.eval_step if hasattr(args, "eval_step") else 1
self.num_workers = args.num_workers if hasattr(args, "num_workers") else 0
self.batch_size = args.batch_size
self.self_loop = not (hasattr(args, "no_self_loop") and args.no_self_loop)
@classmethod
def build_trainer_from_args(cls, args):
return cls(args)
def train(self):
epoch_iter = tqdm(range(self.max_epoch))
patience = 0
max_score = 0
min_loss = np.inf
best_model = copy.deepcopy(self.model)
for epoch in epoch_iter:
self._train_step()
if (epoch + 1) % self.eval_step == 0:
acc, loss = self._test_step()
train_acc = acc["train"]
val_acc = acc["val"]
val_loss = loss["val"]
epoch_iter.set_description(
f"Epoch: {epoch:03d}, Train Acc/F1: {train_acc:.4f}, Val Acc/F1: {val_acc:.4f}"
)
self.model = self.model.to(self.device)
if val_loss <= min_loss or val_acc >= max_score:
if val_loss <= min_loss:
best_model = copy.deepcopy(self.model)
min_loss = np.min((min_loss, val_loss.cpu()))
max_score = np.max((max_score, val_acc))
patience = 0
else:
patience += 1
if patience == self.patience:
epoch_iter.close()
break
return best_model
@register_trainer("graphsaint")
class SAINTTrainer(SampledTrainer):
@staticmethod
def add_args(parser: argparse.ArgumentParser):
"""Add trainer-specific arguments to the parser."""
# fmt: off
SampledTrainer.add_args(parser)
parser.add_argument("--eval-cpu", action="store_true")
parser.add_argument("--method", type=str, default="node", help="graph samplers")
parser.add_argument("--sample-coverage", default=20, type=float, help="sample coverage ratio")
parser.add_argument("--size-subgraph", default=1200, type=int, help="subgraph size")
args = parser.parse_args()
if args.method == "rw" or args.method == "mrw":
parser.add_argument("--num-walks", default=50, type=int, help="number of random walks")
parser.add_argument("--walk-length", default=20, type=int, help="random walk length")
parser.add_argument("--size-frontier", default=20, type=int, help="frontier size in multidimensional random walks")
# fmt: on
@staticmethod
def get_args4sampler(args):
args4sampler = {
"method": args.method,
"sample_coverage": args.sample_coverage,
"size_subgraph": args.size_subgraph,
}
if args.method == "rw" or args.method == "mrw":
args4sampler["num_walks"] = args.num_walks
args4sampler["walk_length"] = args.walk_length
if args.method == "mrw":
args4sampler["size_frontier"] = args.size_frontier
return args4sampler
@classmethod
def build_trainer_from_args(cls, args):
return cls(args)
def __init__(self, args):
super(SAINTTrainer, self).__init__(args)
self.args4sampler = self.get_args4sampler(args)
self.eval_cpu = args.eval_cpu if hasattr(args, "eval_cpu") else False
def fit(self, model: SupervisedModel, dataset: Dataset):
self.dataset = dataset
self.data = dataset.data
if self.self_loop:
self.data.add_remaining_self_loops()
self.model = model.to(self.device)
self.evaluator = dataset.get_evaluator()
self.loss_fn = dataset.get_loss_fn()
self.sampler = SAINTSampler(dataset, self.args4sampler)()
# self.train_dataset = SAINTDataset(dataset, self.args_sampler)
# self.train_loader = SAINTDataLoader(
# dataset=train_dataset,
# num_workers=self.num_workers,
# persistent_workers=True,
# pin_memory=True
# )
# self.set_data_model(dataset, model)
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr, weight_decay=self.weight_decay)
return self.train()
def _train_step(self):
self.data = self.sampler.one_batch("train")
self.data.to(self.device)
self.model = self.model.to(self.device)
self.model.train()
self.optimizer.zero_grad()
mask = self.data.train_mask
if len(self.data.y.shape) > 1:
logits = self.model.predict(self.data)
weight = self.data.norm_loss[mask].unsqueeze(1)
loss = torch.nn.BCEWithLogitsLoss(reduction="sum", weight=weight)(logits[mask], self.data.y[mask].float())
else:
logits = torch.nn.functional.log_softmax(self.model.predict(self.data), dim=-1)
loss = (
torch.nn.NLLLoss(reduction="none")(logits[mask], self.data.y[mask]) * self.data.norm_loss[mask]
).sum()
loss.backward()
self.optimizer.step()
def _test_step(self, split="val"):
self.data = self.sampler.one_batch(split)
if split != "train" and self.eval_cpu:
self.model = self.model.cpu()
else:
self.data.apply(lambda x: x.to(self.device))
self.model.eval()
masks = {"train": self.data.train_mask, "val": self.data.val_mask, "test": self.data.test_mask}
with torch.no_grad():
logits = self.model.predict(self.data)
loss = {key: self.loss_fn(logits[val], self.data.y[val]) for key, val in masks.items()}
metric = {key: self.evaluator(logits[val], self.data.y[val]) for key, val in masks.items()}
return metric, loss
@register_trainer("neighborsampler")
class NeighborSamplingTrainer(SampledTrainer):
model: torch.nn.Module
@staticmethod
def add_args(parser: argparse.ArgumentParser):
"""Add trainer-specific arguments to the parser."""
# fmt: off
SampledTrainer.add_args(parser)
# fmt: on
def __init__(self, args):
super(NeighborSamplingTrainer, self).__init__(args)
self.hidden_size = args.hidden_size
self.sample_size = args.sample_size
def fit(self, model, dataset):
self.data = dataset[0]
if self.self_loop:
self.data.add_remaining_self_loops()
self.evaluator = dataset.get_evaluator()
self.loss_fn = dataset.get_loss_fn()
settings = dict(
batch_size=self.batch_size,
num_workers=self.num_workers,
shuffle=False,
persistent_workers=True,
pin_memory=True,
)
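        # persistent_workers is only accepted by newer torch DataLoader versions;
        # drop it when running on an older build.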
if torch.__version__.split("+")[0] < "1.7.1":
settings.pop("persistent_workers")
self.data.train()
self.train_loader = NeighborSampler(
dataset=dataset,
mask=self.data.train_mask,
sizes=self.sample_size,
**settings,
)
settings["batch_size"] *= 5
self.data.eval()
self.test_loader = NeighborSampler(
dataset=dataset,
mask=None,
sizes=[-1],
**settings,
)
self.model = model.to(self.device)
self.model.set_data_device(self.device)
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr, weight_decay=self.weight_decay)
best_model = self.train()
self.model = best_model
acc, loss = self._test_step()
return dict(Acc=acc["test"], ValAcc=acc["val"])
def _train_step(self):
self.data.train()
self.model.train()
self.train_loader.shuffle()
x_all = self.data.x.to(self.device)
y_all = self.data.y.to(self.device)
for target_id, n_id, adjs in self.train_loader:
self.optimizer.zero_grad()
n_id = n_id.to(x_all.device)
target_id = target_id.to(y_all.device)
x_src = x_all[n_id].to(self.device)
y = y_all[target_id].to(self.device)
loss = self.model.node_classification_loss(x_src, adjs, y)
loss.backward()
self.optimizer.step()
def _test_step(self, split="val"):
self.model.eval()
self.data.eval()
masks = {"train": self.data.train_mask, "val": self.data.val_mask, "test": self.data.test_mask}
with torch.no_grad():
logits = self.model.inference(self.data.x, self.test_loader)
loss = {key: self.loss_fn(logits[val], self.data.y[val]) for key, val in masks.items()}
acc = {key: self.evaluator(logits[val], self.data.y[val]) for key, val in masks.items()}
return acc, loss
@classmethod
def build_trainer_from_args(cls, args):
return cls(args)
@register_trainer("clustergcn")
class ClusterGCNTrainer(SampledTrainer):
@staticmethod
def add_args(parser: argparse.ArgumentParser):
"""Add trainer-specific arguments to the parser."""
# fmt: off
SampledTrainer.add_args(parser)
parser.add_argument("--n-cluster", type=int, default=1000)
parser.add_argument("--batch-size", type=int, default=20)
# fmt: on
@staticmethod
def get_args4sampler(args):
args4sampler = {
"method": "metis",
"n_cluster": args.n_cluster,
}
return args4sampler
def __init__(self, args):
super(ClusterGCNTrainer, self).__init__(args)
self.n_cluster = args.n_cluster
self.batch_size = args.batch_size
def fit(self, model, dataset):
self.data = dataset[0]
if self.self_loop:
self.data.add_remaining_self_loops()
self.model = model.to(self.device)
self.evaluator = dataset.get_evaluator()
self.loss_fn = dataset.get_loss_fn()
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr, weight_decay=self.weight_decay)
settings = dict(
batch_size=self.batch_size, num_workers=self.num_workers, persistent_workers=True, pin_memory=True
)
if torch.__version__.split("+")[0] < "1.7.1":
settings.pop("persistent_workers")
self.data.train()
self.train_loader = ClusteredLoader(
dataset,
self.n_cluster,
method="metis",
**settings,
)
best_model = self.train()
self.model = best_model
metric, loss = self._test_step()
return dict(Acc=metric["test"], ValAcc=metric["val"])
def _train_step(self):
self.model.train()
self.data.train()
self.train_loader.shuffle()
total_loss = 0
for batch in self.train_loader:
self.optimizer.zero_grad()
batch = batch.to(self.device)
loss = self.model.node_classification_loss(batch)
loss.backward()
total_loss += loss.item()
self.optimizer.step()
def _test_step(self, split="val"):
self.model.eval()
self.data.eval()
data = self.data
self.model = self.model.cpu()
masks = {"train": self.data.train_mask, "val": self.data.val_mask, "test": self.data.test_mask}
with torch.no_grad():
logits = self.model(data)
loss = {key: self.loss_fn(logits[val], self.data.y[val]) for key, val in masks.items()}
metric = {key: self.evaluator(logits[val], self.data.y[val]) for key, val in masks.items()}
return metric, loss
@register_trainer("random_cluster")
class RandomClusterTrainer(SampledTrainer):
@staticmethod
def add_args(parser):
# fmt: off
SampledTrainer.add_args(parser)
parser.add_argument("--n-cluster", type=int, default=10)
parser.add_argument("--val-n-cluster", type=int, default=-1)
# fmt: on
def __init__(self, args):
super(RandomClusterTrainer, self).__init__(args)
self.patience = args.patience // args.eval_step
self.n_cluster = args.n_cluster
self.val_n_cluster = args.val_n_cluster if hasattr(args, "val_n_cluster") else -1
self.eval_step = args.eval_step
self.data, self.optimizer, self.evaluator, self.loss_fn = None, None, None, None
def fit(self, model, dataset):
self.model = model.to(self.device)
self.data = dataset[0]
if self.self_loop:
self.data.add_remaining_self_loops()
self.loss_fn = dataset.get_loss_fn()
self.evaluator = dataset.get_evaluator()
settings = dict(num_workers=self.num_workers, persistent_workers=True, pin_memory=True)
if torch.__version__.split("+")[0] < "1.7.1":
settings.pop("persistent_workers")
self.train_loader = ClusteredLoader(dataset=dataset, n_cluster=self.n_cluster, method="random", **settings)
if self.val_n_cluster > 0:
self.test_loader = ClusteredLoader(
dataset=dataset,
n_cluster=self.val_n_cluster,
method="random",
num_workers=self.num_workers,
persistent_workers=True,
shuffle=False,
)
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr, weight_decay=self.weight_decay)
best_model = self.train()
self.model = best_model
metric, loss = self._test_step()
return dict(Acc=metric["test"], ValAcc=metric["val"])
def _train_step(self):
self.model.train()
self.data.train()
self.train_loader.shuffle()
for batch in self.train_loader:
self.optimizer.zero_grad()
batch = batch.to(self.device)
loss_n = self.model.node_classification_loss(batch)
loss_n.backward()
self.optimizer.step()
def _test_step(self, split="val"):
self.model.eval()
self.data.eval()
if self.val_n_cluster > 0:
return self.batch_eval(split)
self.model = self.model.to("cpu")
data = self.data
self.model = self.model.cpu()
masks = {"train": self.data.train_mask, "val": self.data.val_mask, "test": self.data.test_mask}
with torch.no_grad():
logits = self.model.predict(data)
loss = {key: self.loss_fn(logits[val], self.data.y[val]) for key, val in masks.items()}
metric = {key: self.evaluator(logits[val], self.data.y[val]) for key, val in masks.items()}
return metric, loss
def batch_eval(self, split="val"):
preds = {"train": [], "val": [], "test": []}
ys = {"train": [], "val": [], "test": []}
with torch.no_grad():
for batch in self.test_loader:
batch = batch.to(self.device)
pred = self.model.predict(batch)
for item in ["train", "val", "test"]:
preds[item].append(pred[batch[f"{item}_mask"]])
ys[item].append(batch.y[batch[f"{item}_mask"]])
metric = dict()
loss = dict()
for key in preds.keys():
pred = torch.cat(preds[key], dim=0)
y = torch.cat(ys[key], dim=0)
_metric = self.evaluator(pred, y)
_loss = self.loss_fn(pred, y)
metric[key] = _metric
loss[key] = _loss
return metric, loss
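# --- Editor's illustrative sketch; not part of the original module. ---
# Hedged outline of how one of the trainers above is typically driven by the
# surrounding CogDL pipeline; `args`, `model` and `dataset` stand in for
# objects produced elsewhere and are assumptions here.
#
#   trainer = NeighborSamplingTrainer.build_trainer_from_args(args)
#   result = trainer.fit(model, dataset)   # dict with Acc / ValAcc entries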
| 36.893709
| 127
| 0.602305
| 16,474
| 0.968603
| 0
| 0
| 14,592
| 0.857949
| 0
| 0
| 1,618
| 0.095132
|
491377c3b97184cf6e4325a1301a6746ac433ea2
| 7,448
|
py
|
Python
|
sample-input/sph-factors/pin-cell/sph-factors.py
|
AI-Pranto/OpenMOC
|
7f6ce4797aec20ddd916981a56a4ba54ffda9a06
|
[
"MIT"
] | 97
|
2015-01-02T02:13:45.000Z
|
2022-03-09T14:12:45.000Z
|
sample-input/sph-factors/pin-cell/sph-factors.py
|
AI-Pranto/OpenMOC
|
7f6ce4797aec20ddd916981a56a4ba54ffda9a06
|
[
"MIT"
] | 325
|
2015-01-07T17:43:14.000Z
|
2022-02-21T17:22:00.000Z
|
sample-input/sph-factors/pin-cell/sph-factors.py
|
AI-Pranto/OpenMOC
|
7f6ce4797aec20ddd916981a56a4ba54ffda9a06
|
[
"MIT"
] | 73
|
2015-01-17T19:11:58.000Z
|
2022-03-24T16:31:37.000Z
|
import openmoc
import openmc.openmoc_compatible
import openmc.mgxs
import numpy as np
import matplotlib
# Enable Matplotlib to work on headless nodes
matplotlib.use('Agg')
import matplotlib.pyplot as plt
plt.ioff()
opts = openmoc.options.Options()
openmoc.log.set_log_level('NORMAL')
###############################################################################
# Eigenvalue Calculation w/o SPH Factors
###############################################################################
# Initialize 2-group OpenMC multi-group cross section library for a pin cell
mgxs_lib = openmc.mgxs.Library.load_from_file(filename='mgxs', directory='.')
# Create an OpenMOC Geometry from the OpenMC Geometry
openmoc_geometry = \
openmc.openmoc_compatible.get_openmoc_geometry(mgxs_lib.geometry)
# Load cross section data
openmoc_materials = \
openmoc.materialize.load_openmc_mgxs_lib(mgxs_lib, openmoc_geometry)
# Initialize FSRs
openmoc_geometry.initializeFlatSourceRegions()
# Initialize an OpenMOC TrackGenerator
track_generator = openmoc.TrackGenerator(
openmoc_geometry, opts.num_azim, opts.azim_spacing)
track_generator.generateTracks()
# Initialize an OpenMOC Solver
solver = openmoc.CPUSolver(track_generator)
solver.setConvergenceThreshold(opts.tolerance)
solver.setNumThreads(opts.num_omp_threads)
# Run an eigenvalue calculation with the MGXS from OpenMC
solver.computeEigenvalue(opts.max_iters)
solver.printTimerReport()
keff_no_sph = solver.getKeff()
# Extract the OpenMOC scalar fluxes
fluxes_no_sph = openmoc.process.get_scalar_fluxes(solver)
###############################################################################
# Eigenvalue Calculation with SPH Factors
###############################################################################
# Compute SPH factors
sph, sph_mgxs_lib, sph_indices = \
openmoc.materialize.compute_sph_factors(
mgxs_lib, azim_spacing=opts.azim_spacing,
num_azim=opts.num_azim, num_threads=opts.num_omp_threads)
# Load the SPH-corrected MGXS library data
materials = \
openmoc.materialize.load_openmc_mgxs_lib(sph_mgxs_lib, openmoc_geometry)
# Run an eigenvalue calculation with the SPH-corrected modified MGXS library
solver.computeEigenvalue(opts.max_iters)
solver.printTimerReport()
keff_with_sph = solver.getKeff()
# Report the OpenMC and OpenMOC eigenvalues
openmoc.log.py_printf('RESULT', 'OpenMOC keff w/o SPH: \t%1.5f', keff_no_sph)
openmoc.log.py_printf('RESULT', 'OpenMOC keff w/ SPH: \t%1.5f', keff_with_sph)
openmoc.log.py_printf('RESULT', 'OpenMC keff: \t\t1.17574 +/- 0.00086')
###############################################################################
# Extracting Scalar Fluxes
###############################################################################
openmoc.log.py_printf('NORMAL', 'Plotting data...')
# Plot the cells
openmoc.plotter.plot_cells(openmoc_geometry)
# Extract the OpenMOC scalar fluxes
fluxes_sph = openmoc.process.get_scalar_fluxes(solver)
fluxes_sph *= sph
# Extract the OpenMC scalar fluxes
num_fsrs = openmoc_geometry.getNumFSRs()
num_groups = openmoc_geometry.getNumEnergyGroups()
openmc_fluxes = np.zeros((num_fsrs, num_groups), dtype=np.float64)
nufission_xs = np.zeros((num_fsrs, num_groups), dtype=np.float64)
# Get the OpenMC flux in each FSR
for fsr in range(num_fsrs):
# Find the OpenMOC cell and volume for this FSR
openmoc_cell = openmoc_geometry.findCellContainingFSR(fsr)
cell_id = openmoc_cell.getId()
fsr_volume = track_generator.getFSRVolume(fsr)
# Store the volume-averaged flux
mgxs = mgxs_lib.get_mgxs(cell_id, 'nu-fission')
flux = mgxs.tallies['flux'].mean.flatten()
flux = np.flipud(flux) / fsr_volume
openmc_fluxes[fsr, :] = flux
nufission_xs[fsr, :] = mgxs.get_xs(nuclide='all')
# Extract energy group edges
group_edges = mgxs_lib.energy_groups.group_edges
group_edges += 1e-3 # Adjust lower bound to 1e-3 eV (for loglog scaling)
# Reverse the order of the energy group edges
group_edges = np.flipud(group_edges)
# Normalize fluxes with the fission source
openmc_fluxes /= np.sum(openmc_fluxes * nufission_xs)
fluxes_sph /= np.sum(fluxes_sph * nufission_xs)
fluxes_no_sph /= np.sum(fluxes_no_sph * nufission_xs)
###############################################################################
# Plot the OpenMC, OpenMOC Scalar Fluxes
###############################################################################
# Extend the mgxs values array for matplotlib's step plot of fluxes
openmc_fluxes = np.insert(openmc_fluxes, 0, openmc_fluxes[:,0], axis=1)
fluxes_no_sph = np.insert(fluxes_no_sph, 0, fluxes_no_sph[:,0], axis=1)
fluxes_sph = np.insert(fluxes_sph, 0, fluxes_sph[:,0], axis=1)
# Plot OpenMOC and OpenMC fluxes in each FSR
for fsr in range(num_fsrs):
# Get the OpenMOC cell and material for this FSR
cell = openmoc_geometry.findCellContainingFSR(fsr)
material_name = cell.getFillMaterial().getName()
# Create a step plot for the MGXS
fig = plt.figure()
plt.plot(group_edges, openmc_fluxes[fsr,:],
drawstyle='steps', color='r', linewidth=2)
plt.plot(group_edges, fluxes_no_sph[fsr,:],
drawstyle='steps', color='b', linewidth=2)
plt.plot(group_edges, fluxes_sph[fsr,:],
drawstyle='steps', color='g', linewidth=2)
plt.yscale('log')
plt.xscale('log')
plt.xlabel('Energy [eV]')
plt.ylabel('Flux')
plt.title('Normalized Flux ({0})'.format(material_name))
plt.xlim((min(group_edges), max(group_edges)))
plt.legend(['openmc', 'openmoc w/o sph', 'openmoc w/ sph'], loc='best')
plt.grid()
filename = 'plots/flux-{0}.png'.format(material_name.replace(' ', '-'))
plt.savefig(filename, bbox_inches='tight')
plt.close()
###############################################################################
# Plot OpenMC-to-OpenMOC Scalar Flux Errors
###############################################################################
# Compute the percent relative error in the flux
rel_err_no_sph = np.zeros(openmc_fluxes.shape)
rel_err_sph = np.zeros(openmc_fluxes.shape)
for fsr in range(num_fsrs):
delta_flux_no_sph = fluxes_no_sph[fsr,:] - openmc_fluxes[fsr,:]
delta_flux_sph = fluxes_sph[fsr,:] - openmc_fluxes[fsr,:]
rel_err_no_sph[fsr,:] = delta_flux_no_sph / openmc_fluxes[fsr,:] * 100.
rel_err_sph[fsr,:] = delta_flux_sph / openmc_fluxes[fsr,:] * 100.
# Plot OpenMOC relative flux errors in each FSR
for fsr in range(num_fsrs):
# Get the OpenMOC cell and material for this FSR
cell = openmoc_geometry.findCellContainingFSR(fsr)
material_name = cell.getFillMaterial().getName()
# Create a step plot for the MGXS
fig = plt.figure()
plt.plot(group_edges, rel_err_no_sph[fsr,:],
drawstyle='steps', color='r', linewidth=2)
plt.plot(group_edges, rel_err_sph[fsr,:],
drawstyle='steps', color='b', linewidth=2)
plt.xscale('log')
plt.xlabel('Energy [eV]')
plt.ylabel('Relative Error [%]')
plt.title('OpenMOC-to-OpenMC Flux Rel. Err. ({0})'.format(material_name))
plt.xlim((min(group_edges), max(group_edges)))
plt.legend(['openmoc w/o sph', 'openmoc w/ sph'], loc='best')
plt.grid()
filename = 'plots/rel-err-{0}.png'.format(material_name.replace(' ', '-'))
plt.savefig(filename, bbox_inches='tight')
plt.close()
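# --- Editor's illustrative addition; not part of the original script. ---
# Summarize the worst-case flux error with and without SPH factors across all
# FSRs and energy groups, using the arrays computed above.
max_err_no_sph = np.max(np.abs(rel_err_no_sph))
max_err_sph = np.max(np.abs(rel_err_sph))
openmoc.log.py_printf('RESULT', 'Max flux rel. err. w/o SPH: \t%1.3f%%', max_err_no_sph)
openmoc.log.py_printf('RESULT', 'Max flux rel. err. w/ SPH: \t%1.3f%%', max_err_sph)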
| 36.509804
| 79
| 0.649436
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,893
| 0.388426
|
4913c3ea285b469820f3898e3feff4274634fe9e
| 494
|
py
|
Python
|
VerifyServer.py
|
ACueva/Avi-Playground
|
cb1768999630ed884cff5d40c0faa86d24802754
|
[
"Apache-2.0"
] | null | null | null |
VerifyServer.py
|
ACueva/Avi-Playground
|
cb1768999630ed884cff5d40c0faa86d24802754
|
[
"Apache-2.0"
] | null | null | null |
VerifyServer.py
|
ACueva/Avi-Playground
|
cb1768999630ed884cff5d40c0faa86d24802754
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import os
import urllib2, json
from urlparse import urlparse
def ParseURL(agsURL):
ags = []
print agsURL
ags = urlparse(agsURL)
return ags
def GetFolders(agsURL):
f = urllib2.urlopen(agsURL)
j = json.loads(f.read())
for item in j["folders"]:
print item
def MapServiceQuery(agsURL):
f = urllib2.urlopen(agsURL)
#print f.read()
j = json.loads(f.read())
for item in j["layers"]:
print item["name"]
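# --- Editor's illustrative sketch; not part of the original file. ---
# Hedged example (Python 2, matching the module above) of calling the helpers;
# the ArcGIS Server URLs are assumptions made purely for illustration:
#
#   GetFolders("https://sampleserver6.arcgisonline.com/arcgis/rest/services?f=json")
#   MapServiceQuery("https://sampleserver6.arcgisonline.com/arcgis/rest/services/Census/MapServer?f=json")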
| 21.478261
| 31
| 0.61336
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 59
| 0.119433
|
4915e964647e8f80103fac2c1f5e742bca3ba265
| 809
|
py
|
Python
|
data_io/export_camera_poses.py
|
tumcms/aerial2pdsm
|
1dc4f9c49d3f682dac4a665d081d547baaa0c877
|
[
"BSD-3-Clause"
] | null | null | null |
data_io/export_camera_poses.py
|
tumcms/aerial2pdsm
|
1dc4f9c49d3f682dac4a665d081d547baaa0c877
|
[
"BSD-3-Clause"
] | null | null | null |
data_io/export_camera_poses.py
|
tumcms/aerial2pdsm
|
1dc4f9c49d3f682dac4a665d081d547baaa0c877
|
[
"BSD-3-Clause"
] | 1
|
2021-11-30T13:02:26.000Z
|
2021-11-30T13:02:26.000Z
|
import plyfile
from config import SparseModel, project_path
from colmap_scripts.read_model import Image
import numpy as np
project = SparseModel("/home/felix/pointclouds/_working/2019_11_07_Muenchen_26_10_2018",
model_path="/home/felix/pointclouds/_working/2019_11_07_Muenchen_26_10_2018/sparse/1")
images = project.images
cameras = project.cameras
poses = []
dt = np.dtype([('x', np.float64), ('y', np.float64), ('z', np.float64), ('img', np.int32)])
for nr, img in images.items():
R = img.qvec2rotmat()
T = img.tvec
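    # Camera center in world coordinates: C = -R^T * t
    # (COLMAP stores the world-to-camera rotation/translation for each image).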
cp = np.dot(-R.T, T)
pose = (cp[0], cp[1], cp[2], nr)
poses.append(pose)
vertex = np.array(poses, dtype=dt)
v = plyfile.PlyElement.describe(vertex, "vertices")
plyfile.PlyData([v], text=True).write(project.model_path + "/camera_locations.ply")
| 32.36
| 108
| 0.697157
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 186
| 0.229913
|
491612be631c7616e895777f18a1f1f4ec09ba72
| 15,815
|
py
|
Python
|
src/goesrecv/support/generate_interpolator_taps.py
|
codient/goestools
|
862600960681a1a1f3942f18f40b1f17dcdffc40
|
[
"BSD-2-Clause"
] | 244
|
2017-11-07T12:12:23.000Z
|
2022-03-24T07:24:53.000Z
|
src/goesrecv/support/generate_interpolator_taps.py
|
codient/goestools
|
862600960681a1a1f3942f18f40b1f17dcdffc40
|
[
"BSD-2-Clause"
] | 116
|
2018-03-07T04:02:26.000Z
|
2022-03-27T12:08:01.000Z
|
src/goesrecv/support/generate_interpolator_taps.py
|
codient/goestools
|
862600960681a1a1f3942f18f40b1f17dcdffc40
|
[
"BSD-2-Clause"
] | 65
|
2018-05-28T02:44:21.000Z
|
2022-03-18T12:27:58.000Z
|
#!/usr/bin/env python
#
# Create filter taps to use for interpolation filter in
# clock recovery algorithm. These taps are copied from
# GNU Radio at gnuradio/filter/interpolator_taps.h.
#
# This file includes them in natural order and I want
# them stored in reversed order such that they can be
# used directly.
#
filters = [
[ 0.00000e+00, 0.00000e+00, 0.00000e+00, 0.00000e+00, 1.00000e+00, 0.00000e+00, 0.00000e+00, 0.00000e+00 ],
[ -1.54700e-04, 8.53777e-04, -2.76968e-03, 7.89295e-03, 9.98534e-01, -5.41054e-03, 1.24642e-03, -1.98993e-04 ],
[ -3.09412e-04, 1.70888e-03, -5.55134e-03, 1.58840e-02, 9.96891e-01, -1.07209e-02, 2.47942e-03, -3.96391e-04 ],
[ -4.64053e-04, 2.56486e-03, -8.34364e-03, 2.39714e-02, 9.95074e-01, -1.59305e-02, 3.69852e-03, -5.92100e-04 ],
[ -6.18544e-04, 3.42130e-03, -1.11453e-02, 3.21531e-02, 9.93082e-01, -2.10389e-02, 4.90322e-03, -7.86031e-04 ],
[ -7.72802e-04, 4.27773e-03, -1.39548e-02, 4.04274e-02, 9.90917e-01, -2.60456e-02, 6.09305e-03, -9.78093e-04 ],
[ -9.26747e-04, 5.13372e-03, -1.67710e-02, 4.87921e-02, 9.88580e-01, -3.09503e-02, 7.26755e-03, -1.16820e-03 ],
[ -1.08030e-03, 5.98883e-03, -1.95925e-02, 5.72454e-02, 9.86071e-01, -3.57525e-02, 8.42626e-03, -1.35627e-03 ],
[ -1.23337e-03, 6.84261e-03, -2.24178e-02, 6.57852e-02, 9.83392e-01, -4.04519e-02, 9.56876e-03, -1.54221e-03 ],
[ -1.38589e-03, 7.69462e-03, -2.52457e-02, 7.44095e-02, 9.80543e-01, -4.50483e-02, 1.06946e-02, -1.72594e-03 ],
[ -1.53777e-03, 8.54441e-03, -2.80746e-02, 8.31162e-02, 9.77526e-01, -4.95412e-02, 1.18034e-02, -1.90738e-03 ],
[ -1.68894e-03, 9.39154e-03, -3.09033e-02, 9.19033e-02, 9.74342e-01, -5.39305e-02, 1.28947e-02, -2.08645e-03 ],
[ -1.83931e-03, 1.02356e-02, -3.37303e-02, 1.00769e-01, 9.70992e-01, -5.82159e-02, 1.39681e-02, -2.26307e-03 ],
[ -1.98880e-03, 1.10760e-02, -3.65541e-02, 1.09710e-01, 9.67477e-01, -6.23972e-02, 1.50233e-02, -2.43718e-03 ],
[ -2.13733e-03, 1.19125e-02, -3.93735e-02, 1.18725e-01, 9.63798e-01, -6.64743e-02, 1.60599e-02, -2.60868e-03 ],
[ -2.28483e-03, 1.27445e-02, -4.21869e-02, 1.27812e-01, 9.59958e-01, -7.04471e-02, 1.70776e-02, -2.77751e-03 ],
[ -2.43121e-03, 1.35716e-02, -4.49929e-02, 1.36968e-01, 9.55956e-01, -7.43154e-02, 1.80759e-02, -2.94361e-03 ],
[ -2.57640e-03, 1.43934e-02, -4.77900e-02, 1.46192e-01, 9.51795e-01, -7.80792e-02, 1.90545e-02, -3.10689e-03 ],
[ -2.72032e-03, 1.52095e-02, -5.05770e-02, 1.55480e-01, 9.47477e-01, -8.17385e-02, 2.00132e-02, -3.26730e-03 ],
[ -2.86289e-03, 1.60193e-02, -5.33522e-02, 1.64831e-01, 9.43001e-01, -8.52933e-02, 2.09516e-02, -3.42477e-03 ],
[ -3.00403e-03, 1.68225e-02, -5.61142e-02, 1.74242e-01, 9.38371e-01, -8.87435e-02, 2.18695e-02, -3.57923e-03 ],
[ -3.14367e-03, 1.76185e-02, -5.88617e-02, 1.83711e-01, 9.33586e-01, -9.20893e-02, 2.27664e-02, -3.73062e-03 ],
[ -3.28174e-03, 1.84071e-02, -6.15931e-02, 1.93236e-01, 9.28650e-01, -9.53307e-02, 2.36423e-02, -3.87888e-03 ],
[ -3.41815e-03, 1.91877e-02, -6.43069e-02, 2.02814e-01, 9.23564e-01, -9.84679e-02, 2.44967e-02, -4.02397e-03 ],
[ -3.55283e-03, 1.99599e-02, -6.70018e-02, 2.12443e-01, 9.18329e-01, -1.01501e-01, 2.53295e-02, -4.16581e-03 ],
[ -3.68570e-03, 2.07233e-02, -6.96762e-02, 2.22120e-01, 9.12947e-01, -1.04430e-01, 2.61404e-02, -4.30435e-03 ],
[ -3.81671e-03, 2.14774e-02, -7.23286e-02, 2.31843e-01, 9.07420e-01, -1.07256e-01, 2.69293e-02, -4.43955e-03 ],
[ -3.94576e-03, 2.22218e-02, -7.49577e-02, 2.41609e-01, 9.01749e-01, -1.09978e-01, 2.76957e-02, -4.57135e-03 ],
[ -4.07279e-03, 2.29562e-02, -7.75620e-02, 2.51417e-01, 8.95936e-01, -1.12597e-01, 2.84397e-02, -4.69970e-03 ],
[ -4.19774e-03, 2.36801e-02, -8.01399e-02, 2.61263e-01, 8.89984e-01, -1.15113e-01, 2.91609e-02, -4.82456e-03 ],
[ -4.32052e-03, 2.43930e-02, -8.26900e-02, 2.71144e-01, 8.83893e-01, -1.17526e-01, 2.98593e-02, -4.94589e-03 ],
[ -4.44107e-03, 2.50946e-02, -8.52109e-02, 2.81060e-01, 8.77666e-01, -1.19837e-01, 3.05345e-02, -5.06363e-03 ],
[ -4.55932e-03, 2.57844e-02, -8.77011e-02, 2.91006e-01, 8.71305e-01, -1.22047e-01, 3.11866e-02, -5.17776e-03 ],
[ -4.67520e-03, 2.64621e-02, -9.01591e-02, 3.00980e-01, 8.64812e-01, -1.24154e-01, 3.18153e-02, -5.28823e-03 ],
[ -4.78866e-03, 2.71272e-02, -9.25834e-02, 3.10980e-01, 8.58189e-01, -1.26161e-01, 3.24205e-02, -5.39500e-03 ],
[ -4.89961e-03, 2.77794e-02, -9.49727e-02, 3.21004e-01, 8.51437e-01, -1.28068e-01, 3.30021e-02, -5.49804e-03 ],
[ -5.00800e-03, 2.84182e-02, -9.73254e-02, 3.31048e-01, 8.44559e-01, -1.29874e-01, 3.35600e-02, -5.59731e-03 ],
[ -5.11376e-03, 2.90433e-02, -9.96402e-02, 3.41109e-01, 8.37557e-01, -1.31581e-01, 3.40940e-02, -5.69280e-03 ],
[ -5.21683e-03, 2.96543e-02, -1.01915e-01, 3.51186e-01, 8.30432e-01, -1.33189e-01, 3.46042e-02, -5.78446e-03 ],
[ -5.31716e-03, 3.02507e-02, -1.04150e-01, 3.61276e-01, 8.23188e-01, -1.34699e-01, 3.50903e-02, -5.87227e-03 ],
[ -5.41467e-03, 3.08323e-02, -1.06342e-01, 3.71376e-01, 8.15826e-01, -1.36111e-01, 3.55525e-02, -5.95620e-03 ],
[ -5.50931e-03, 3.13987e-02, -1.08490e-01, 3.81484e-01, 8.08348e-01, -1.37426e-01, 3.59905e-02, -6.03624e-03 ],
[ -5.60103e-03, 3.19495e-02, -1.10593e-01, 3.91596e-01, 8.00757e-01, -1.38644e-01, 3.64044e-02, -6.11236e-03 ],
[ -5.68976e-03, 3.24843e-02, -1.12650e-01, 4.01710e-01, 7.93055e-01, -1.39767e-01, 3.67941e-02, -6.18454e-03 ],
[ -5.77544e-03, 3.30027e-02, -1.14659e-01, 4.11823e-01, 7.85244e-01, -1.40794e-01, 3.71596e-02, -6.25277e-03 ],
[ -5.85804e-03, 3.35046e-02, -1.16618e-01, 4.21934e-01, 7.77327e-01, -1.41727e-01, 3.75010e-02, -6.31703e-03 ],
[ -5.93749e-03, 3.39894e-02, -1.18526e-01, 4.32038e-01, 7.69305e-01, -1.42566e-01, 3.78182e-02, -6.37730e-03 ],
[ -6.01374e-03, 3.44568e-02, -1.20382e-01, 4.42134e-01, 7.61181e-01, -1.43313e-01, 3.81111e-02, -6.43358e-03 ],
[ -6.08674e-03, 3.49066e-02, -1.22185e-01, 4.52218e-01, 7.52958e-01, -1.43968e-01, 3.83800e-02, -6.48585e-03 ],
[ -6.15644e-03, 3.53384e-02, -1.23933e-01, 4.62289e-01, 7.44637e-01, -1.44531e-01, 3.86247e-02, -6.53412e-03 ],
[ -6.22280e-03, 3.57519e-02, -1.25624e-01, 4.72342e-01, 7.36222e-01, -1.45004e-01, 3.88454e-02, -6.57836e-03 ],
[ -6.28577e-03, 3.61468e-02, -1.27258e-01, 4.82377e-01, 7.27714e-01, -1.45387e-01, 3.90420e-02, -6.61859e-03 ],
[ -6.34530e-03, 3.65227e-02, -1.28832e-01, 4.92389e-01, 7.19116e-01, -1.45682e-01, 3.92147e-02, -6.65479e-03 ],
[ -6.40135e-03, 3.68795e-02, -1.30347e-01, 5.02377e-01, 7.10431e-01, -1.45889e-01, 3.93636e-02, -6.68698e-03 ],
[ -6.45388e-03, 3.72167e-02, -1.31800e-01, 5.12337e-01, 7.01661e-01, -1.46009e-01, 3.94886e-02, -6.71514e-03 ],
[ -6.50285e-03, 3.75341e-02, -1.33190e-01, 5.22267e-01, 6.92808e-01, -1.46043e-01, 3.95900e-02, -6.73929e-03 ],
[ -6.54823e-03, 3.78315e-02, -1.34515e-01, 5.32164e-01, 6.83875e-01, -1.45993e-01, 3.96678e-02, -6.75943e-03 ],
[ -6.58996e-03, 3.81085e-02, -1.35775e-01, 5.42025e-01, 6.74865e-01, -1.45859e-01, 3.97222e-02, -6.77557e-03 ],
[ -6.62802e-03, 3.83650e-02, -1.36969e-01, 5.51849e-01, 6.65779e-01, -1.45641e-01, 3.97532e-02, -6.78771e-03 ],
[ -6.66238e-03, 3.86006e-02, -1.38094e-01, 5.61631e-01, 6.56621e-01, -1.45343e-01, 3.97610e-02, -6.79588e-03 ],
[ -6.69300e-03, 3.88151e-02, -1.39150e-01, 5.71370e-01, 6.47394e-01, -1.44963e-01, 3.97458e-02, -6.80007e-03 ],
[ -6.71985e-03, 3.90083e-02, -1.40136e-01, 5.81063e-01, 6.38099e-01, -1.44503e-01, 3.97077e-02, -6.80032e-03 ],
[ -6.74291e-03, 3.91800e-02, -1.41050e-01, 5.90706e-01, 6.28739e-01, -1.43965e-01, 3.96469e-02, -6.79662e-03 ],
[ -6.76214e-03, 3.93299e-02, -1.41891e-01, 6.00298e-01, 6.19318e-01, -1.43350e-01, 3.95635e-02, -6.78902e-03 ],
[ -6.77751e-03, 3.94578e-02, -1.42658e-01, 6.09836e-01, 6.09836e-01, -1.42658e-01, 3.94578e-02, -6.77751e-03 ],
[ -6.78902e-03, 3.95635e-02, -1.43350e-01, 6.19318e-01, 6.00298e-01, -1.41891e-01, 3.93299e-02, -6.76214e-03 ],
[ -6.79662e-03, 3.96469e-02, -1.43965e-01, 6.28739e-01, 5.90706e-01, -1.41050e-01, 3.91800e-02, -6.74291e-03 ],
[ -6.80032e-03, 3.97077e-02, -1.44503e-01, 6.38099e-01, 5.81063e-01, -1.40136e-01, 3.90083e-02, -6.71985e-03 ],
[ -6.80007e-03, 3.97458e-02, -1.44963e-01, 6.47394e-01, 5.71370e-01, -1.39150e-01, 3.88151e-02, -6.69300e-03 ],
[ -6.79588e-03, 3.97610e-02, -1.45343e-01, 6.56621e-01, 5.61631e-01, -1.38094e-01, 3.86006e-02, -6.66238e-03 ],
[ -6.78771e-03, 3.97532e-02, -1.45641e-01, 6.65779e-01, 5.51849e-01, -1.36969e-01, 3.83650e-02, -6.62802e-03 ],
[ -6.77557e-03, 3.97222e-02, -1.45859e-01, 6.74865e-01, 5.42025e-01, -1.35775e-01, 3.81085e-02, -6.58996e-03 ],
[ -6.75943e-03, 3.96678e-02, -1.45993e-01, 6.83875e-01, 5.32164e-01, -1.34515e-01, 3.78315e-02, -6.54823e-03 ],
[ -6.73929e-03, 3.95900e-02, -1.46043e-01, 6.92808e-01, 5.22267e-01, -1.33190e-01, 3.75341e-02, -6.50285e-03 ],
[ -6.71514e-03, 3.94886e-02, -1.46009e-01, 7.01661e-01, 5.12337e-01, -1.31800e-01, 3.72167e-02, -6.45388e-03 ],
[ -6.68698e-03, 3.93636e-02, -1.45889e-01, 7.10431e-01, 5.02377e-01, -1.30347e-01, 3.68795e-02, -6.40135e-03 ],
[ -6.65479e-03, 3.92147e-02, -1.45682e-01, 7.19116e-01, 4.92389e-01, -1.28832e-01, 3.65227e-02, -6.34530e-03 ],
[ -6.61859e-03, 3.90420e-02, -1.45387e-01, 7.27714e-01, 4.82377e-01, -1.27258e-01, 3.61468e-02, -6.28577e-03 ],
[ -6.57836e-03, 3.88454e-02, -1.45004e-01, 7.36222e-01, 4.72342e-01, -1.25624e-01, 3.57519e-02, -6.22280e-03 ],
[ -6.53412e-03, 3.86247e-02, -1.44531e-01, 7.44637e-01, 4.62289e-01, -1.23933e-01, 3.53384e-02, -6.15644e-03 ],
[ -6.48585e-03, 3.83800e-02, -1.43968e-01, 7.52958e-01, 4.52218e-01, -1.22185e-01, 3.49066e-02, -6.08674e-03 ],
[ -6.43358e-03, 3.81111e-02, -1.43313e-01, 7.61181e-01, 4.42134e-01, -1.20382e-01, 3.44568e-02, -6.01374e-03 ],
[ -6.37730e-03, 3.78182e-02, -1.42566e-01, 7.69305e-01, 4.32038e-01, -1.18526e-01, 3.39894e-02, -5.93749e-03 ],
[ -6.31703e-03, 3.75010e-02, -1.41727e-01, 7.77327e-01, 4.21934e-01, -1.16618e-01, 3.35046e-02, -5.85804e-03 ],
[ -6.25277e-03, 3.71596e-02, -1.40794e-01, 7.85244e-01, 4.11823e-01, -1.14659e-01, 3.30027e-02, -5.77544e-03 ],
[ -6.18454e-03, 3.67941e-02, -1.39767e-01, 7.93055e-01, 4.01710e-01, -1.12650e-01, 3.24843e-02, -5.68976e-03 ],
[ -6.11236e-03, 3.64044e-02, -1.38644e-01, 8.00757e-01, 3.91596e-01, -1.10593e-01, 3.19495e-02, -5.60103e-03 ],
[ -6.03624e-03, 3.59905e-02, -1.37426e-01, 8.08348e-01, 3.81484e-01, -1.08490e-01, 3.13987e-02, -5.50931e-03 ],
[ -5.95620e-03, 3.55525e-02, -1.36111e-01, 8.15826e-01, 3.71376e-01, -1.06342e-01, 3.08323e-02, -5.41467e-03 ],
[ -5.87227e-03, 3.50903e-02, -1.34699e-01, 8.23188e-01, 3.61276e-01, -1.04150e-01, 3.02507e-02, -5.31716e-03 ],
[ -5.78446e-03, 3.46042e-02, -1.33189e-01, 8.30432e-01, 3.51186e-01, -1.01915e-01, 2.96543e-02, -5.21683e-03 ],
[ -5.69280e-03, 3.40940e-02, -1.31581e-01, 8.37557e-01, 3.41109e-01, -9.96402e-02, 2.90433e-02, -5.11376e-03 ],
[ -5.59731e-03, 3.35600e-02, -1.29874e-01, 8.44559e-01, 3.31048e-01, -9.73254e-02, 2.84182e-02, -5.00800e-03 ],
[ -5.49804e-03, 3.30021e-02, -1.28068e-01, 8.51437e-01, 3.21004e-01, -9.49727e-02, 2.77794e-02, -4.89961e-03 ],
[ -5.39500e-03, 3.24205e-02, -1.26161e-01, 8.58189e-01, 3.10980e-01, -9.25834e-02, 2.71272e-02, -4.78866e-03 ],
[ -5.28823e-03, 3.18153e-02, -1.24154e-01, 8.64812e-01, 3.00980e-01, -9.01591e-02, 2.64621e-02, -4.67520e-03 ],
[ -5.17776e-03, 3.11866e-02, -1.22047e-01, 8.71305e-01, 2.91006e-01, -8.77011e-02, 2.57844e-02, -4.55932e-03 ],
[ -5.06363e-03, 3.05345e-02, -1.19837e-01, 8.77666e-01, 2.81060e-01, -8.52109e-02, 2.50946e-02, -4.44107e-03 ],
[ -4.94589e-03, 2.98593e-02, -1.17526e-01, 8.83893e-01, 2.71144e-01, -8.26900e-02, 2.43930e-02, -4.32052e-03 ],
[ -4.82456e-03, 2.91609e-02, -1.15113e-01, 8.89984e-01, 2.61263e-01, -8.01399e-02, 2.36801e-02, -4.19774e-03 ],
[ -4.69970e-03, 2.84397e-02, -1.12597e-01, 8.95936e-01, 2.51417e-01, -7.75620e-02, 2.29562e-02, -4.07279e-03 ],
[ -4.57135e-03, 2.76957e-02, -1.09978e-01, 9.01749e-01, 2.41609e-01, -7.49577e-02, 2.22218e-02, -3.94576e-03 ],
[ -4.43955e-03, 2.69293e-02, -1.07256e-01, 9.07420e-01, 2.31843e-01, -7.23286e-02, 2.14774e-02, -3.81671e-03 ],
[ -4.30435e-03, 2.61404e-02, -1.04430e-01, 9.12947e-01, 2.22120e-01, -6.96762e-02, 2.07233e-02, -3.68570e-03 ],
[ -4.16581e-03, 2.53295e-02, -1.01501e-01, 9.18329e-01, 2.12443e-01, -6.70018e-02, 1.99599e-02, -3.55283e-03 ],
[ -4.02397e-03, 2.44967e-02, -9.84679e-02, 9.23564e-01, 2.02814e-01, -6.43069e-02, 1.91877e-02, -3.41815e-03 ],
[ -3.87888e-03, 2.36423e-02, -9.53307e-02, 9.28650e-01, 1.93236e-01, -6.15931e-02, 1.84071e-02, -3.28174e-03 ],
[ -3.73062e-03, 2.27664e-02, -9.20893e-02, 9.33586e-01, 1.83711e-01, -5.88617e-02, 1.76185e-02, -3.14367e-03 ],
[ -3.57923e-03, 2.18695e-02, -8.87435e-02, 9.38371e-01, 1.74242e-01, -5.61142e-02, 1.68225e-02, -3.00403e-03 ],
[ -3.42477e-03, 2.09516e-02, -8.52933e-02, 9.43001e-01, 1.64831e-01, -5.33522e-02, 1.60193e-02, -2.86289e-03 ],
[ -3.26730e-03, 2.00132e-02, -8.17385e-02, 9.47477e-01, 1.55480e-01, -5.05770e-02, 1.52095e-02, -2.72032e-03 ],
[ -3.10689e-03, 1.90545e-02, -7.80792e-02, 9.51795e-01, 1.46192e-01, -4.77900e-02, 1.43934e-02, -2.57640e-03 ],
[ -2.94361e-03, 1.80759e-02, -7.43154e-02, 9.55956e-01, 1.36968e-01, -4.49929e-02, 1.35716e-02, -2.43121e-03 ],
[ -2.77751e-03, 1.70776e-02, -7.04471e-02, 9.59958e-01, 1.27812e-01, -4.21869e-02, 1.27445e-02, -2.28483e-03 ],
[ -2.60868e-03, 1.60599e-02, -6.64743e-02, 9.63798e-01, 1.18725e-01, -3.93735e-02, 1.19125e-02, -2.13733e-03 ],
[ -2.43718e-03, 1.50233e-02, -6.23972e-02, 9.67477e-01, 1.09710e-01, -3.65541e-02, 1.10760e-02, -1.98880e-03 ],
[ -2.26307e-03, 1.39681e-02, -5.82159e-02, 9.70992e-01, 1.00769e-01, -3.37303e-02, 1.02356e-02, -1.83931e-03 ],
[ -2.08645e-03, 1.28947e-02, -5.39305e-02, 9.74342e-01, 9.19033e-02, -3.09033e-02, 9.39154e-03, -1.68894e-03 ],
[ -1.90738e-03, 1.18034e-02, -4.95412e-02, 9.77526e-01, 8.31162e-02, -2.80746e-02, 8.54441e-03, -1.53777e-03 ],
[ -1.72594e-03, 1.06946e-02, -4.50483e-02, 9.80543e-01, 7.44095e-02, -2.52457e-02, 7.69462e-03, -1.38589e-03 ],
[ -1.54221e-03, 9.56876e-03, -4.04519e-02, 9.83392e-01, 6.57852e-02, -2.24178e-02, 6.84261e-03, -1.23337e-03 ],
[ -1.35627e-03, 8.42626e-03, -3.57525e-02, 9.86071e-01, 5.72454e-02, -1.95925e-02, 5.98883e-03, -1.08030e-03 ],
[ -1.16820e-03, 7.26755e-03, -3.09503e-02, 9.88580e-01, 4.87921e-02, -1.67710e-02, 5.13372e-03, -9.26747e-04 ],
[ -9.78093e-04, 6.09305e-03, -2.60456e-02, 9.90917e-01, 4.04274e-02, -1.39548e-02, 4.27773e-03, -7.72802e-04 ],
[ -7.86031e-04, 4.90322e-03, -2.10389e-02, 9.93082e-01, 3.21531e-02, -1.11453e-02, 3.42130e-03, -6.18544e-04 ],
[ -5.92100e-04, 3.69852e-03, -1.59305e-02, 9.95074e-01, 2.39714e-02, -8.34364e-03, 2.56486e-03, -4.64053e-04 ],
[ -3.96391e-04, 2.47942e-03, -1.07209e-02, 9.96891e-01, 1.58840e-02, -5.55134e-03, 1.70888e-03, -3.09412e-04 ],
[ -1.98993e-04, 1.24642e-03, -5.41054e-03, 9.98534e-01, 7.89295e-03, -2.76968e-03, 8.53777e-04, -1.54700e-04 ],
[ 0.00000e+00, 0.00000e+00, 0.00000e+00, 1.00000e+00, 0.00000e+00, 0.00000e+00, 0.00000e+00, 0.00000e+00 ]
]
print("static const int NUM_TAPS = 8;")
print("static const int NUM_STEPS = 128;")
print("static const mmseTaps[NUM_STEPS+1][NUM_TAPS] = {")
for taps in filters:
body = ", ".join("%.5e" % t for t in reversed(taps))
print("{ " + body + " },")
print("};")
| 104.735099
| 117
| 0.613152
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 446
| 0.028201
|
49172233318b11d598754579aedad36e5f01b8f1
| 1,688
|
py
|
Python
|
openpype/hosts/blender/plugins/publish/extract_blend_animation.py
|
jonclothcat/OpenPype
|
d1208cbebc0a7f378de0062ccd653295c6399195
|
[
"MIT"
] | 1
|
2022-02-08T15:40:41.000Z
|
2022-02-08T15:40:41.000Z
|
openpype/hosts/blender/plugins/publish/extract_blend_animation.py
|
zafrs/OpenPype
|
4b8e7e1ed002fc55b31307efdea70b0feaed474f
|
[
"MIT"
] | 2
|
2022-03-18T01:46:03.000Z
|
2022-03-18T01:46:16.000Z
|
openpype/hosts/blender/plugins/publish/extract_blend_animation.py
|
zafrs/OpenPype
|
4b8e7e1ed002fc55b31307efdea70b0feaed474f
|
[
"MIT"
] | null | null | null |
import os
import bpy
import openpype.api
class ExtractBlendAnimation(openpype.api.Extractor):
"""Extract a blend file."""
label = "Extract Blend"
hosts = ["blender"]
families = ["animation"]
optional = True
def process(self, instance):
# Define extract output file path
stagingdir = self.staging_dir(instance)
filename = f"{instance.name}.blend"
filepath = os.path.join(stagingdir, filename)
# Perform extraction
self.log.info("Performing extraction..")
data_blocks = set()
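        # Collect each exported empty plus its child armature's action so both end
        # up in the data blocks written out via bpy.data.libraries.write() below.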
for obj in instance:
if isinstance(obj, bpy.types.Object) and obj.type == 'EMPTY':
child = obj.children[0]
if child and child.type == 'ARMATURE':
if child.animation_data and child.animation_data.action:
if not obj.animation_data:
obj.animation_data_create()
obj.animation_data.action = child.animation_data.action
obj.animation_data_clear()
data_blocks.add(child.animation_data.action)
data_blocks.add(obj)
bpy.data.libraries.write(filepath, data_blocks)
if "representations" not in instance.data:
instance.data["representations"] = []
representation = {
'name': 'blend',
'ext': 'blend',
'files': filename,
"stagingDir": stagingdir,
}
instance.data["representations"].append(representation)
self.log.info("Extracted instance '%s' to: %s",
instance.name, representation)
| 30.690909
| 79
| 0.566943
| 1,642
| 0.972749
| 0
| 0
| 0
| 0
| 0
| 0
| 308
| 0.182464
|
4917259ce7d453d0c463913b457ccefb5c69c0f6
| 2,772
|
py
|
Python
|
src/TamaTou.py
|
hirmiura/starsector-mod-Font_Replacement_for_Orbitron
|
ad7b5e3f4d8afd1a2aa97a420a2ec9a3aaf9b3d7
|
[
"MIT"
] | 1
|
2022-01-17T02:58:46.000Z
|
2022-01-17T02:58:46.000Z
|
src/TamaTou.py
|
hirmiura/starsector-mod-Font_Replacement_for_Orbitron
|
ad7b5e3f4d8afd1a2aa97a420a2ec9a3aaf9b3d7
|
[
"MIT"
] | null | null | null |
src/TamaTou.py
|
hirmiura/starsector-mod-Font_Replacement_for_Orbitron
|
ad7b5e3f4d8afd1a2aa97a420a2ec9a3aaf9b3d7
|
[
"MIT"
] | null | null | null |
# SPDX-License-Identifier: MIT
# Copyright 2022 hirmiura (https://github.com/hirmiura)
#
# Script that generates the TamaTou font
#
# Usage
# 1. Open the FontForge console (fontforge-console.bat)
# 2. cd into this directory
# 3. ffpython TamaTou.py
# 4. Wait
#
# Orbitron → "Ōbitoron" → orb → tama (ball) → so it's "Tama"!
# Noto → "No Toufu" → otherwise it's all tofu boxes → so it's "Tou"!
# →→ TamaTou
#
import fontforge
for weight in ['Regular', 'Bold']:
print('Modifying Tama (Orbitron)')
fn = fontforge.open(f'Orbitron-{weight}.ttf')
fn.encoding = 'UnicodeFull'
fn.save(f'tmp1-{weight}.sfd')
fn.close()
print(f'Modifying Tou (Noto) {weight}')
fn = fontforge.open(f'NotoSansJP-{weight}.otf')
fn.encoding = 'UnicodeFull'
fn.cidFlatten()
# fn.ascent = 800
# fn.descent = 200
# fn.upos = -125
# fn.em = 1000
fn.save(f'tmp2-{weight}.sfd')
fn.close()
print('Generating')
name = 'TamaTou'
copyright = 'Copyright (c) 2022, Hiroshi Miura (https://github.com/hirmiura) with Reserved Font Name TamaTou.'
version = '1.0.0'
license = 'Open Font License'
fn = fontforge.open(f'tmp1-{weight}.sfd')
fn.fontname = name
fn.familyname = name
fn.fullname = name
fn.weight = weight
fn.version = version
fn.sfntRevision = None
fn.copyright = copyright
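    # SFNT 'name' table entries: language ID 0x411 = Japanese, 0x409 = US English;
    # the second argument is the OpenType name ID (0 = copyright, 1 = family,
    # 5 = version, 6 = PostScript name, 13 = license description, ...).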
fn.appendSFNTName(0x411, 0, copyright)
fn.appendSFNTName(0x411, 1, name)
fn.appendSFNTName(0x411, 2, '')
fn.appendSFNTName(0x411, 3, '')
fn.appendSFNTName(0x411, 4, name)
fn.appendSFNTName(0x411, 5, version)
fn.appendSFNTName(0x411, 6, name + '-' + weight)
fn.appendSFNTName(0x411, 7, '')
fn.appendSFNTName(0x411, 8, '')
fn.appendSFNTName(0x411, 9, '')
fn.appendSFNTName(0x411, 10, '')
fn.appendSFNTName(0x411, 11, '')
fn.appendSFNTName(0x411, 12, '')
fn.appendSFNTName(0x411, 13, license)
fn.appendSFNTName(0x411, 14, '')
fn.appendSFNTName(0x411, 15, '')
fn.appendSFNTName(0x411, 16, name)
fn.appendSFNTName(0x411, 17, '')
fn.appendSFNTName(0x409, 0, copyright)
fn.appendSFNTName(0x409, 1, name)
fn.appendSFNTName(0x409, 2, '')
fn.appendSFNTName(0x409, 3, '')
fn.appendSFNTName(0x409, 4, name)
fn.appendSFNTName(0x409, 5, version)
fn.appendSFNTName(0x409, 6, name + '-' + weight)
fn.appendSFNTName(0x409, 7, '')
fn.appendSFNTName(0x409, 8, '')
fn.appendSFNTName(0x409, 9, '')
fn.appendSFNTName(0x409, 10, '')
fn.appendSFNTName(0x409, 11, '')
fn.appendSFNTName(0x409, 12, '')
fn.appendSFNTName(0x409, 13, license)
fn.appendSFNTName(0x409, 14, '')
fn.appendSFNTName(0x409, 15, '')
fn.appendSFNTName(0x409, 16, name)
fn.appendSFNTName(0x409, 17, '')
# fn.mergeFonts(f'tmp1-{weight}.sfd')
fn.mergeFonts(f'tmp2-{weight}.sfd')
fn.save(f'tmp3-{weight}.sfd')
fn.generate(f'TamaTou-{weight}.otf')
fn.close()
| 30.461538
| 114
| 0.636724
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 964
| 0.330363
|
4918381c344c6f579cf53ea1bf560dc12227d2bf
| 2,623
|
py
|
Python
|
bambu/bambu.py
|
westurner/pandasrdf
|
c194b1eb9928488bc19b82d3cab409158cd413a3
|
[
"BSD-3-Clause"
] | 2
|
2016-07-01T10:48:04.000Z
|
2017-01-24T16:53:44.000Z
|
bambu/bambu.py
|
westurner/pandasrdf
|
c194b1eb9928488bc19b82d3cab409158cd413a3
|
[
"BSD-3-Clause"
] | 1
|
2016-06-20T10:54:53.000Z
|
2017-02-07T05:47:38.000Z
|
bambu/bambu.py
|
westurner/pandasrdf
|
c194b1eb9928488bc19b82d3cab409158cd413a3
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
"""
bambu
------
pandas RDF functionality
Installation
--------------
::
# pip install pandas
pip install rdflib
"""
import sys
import pandas as pd
import rdflib
def bambu():
"""
mainfunc
"""
pass
def to_rdf(df):
"""
Args:
df (DataFrame): pandas DataFrame to serialize to RDF
kwargs (dict): TODO
Returns:
rdflib.Graph: a serializable RDFLib Graph
"""
def read_rdf(path, **kwargs):
"""
Args:
path (str): path to an RDF source
kwargs (dict): TODO
Returns:
DataFrame: pandas DataFrame
"""
def to_rdfa(df, **kwargs):
"""
Args:
df (DataFrame): pandas DataFrame to serialize to RDF
kwargs (dict): TODO
Returns:
(list, StringIO): namespaces, RDFa table
"""
def read_rdfa(path, **kwargs):
"""
Args:
path (str): path to an RDF source
kwargs (dict): TODO
Returns:
DataFrame: pandas DataFrame
"""
def to_jsonld(df, **kwargs):
"""
Args:
df (DataFrame): pandas DataFrame to serialize to RDF
kwargs (dict): TODO
Returns:
(context, StringIO): JSONLD context, JSONLD data
"""
def read_jsonld(path, **kwargs):
"""
Args:
path (str): path to a JSONLD source
kwargs (dict): TODO
Returns:
DataFrame: pandas DataFrame
"""
import unittest
class Test_bambu(unittest.TestCase):
def test_bambu(self):
pass
def test_10_to_rdf(self):
df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
output = to_rdf(df)
print(output)
self.assertTrue(output)
def main(*args):
import optparse
import logging
prs = optparse.OptionParser(usage="%prog: [args]")
prs.add_option('-v', '--verbose',
dest='verbose',
action='store_true',)
prs.add_option('-q', '--quiet',
dest='quiet',
action='store_true',)
prs.add_option('-t', '--test',
dest='run_tests',
action='store_true',)
args = args and list(args) or sys.argv[1:]
(opts, args) = prs.parse_args(args)
if not opts.quiet:
logging.basicConfig()
if opts.verbose:
logging.getLogger().setLevel(logging.DEBUG)
if opts.run_tests:
sys.argv = [sys.argv[0]] + args
import unittest
sys.exit(unittest.main())
output = bambu()
return 0
if __name__ == "__main__":
sys.exit(main())
| 18.602837
| 63
| 0.552421
| 253
| 0.096454
| 0
| 0
| 0
| 0
| 0
| 0
| 1,277
| 0.486847
|
491871e30f2b60781d5b69aef6ac73571b60d676
| 19,637
|
py
|
Python
|
homework1/problem3/local/mort_icu.py
|
criticaldata/hst953-2021
|
b18c8235a6c878a4a7d3d330a9b69421f0217273
|
[
"MIT"
] | 1
|
2022-03-15T15:52:45.000Z
|
2022-03-15T15:52:45.000Z
|
homework1/problem3/local/mort_icu.py
|
MDenu/HST-homework
|
fff0f277ee18735acbe84dfe8c428e92991b28fa
|
[
"MIT"
] | null | null | null |
homework1/problem3/local/mort_icu.py
|
MDenu/HST-homework
|
fff0f277ee18735acbe84dfe8c428e92991b28fa
|
[
"MIT"
] | 3
|
2021-09-10T19:14:54.000Z
|
2021-09-26T22:23:05.000Z
|
# Generates the following data files from MIMIC:
# adult_icu.gz: data from adult ICUs
# n_icu.gz: data from neonatal ICUs
# adult_notes.gz: clinical notes from adult ICUs
# Import libraries
import numpy as np
import pandas as pd
import psycopg2
from scipy.stats import ks_2samp
import os
import random
# Output directory for the generated files
mimicdir = os.path.expanduser("./mimic_data/")
random.seed(42)
# create a database connection
sqluser = 'mimicuser'
dbname = 'mimic'
schema_name = 'mimiciii'
# Connect to local postgres version of mimic
con = psycopg2.connect(dbname=dbname, user=sqluser, host='127.0.0.1', password='PASSWORD')
cur = con.cursor()
cur.execute('SET search_path to ' + schema_name)
#========helper function for imputing missing values
def replace(group):
"""
takes in a pandas group, and replaces the
null value with the mean of the none null
values of the same group
"""
mask = group.isnull()
group[mask] = group[~mask].mean()
return group
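# For example, df.groupby('g')['x'].transform(replace) fills each group's NaNs
# with that group's mean, which is how it is applied further below.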
#========get the icu details
# this query extracts the following:
# Unique ids for the admission, patient and icu stay
# Patient gender
# admission & discharge times
# length of stay
# age
# ethnicity
# admission type
# in hospital death?
# in icu death?
# one year from admission death?
# first hospital stay
# icu intime, icu outime
# los in icu
# first icu stay?
denquery = \
"""
-- This query extracts useful demographic/administrative information for patient ICU stays
--DROP MATERIALIZED VIEW IF EXISTS icustay_detail CASCADE;
--CREATE MATERIALIZED VIEW icustay_detail as
--ie is the icustays table
--adm is the admissions table
SELECT ie.subject_id, ie.hadm_id, ie.icustay_id
, pat.gender
, adm.admittime, adm.dischtime, adm.diagnosis
, ROUND( (CAST(adm.dischtime AS DATE) - CAST(adm.admittime AS DATE)) , 4) AS los_hospital
, ROUND( (CAST(adm.admittime AS DATE) - CAST(pat.dob AS DATE)) / 365, 4) AS age
, adm.ethnicity, adm.ADMISSION_TYPE
--, adm.hospital_expire_flag
, CASE when adm.deathtime between ie.intime and ie.outtime THEN 1 ELSE 0 END AS mort_icu
, DENSE_RANK() OVER (PARTITION BY adm.subject_id ORDER BY adm.admittime) AS hospstay_seq
, CASE
WHEN DENSE_RANK() OVER (PARTITION BY adm.subject_id ORDER BY adm.admittime) = 1 THEN 1
ELSE 0 END AS first_hosp_stay
-- icu level factors
, ie.intime, ie.outtime
, ie.FIRST_CAREUNIT
, ROUND( (CAST(ie.outtime AS DATE) - CAST(ie.intime AS DATE)) , 4) AS los_icu
, DENSE_RANK() OVER (PARTITION BY ie.hadm_id ORDER BY ie.intime) AS icustay_seq
-- first ICU stay *for the current hospitalization*
, CASE
WHEN DENSE_RANK() OVER (PARTITION BY ie.hadm_id ORDER BY ie.intime) = 1 THEN 1
ELSE 0 END AS first_icu_stay
FROM icustays ie
INNER JOIN admissions adm
ON ie.hadm_id = adm.hadm_id
INNER JOIN patients pat
ON ie.subject_id = pat.subject_id
WHERE adm.has_chartevents_data = 1
ORDER BY ie.subject_id, adm.admittime, ie.intime;
"""
den = pd.read_sql_query(denquery,con)
#----drop patients with ICU stays shorter than 48 hours
den['los_icu_hr'] = (den.outtime - den.intime).astype('timedelta64[h]')
den = den[(den.los_icu_hr >= 48)]
den = den[(den.age<300)]
den.drop('los_icu_hr', axis = 1, inplace = True)
# den.isnull().sum()
#----clean up
# micu --> medical
# csru --> cardiac surgery recovery unit
# sicu --> surgical icu
# tsicu --> Trauma Surgical Intensive Care Unit
# NICU --> Neonatal
den['adult_icu'] = np.where(den['first_careunit'].isin(['PICU', 'NICU']), 0, 1)
den['gender'] = np.where(den['gender']=="M", 1, 0)
# no need to yell
den.ethnicity = den.ethnicity.str.lower()
den.loc[den.ethnicity.str.contains('^white'), 'ethnicity'] = 'white'
den.loc[den.ethnicity.str.contains('^black'), 'ethnicity'] = 'black'
den.loc[(den.ethnicity.str.contains('^hisp')) | (den.ethnicity.str.contains('^latin')), 'ethnicity'] = 'hispanic'
den.loc[den.ethnicity.str.contains('^asia'), 'ethnicity'] = 'asian'
den.loc[~(den.ethnicity.str.contains('|'.join(['white', 'black', 'hispanic', 'asian']))), 'ethnicity'] = 'other'
den = pd.concat([den, pd.get_dummies(den['ethnicity'], prefix='eth')], axis = 1)
den = pd.concat([den, pd.get_dummies(den['admission_type'], prefix='admType')], axis = 1)
den.drop(['diagnosis', 'hospstay_seq', 'los_icu','icustay_seq', 'admittime', 'dischtime','los_hospital', 'intime', 'outtime', 'ethnicity', 'admission_type', 'first_careunit'], axis = 1, inplace = True)
#========= 48 hour vitals query
# the value ranges encoded in the query below are plausible physiological
# limits, used to clean up the data
vitquery = \
"""
-- This query pivots the vital signs for the first 48 hours of a patient's stay
-- Vital signs include heart rate, blood pressure, respiration rate, and temperature
-- DROP MATERIALIZED VIEW IF EXISTS vitalsfirstday CASCADE;
-- create materialized view vitalsfirstday as
SELECT pvt.subject_id, pvt.hadm_id, pvt.icustay_id
-- Easier names
, min(case when VitalID = 1 then valuenum else null end) as HeartRate_Min
, max(case when VitalID = 1 then valuenum else null end) as HeartRate_Max
, avg(case when VitalID = 1 then valuenum else null end) as HeartRate_Mean
, min(case when VitalID = 2 then valuenum else null end) as SysBP_Min
, max(case when VitalID = 2 then valuenum else null end) as SysBP_Max
, avg(case when VitalID = 2 then valuenum else null end) as SysBP_Mean
, min(case when VitalID = 3 then valuenum else null end) as DiasBP_Min
, max(case when VitalID = 3 then valuenum else null end) as DiasBP_Max
, avg(case when VitalID = 3 then valuenum else null end) as DiasBP_Mean
, min(case when VitalID = 4 then valuenum else null end) as MeanBP_Min
, max(case when VitalID = 4 then valuenum else null end) as MeanBP_Max
, avg(case when VitalID = 4 then valuenum else null end) as MeanBP_Mean
, min(case when VitalID = 5 then valuenum else null end) as RespRate_Min
, max(case when VitalID = 5 then valuenum else null end) as RespRate_Max
, avg(case when VitalID = 5 then valuenum else null end) as RespRate_Mean
, min(case when VitalID = 6 then valuenum else null end) as TempC_Min
, max(case when VitalID = 6 then valuenum else null end) as TempC_Max
, avg(case when VitalID = 6 then valuenum else null end) as TempC_Mean
, min(case when VitalID = 7 then valuenum else null end) as SpO2_Min
, max(case when VitalID = 7 then valuenum else null end) as SpO2_Max
, avg(case when VitalID = 7 then valuenum else null end) as SpO2_Mean
, min(case when VitalID = 8 then valuenum else null end) as Glucose_Min
, max(case when VitalID = 8 then valuenum else null end) as Glucose_Max
, avg(case when VitalID = 8 then valuenum else null end) as Glucose_Mean
FROM (
select ie.subject_id, ie.hadm_id, ie.icustay_id
, case
when itemid in (211,220045) and valuenum > 0 and valuenum < 300 then 1 -- HeartRate
when itemid in (51,442,455,6701,220179,220050) and valuenum > 0 and valuenum < 400 then 2 -- SysBP
when itemid in (8368,8440,8441,8555,220180,220051) and valuenum > 0 and valuenum < 300 then 3 -- DiasBP
when itemid in (456,52,6702,443,220052,220181,225312) and valuenum > 0 and valuenum < 300 then 4 -- MeanBP
when itemid in (615,618,220210,224690) and valuenum > 0 and valuenum < 70 then 5 -- RespRate
when itemid in (223761,678) and valuenum > 70 and valuenum < 120 then 6 -- TempF, converted to degC in valuenum call
when itemid in (223762,676) and valuenum > 10 and valuenum < 50 then 6 -- TempC
when itemid in (646,220277) and valuenum > 0 and valuenum <= 100 then 7 -- SpO2
when itemid in (807,811,1529,3745,3744,225664,220621,226537) and valuenum > 0 then 8 -- Glucose
else null end as VitalID
-- convert F to C
, case when itemid in (223761,678) then (valuenum-32)/1.8 else valuenum end as valuenum
from icustays ie
left join chartevents ce
on ie.subject_id = ce.subject_id and ie.hadm_id = ce.hadm_id and ie.icustay_id = ce.icustay_id
and ce.charttime between ie.intime and ie.intime + interval '48' hour
-- exclude rows marked as error
and ce.error IS DISTINCT FROM 1
where ce.itemid in
(
-- HEART RATE
211, --"Heart Rate"
220045, --"Heart Rate"
-- Systolic/diastolic
51, -- Arterial BP [Systolic]
442, -- Manual BP [Systolic]
455, -- NBP [Systolic]
6701, -- Arterial BP #2 [Systolic]
220179, -- Non Invasive Blood Pressure systolic
220050, -- Arterial Blood Pressure systolic
8368, -- Arterial BP [Diastolic]
8440, -- Manual BP [Diastolic]
8441, -- NBP [Diastolic]
8555, -- Arterial BP #2 [Diastolic]
220180, -- Non Invasive Blood Pressure diastolic
220051, -- Arterial Blood Pressure diastolic
-- MEAN ARTERIAL PRESSURE
456, --"NBP Mean"
52, --"Arterial BP Mean"
6702, -- Arterial BP Mean #2
443, -- Manual BP Mean(calc)
220052, --"Arterial Blood Pressure mean"
220181, --"Non Invasive Blood Pressure mean"
225312, --"ART BP mean"
-- RESPIRATORY RATE
618,-- Respiratory Rate
615,-- Resp Rate (Total)
220210,-- Respiratory Rate
224690, -- Respiratory Rate (Total)
-- SPO2, peripheral
646, 220277,
-- GLUCOSE, both lab and fingerstick
807,-- Fingerstick Glucose
811,-- Glucose (70-105)
1529,-- Glucose
3745,-- BloodGlucose
3744,-- Blood Glucose
225664,-- Glucose finger stick
220621,-- Glucose (serum)
226537,-- Glucose (whole blood)
-- TEMPERATURE
223762, -- "Temperature Celsius"
676, -- "Temperature C"
223761, -- "Temperature Fahrenheit"
678 -- "Temperature F"
)
) pvt
group by pvt.subject_id, pvt.hadm_id, pvt.icustay_id
order by pvt.subject_id, pvt.hadm_id, pvt.icustay_id;
"""
vit48 = pd.read_sql_query(vitquery,con)
vit48.isnull().sum()
#===============48 hour labs query
# This query does the following:
# it extracts the lab events in the first 48 hours
# it labels the lab items and cleans up their values
# it produces one row of lab values per ICU stay,
# covering the first 48 hours of that stay
labquery = \
"""
WITH pvt AS (
--- ie is the icu stay
--- ad is the admissions table
--- le is the lab events table
SELECT ie.subject_id, ie.hadm_id, ie.icustay_id, le.charttime
-- here we assign labels to ITEMIDs
-- this also fuses together multiple ITEMIDs containing the same data
, CASE
when le.itemid = 50868 then 'ANION GAP'
when le.itemid = 50862 then 'ALBUMIN'
when le.itemid = 50882 then 'BICARBONATE'
when le.itemid = 50885 then 'BILIRUBIN'
when le.itemid = 50912 then 'CREATININE'
when le.itemid = 50806 then 'CHLORIDE'
when le.itemid = 50902 then 'CHLORIDE'
when le.itemid = 50809 then 'GLUCOSE'
when le.itemid = 50931 then 'GLUCOSE'
when le.itemid = 50810 then 'HEMATOCRIT'
when le.itemid = 51221 then 'HEMATOCRIT'
when le.itemid = 50811 then 'HEMOGLOBIN'
when le.itemid = 51222 then 'HEMOGLOBIN'
when le.itemid = 50813 then 'LACTATE'
when le.itemid = 50960 then 'MAGNESIUM'
when le.itemid = 50970 then 'PHOSPHATE'
when le.itemid = 51265 then 'PLATELET'
when le.itemid = 50822 then 'POTASSIUM'
when le.itemid = 50971 then 'POTASSIUM'
when le.itemid = 51275 then 'PTT'
when le.itemid = 51237 then 'INR'
when le.itemid = 51274 then 'PT'
when le.itemid = 50824 then 'SODIUM'
when le.itemid = 50983 then 'SODIUM'
when le.itemid = 51006 then 'BUN'
when le.itemid = 51300 then 'WBC'
when le.itemid = 51301 then 'WBC'
ELSE null
END AS label
, -- add in some sanity checks on the values
-- the where clause below requires all valuenum to be > 0,
-- so these are only upper limit checks
CASE
when le.itemid = 50862 and le.valuenum > 10 then null -- g/dL 'ALBUMIN'
when le.itemid = 50868 and le.valuenum > 10000 then null -- mEq/L 'ANION GAP'
when le.itemid = 50882 and le.valuenum > 10000 then null -- mEq/L 'BICARBONATE'
when le.itemid = 50885 and le.valuenum > 150 then null -- mg/dL 'BILIRUBIN'
when le.itemid = 50806 and le.valuenum > 10000 then null -- mEq/L 'CHLORIDE'
when le.itemid = 50902 and le.valuenum > 10000 then null -- mEq/L 'CHLORIDE'
when le.itemid = 50912 and le.valuenum > 150 then null -- mg/dL 'CREATININE'
when le.itemid = 50809 and le.valuenum > 10000 then null -- mg/dL 'GLUCOSE'
when le.itemid = 50931 and le.valuenum > 10000 then null -- mg/dL 'GLUCOSE'
when le.itemid = 50810 and le.valuenum > 100 then null -- % 'HEMATOCRIT'
when le.itemid = 51221 and le.valuenum > 100 then null -- % 'HEMATOCRIT'
when le.itemid = 50811 and le.valuenum > 50 then null -- g/dL 'HEMOGLOBIN'
when le.itemid = 51222 and le.valuenum > 50 then null -- g/dL 'HEMOGLOBIN'
when le.itemid = 50813 and le.valuenum > 50 then null -- mmol/L 'LACTATE'
when le.itemid = 50960 and le.valuenum > 60 then null -- mmol/L 'MAGNESIUM'
when le.itemid = 50970 and le.valuenum > 60 then null -- mg/dL 'PHOSPHATE'
when le.itemid = 51265 and le.valuenum > 10000 then null -- K/uL 'PLATELET'
when le.itemid = 50822 and le.valuenum > 30 then null -- mEq/L 'POTASSIUM'
when le.itemid = 50971 and le.valuenum > 30 then null -- mEq/L 'POTASSIUM'
when le.itemid = 51275 and le.valuenum > 150 then null -- sec 'PTT'
when le.itemid = 51237 and le.valuenum > 50 then null -- 'INR'
when le.itemid = 51274 and le.valuenum > 150 then null -- sec 'PT'
when le.itemid = 50824 and le.valuenum > 200 then null -- mEq/L == mmol/L 'SODIUM'
when le.itemid = 50983 and le.valuenum > 200 then null -- mEq/L == mmol/L 'SODIUM'
when le.itemid = 51006 and le.valuenum > 300 then null -- 'BUN'
when le.itemid = 51300 and le.valuenum > 1000 then null -- 'WBC'
when le.itemid = 51301 and le.valuenum > 1000 then null -- 'WBC'
ELSE le.valuenum
END AS valuenum
FROM icustays ie
LEFT JOIN labevents le
ON le.subject_id = ie.subject_id
AND le.hadm_id = ie.hadm_id
-- TODO: they are using lab times 6 hours before the start of the
-- ICU stay.
AND le.charttime between (ie.intime - interval '6' hour)
AND (ie.intime + interval '48' hour)
AND le.itemid IN
(
-- comment is: LABEL | CATEGORY | FLUID | NUMBER OF ROWS IN LABEVENTS
50868, -- ANION GAP | CHEMISTRY | BLOOD | 769895
50862, -- ALBUMIN | CHEMISTRY | BLOOD | 146697
50882, -- BICARBONATE | CHEMISTRY | BLOOD | 780733
50885, -- BILIRUBIN, TOTAL | CHEMISTRY | BLOOD | 238277
50912, -- CREATININE | CHEMISTRY | BLOOD | 797476
50902, -- CHLORIDE | CHEMISTRY | BLOOD | 795568
50806, -- CHLORIDE, WHOLE BLOOD | BLOOD GAS | BLOOD | 48187
50931, -- GLUCOSE | CHEMISTRY | BLOOD | 748981
50809, -- GLUCOSE | BLOOD GAS | BLOOD | 196734
51221, -- HEMATOCRIT | HEMATOLOGY | BLOOD | 881846
50810, -- HEMATOCRIT, CALCULATED | BLOOD GAS | BLOOD | 89715
51222, -- HEMOGLOBIN | HEMATOLOGY | BLOOD | 752523
50811, -- HEMOGLOBIN | BLOOD GAS | BLOOD | 89712
50813, -- LACTATE | BLOOD GAS | BLOOD | 187124
50960, -- MAGNESIUM | CHEMISTRY | BLOOD | 664191
50970, -- PHOSPHATE | CHEMISTRY | BLOOD | 590524
51265, -- PLATELET COUNT | HEMATOLOGY | BLOOD | 778444
50971, -- POTASSIUM | CHEMISTRY | BLOOD | 845825
50822, -- POTASSIUM, WHOLE BLOOD | BLOOD GAS | BLOOD | 192946
51275, -- PTT | HEMATOLOGY | BLOOD | 474937
51237, -- INR(PT) | HEMATOLOGY | BLOOD | 471183
51274, -- PT | HEMATOLOGY | BLOOD | 469090
50983, -- SODIUM | CHEMISTRY | BLOOD | 808489
50824, -- SODIUM, WHOLE BLOOD | BLOOD GAS | BLOOD | 71503
51006, -- UREA NITROGEN | CHEMISTRY | BLOOD | 791925
51301, -- WHITE BLOOD CELLS | HEMATOLOGY | BLOOD | 753301
51300 -- WBC COUNT | HEMATOLOGY | BLOOD | 2371
)
AND le.valuenum IS NOT null
AND le.valuenum > 0 -- lab values cannot be 0 and cannot be negative
LEFT JOIN admissions ad
ON ie.subject_id = ad.subject_id
AND ie.hadm_id = ad.hadm_id
),
ranked AS (
SELECT pvt.*, DENSE_RANK() OVER (PARTITION BY
pvt.subject_id, pvt.hadm_id,pvt.icustay_id,pvt.label ORDER BY pvt.charttime) as drank
FROM pvt
)
SELECT r.subject_id, r.hadm_id, r.icustay_id
, max(case when label = 'ANION GAP' then valuenum else null end) as ANIONGAP
, max(case when label = 'ALBUMIN' then valuenum else null end) as ALBUMIN
, max(case when label = 'BICARBONATE' then valuenum else null end) as BICARBONATE
, max(case when label = 'BILIRUBIN' then valuenum else null end) as BILIRUBIN
, max(case when label = 'CREATININE' then valuenum else null end) as CREATININE
, max(case when label = 'CHLORIDE' then valuenum else null end) as CHLORIDE
, max(case when label = 'GLUCOSE' then valuenum else null end) as GLUCOSE
, max(case when label = 'HEMATOCRIT' then valuenum else null end) as HEMATOCRIT
, max(case when label = 'HEMOGLOBIN' then valuenum else null end) as HEMOGLOBIN
, max(case when label = 'LACTATE' then valuenum else null end) as LACTATE
, max(case when label = 'MAGNESIUM' then valuenum else null end) as MAGNESIUM
, max(case when label = 'PHOSPHATE' then valuenum else null end) as PHOSPHATE
, max(case when label = 'PLATELET' then valuenum else null end) as PLATELET
, max(case when label = 'POTASSIUM' then valuenum else null end) as POTASSIUM
, max(case when label = 'PTT' then valuenum else null end) as PTT
, max(case when label = 'INR' then valuenum else null end) as INR
, max(case when label = 'PT' then valuenum else null end) as PT
, max(case when label = 'SODIUM' then valuenum else null end) as SODIUM
, max(case when label = 'BUN' then valuenum else null end) as BUN
, max(case when label = 'WBC' then valuenum else null end) as WBC
FROM ranked r
WHERE r.drank = 1
GROUP BY r.subject_id, r.hadm_id, r.icustay_id, r.drank
ORDER BY r.subject_id, r.hadm_id, r.icustay_id, r.drank;
"""
lab48 = pd.read_sql_query(labquery,con)
#=========notes
notesquery = \
"""
SELECT fin.subject_id, fin.hadm_id, fin.icustay_id, string_agg(fin.text, ' ') as chartext
FROM (
select ie.subject_id, ie.hadm_id, ie.icustay_id, ne.text
from icustays ie
left join noteevents ne
on ie.subject_id = ne.subject_id and ie.hadm_id = ne.hadm_id
and ne.charttime between ie.intime and ie.intime + interval '48' hour
--and ne.iserror != '1'
) fin
group by fin.subject_id, fin.hadm_id, fin.icustay_id
order by fin.subject_id, fin.hadm_id, fin.icustay_id;
"""
notes48 = pd.read_sql_query(notesquery,con)
#=====combine all variables
mort_ds = den.merge(vit48,how = 'left', on = ['subject_id', 'hadm_id', 'icustay_id'])
mort_ds = mort_ds.merge(lab48,how = 'left', on = ['subject_id', 'hadm_id', 'icustay_id'])
#======missing values (following joydeep ghosh's paper)
# create means by age group and gender
mort_ds['age_group'] = pd.cut(mort_ds['age'], [-1,5,10,15,20, 25, 40,60, 80, 200],
labels = ['l5','5_10', '10_15', '15_20', '20_25', '25_40', '40_60', '60_80', '80p'])
mort_ds = mort_ds.groupby(['age_group', 'gender'])
mort_ds = mort_ds.transform(replace)
#mort_ds.drop('age_group', 1, inplace =True )
# one missing variable
adult_icu = mort_ds[(mort_ds.adult_icu==1)].dropna()
# create training and testing labels
msk = np.random.rand(len(adult_icu)) < 0.7
adult_icu['train'] = np.where(msk, 1, 0)
adult_icu.to_csv(os.path.join(mimicdir, 'adult_icu.gz'), compression='gzip', index = False)
# notes
adult_notes = notes48.merge(adult_icu[['train', 'subject_id', 'hadm_id', 'icustay_id', 'mort_icu']], how = 'right', on = ['subject_id', 'hadm_id', 'icustay_id'])
adult_notes.to_csv(os.path.join(mimicdir, 'adult_notes.gz'), compression='gzip', index = False)
| 41.254202
| 202
| 0.685135
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 17,507
| 0.891531
|
4918d6d679945db227bfcba68023e986105933df
| 14,098
|
py
|
Python
|
release/alert.py
|
77loopin/ray
|
9322f6aab53f4ca5baf5a3573e1ffde12feae519
|
[
"Apache-2.0"
] | 21,382
|
2016-09-26T23:12:52.000Z
|
2022-03-31T21:47:45.000Z
|
release/alert.py
|
77loopin/ray
|
9322f6aab53f4ca5baf5a3573e1ffde12feae519
|
[
"Apache-2.0"
] | 19,689
|
2016-09-17T08:21:25.000Z
|
2022-03-31T23:59:30.000Z
|
release/alert.py
|
gramhagen/ray
|
c18caa4db36d466718bdbcb2229aa0b2dc03da1f
|
[
"Apache-2.0"
] | 4,114
|
2016-09-23T18:54:01.000Z
|
2022-03-31T15:07:32.000Z
|
import argparse
from collections import defaultdict, Counter
from typing import Any, List, Tuple, Mapping, Optional
import datetime
import hashlib
import json
import logging
import os
import requests
import sys
import boto3
from e2e import GLOBAL_CONFIG
from alerts.default import handle_result as default_handle_result
from alerts.rllib_tests import handle_result as rllib_tests_handle_result
from alerts.long_running_tests import handle_result as \
long_running_tests_handle_result
from alerts.tune_tests import handle_result as tune_tests_handle_result
from alerts.xgboost_tests import handle_result as xgboost_tests_handle_result
SUITE_TO_FN = {
"long_running_tests": long_running_tests_handle_result,
"rllib_tests": rllib_tests_handle_result,
"tune_tests": tune_tests_handle_result,
"xgboost_tests": xgboost_tests_handle_result,
}
GLOBAL_CONFIG["RELEASE_AWS_DB_STATE_TABLE"] = "alert_state"
GLOBAL_CONFIG["SLACK_WEBHOOK"] = os.environ.get("SLACK_WEBHOOK", "")
GLOBAL_CONFIG["SLACK_CHANNEL"] = os.environ.get("SLACK_CHANNEL",
"#oss-test-cop")
RESULTS_LIMIT = 120
logger = logging.getLogger()
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(stream=sys.stdout)
formatter = logging.Formatter(fmt="[%(levelname)s %(asctime)s] "
"%(filename)s: %(lineno)d "
"%(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
def maybe_fetch_slack_webhook():
if GLOBAL_CONFIG["SLACK_WEBHOOK"] in [None, ""]:
print("Missing SLACK_WEBHOOK, retrieving from AWS secrets store")
GLOBAL_CONFIG["SLACK_WEBHOOK"] = boto3.client(
"secretsmanager", region_name="us-west-2"
).get_secret_value(
SecretId="arn:aws:secretsmanager:us-west-2:029272617770:secret:"
"release-automation/"
"slack-webhook-Na0CFP")["SecretString"]
def _obj_hash(obj: Any) -> str:
json_str = json.dumps(obj, sort_keys=True, ensure_ascii=True)
sha = hashlib.sha256()
sha.update(json_str.encode())
return sha.hexdigest()
def fetch_latest_alerts(rds_data_client):
schema = GLOBAL_CONFIG["RELEASE_AWS_DB_STATE_TABLE"]
sql = (f"""
SELECT DISTINCT ON (category, test_suite, test_name)
category, test_suite, test_name, last_result_hash,
last_notification_dt
FROM {schema}
ORDER BY category, test_suite, test_name, last_notification_dt DESC
LIMIT {RESULTS_LIMIT}
""")
result = rds_data_client.execute_statement(
database=GLOBAL_CONFIG["RELEASE_AWS_DB_NAME"],
secretArn=GLOBAL_CONFIG["RELEASE_AWS_DB_SECRET_ARN"],
resourceArn=GLOBAL_CONFIG["RELEASE_AWS_DB_RESOURCE_ARN"],
schema=schema,
sql=sql,
)
for row in result["records"]:
category, test_suite, test_name, last_result_hash, \
last_notification_dt = (
r["stringValue"]
if "stringValue" in r else None
for r in row
)
last_notification_dt = datetime.datetime.strptime(
last_notification_dt, "%Y-%m-%d %H:%M:%S")
yield category, test_suite, test_name, last_result_hash, \
last_notification_dt
def fetch_latest_results(rds_data_client,
fetch_since: Optional[datetime.datetime] = None):
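    # Yields the most recent result row per (category, test_suite, test_name),
    # optionally restricted to rows created on or after `fetch_since`.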
schema = GLOBAL_CONFIG["RELEASE_AWS_DB_TABLE"]
sql = (f"""
SELECT DISTINCT ON (category, test_suite, test_name)
created_on, category, test_suite, test_name, status, results,
artifacts, last_logs
FROM {schema} """)
parameters = []
if fetch_since is not None:
sql += "WHERE created_on >= :created_on "
parameters = [
{
"name": "created_on",
"typeHint": "TIMESTAMP",
"value": {
"stringValue": fetch_since.strftime("%Y-%m-%d %H:%M:%S")
},
},
]
sql += "ORDER BY category, test_suite, test_name, created_on DESC "
sql += f"LIMIT {RESULTS_LIMIT}"
result = rds_data_client.execute_statement(
database=GLOBAL_CONFIG["RELEASE_AWS_DB_NAME"],
secretArn=GLOBAL_CONFIG["RELEASE_AWS_DB_SECRET_ARN"],
resourceArn=GLOBAL_CONFIG["RELEASE_AWS_DB_RESOURCE_ARN"],
schema=schema,
sql=sql,
parameters=parameters,
)
for row in result["records"]:
created_on, category, test_suite, test_name, status, results, \
artifacts, last_logs = (
r["stringValue"] if "stringValue" in r else None for r in row)
# Calculate hash before converting strings to objects
result_obj = (created_on, category, test_suite, test_name, status,
results, artifacts, last_logs)
result_json = json.dumps(result_obj)
result_hash = _obj_hash(result_json)
# Convert some strings to python objects
created_on = datetime.datetime.strptime(created_on,
"%Y-%m-%d %H:%M:%S")
results = json.loads(results)
artifacts = json.loads(artifacts)
yield result_hash, created_on, category, test_suite, test_name, \
status, results, artifacts, last_logs
def mark_as_handled(rds_data_client, update: bool, category: str,
test_suite: str, test_name: str, result_hash: str,
last_notification_dt: datetime.datetime):
schema = GLOBAL_CONFIG["RELEASE_AWS_DB_STATE_TABLE"]
if not update:
sql = (f"""
INSERT INTO {schema}
(category, test_suite, test_name,
last_result_hash, last_notification_dt)
VALUES (:category, :test_suite, :test_name,
:last_result_hash, :last_notification_dt)
""")
else:
sql = (f"""
UPDATE {schema}
SET last_result_hash=:last_result_hash,
last_notification_dt=:last_notification_dt
WHERE category=:category AND test_suite=:test_suite
AND test_name=:test_name
""")
rds_data_client.execute_statement(
database=GLOBAL_CONFIG["RELEASE_AWS_DB_NAME"],
parameters=[
{
"name": "category",
"value": {
"stringValue": category
}
},
{
"name": "test_suite",
"value": {
"stringValue": test_suite or ""
}
},
{
"name": "test_name",
"value": {
"stringValue": test_name
}
},
{
"name": "last_result_hash",
"value": {
"stringValue": result_hash
}
},
{
"name": "last_notification_dt",
"typeHint": "TIMESTAMP",
"value": {
"stringValue": last_notification_dt.strftime(
"%Y-%m-%d %H:%M:%S")
},
},
],
secretArn=GLOBAL_CONFIG["RELEASE_AWS_DB_SECRET_ARN"],
resourceArn=GLOBAL_CONFIG["RELEASE_AWS_DB_RESOURCE_ARN"],
schema=schema,
sql=sql,
)
def post_alerts_to_slack(channel: str, alerts: List[Tuple[str, str, str, str]],
non_alerts: Mapping[str, int]):
if len(alerts) == 0:
logger.info("No alerts to post to slack.")
return
markdown_lines = [
f"* {len(alerts)} new release test failures found!*",
"",
]
category_alerts = defaultdict(list)
for (category, test_suite, test_name, alert) in alerts:
category_alerts[category].append(
f" *{test_suite}/{test_name}* failed: {alert}")
for category, alert_list in category_alerts.items():
markdown_lines.append(f"Branch: *{category}*")
markdown_lines.extend(alert_list)
markdown_lines.append("")
total_non_alerts = sum(n for n in non_alerts.values())
non_alert_detail = [f"{n} on {c}" for c, n in non_alerts.items()]
markdown_lines += [
f"Additionally, {total_non_alerts} tests passed successfully "
f"({', '.join(non_alert_detail)})."
]
slack_url = GLOBAL_CONFIG["SLACK_WEBHOOK"]
resp = requests.post(
slack_url,
json={
"text": "\n".join(markdown_lines),
"channel": channel,
"username": "Fail Bot",
"icon_emoji": ":red_circle:",
},
)
print(resp.status_code)
print(resp.text)
def post_statistics_to_slack(channel: str,
alerts: List[Tuple[str, str, str, str]],
non_alerts: Mapping[str, int]):
total_alerts = len(alerts)
category_alerts = defaultdict(list)
for (category, test_suite, test_name, alert) in alerts:
category_alerts[category].append(f"`{test_suite}/{test_name}`")
alert_detail = [f"{len(a)} on {c}" for c, a in category_alerts.items()]
total_non_alerts = sum(n for n in non_alerts.values())
non_alert_detail = [f"{n} on {c}" for c, n in non_alerts.items()]
markdown_lines = [
"*Periodic release test report*", "", f"In the past 24 hours, "
f"*{total_non_alerts}* release tests finished successfully, and "
f"*{total_alerts}* release tests failed."
]
markdown_lines.append("")
if total_alerts:
markdown_lines.append(f"*Failing:* {', '.join(alert_detail)}")
for c, a in category_alerts.items():
markdown_lines.append(f" *{c}*: {', '.join(sorted(a))}")
else:
markdown_lines.append("*Failing:* None")
markdown_lines.append("")
if total_non_alerts:
markdown_lines.append(f"*Passing:* {', '.join(non_alert_detail)}")
else:
markdown_lines.append("*Passing:* None")
slack_url = GLOBAL_CONFIG["SLACK_WEBHOOK"]
resp = requests.post(
slack_url,
json={
"text": "\n".join(markdown_lines),
"channel": channel,
"username": "Fail Bot",
"icon_emoji": ":red_circle:",
},
)
print(resp.status_code)
print(resp.text)
def handle_results_and_get_alerts(
rds_data_client,
fetch_since: Optional[datetime.datetime] = None,
always_try_alert: bool = False,
no_status_update: bool = False):
# First build a map of last notifications
last_notifications_map = {}
for category, test_suite, test_name, last_result_hash, \
last_notification_dt in fetch_latest_alerts(rds_data_client):
last_notifications_map[(category, test_suite,
test_name)] = (last_result_hash,
last_notification_dt)
alerts = []
non_alerts = Counter()
# Then fetch latest results
for result_hash, created_on, category, test_suite, test_name, status, \
results, artifacts, last_logs in fetch_latest_results(
rds_data_client, fetch_since=fetch_since):
key = (category, test_suite, test_name)
try_alert = always_try_alert
if key in last_notifications_map:
# If we have an alert for this key, fetch info
last_result_hash, last_notification_dt = last_notifications_map[
key]
if last_result_hash != result_hash:
# If we got a new result, handle new result
try_alert = True
# Todo: maybe alert again after some time?
else:
try_alert = True
if try_alert:
handle_fn = SUITE_TO_FN.get(test_suite, None)
if not handle_fn:
logger.warning(f"No handle for suite {test_suite}")
alert = default_handle_result(created_on, category, test_suite,
test_name, status, results,
artifacts, last_logs)
else:
alert = handle_fn(created_on, category, test_suite, test_name,
status, results, artifacts, last_logs)
if alert:
logger.warning(
f"Alert raised for test {test_suite}/{test_name} "
f"({category}): {alert}")
alerts.append((category, test_suite, test_name, alert))
else:
logger.debug(
f"No alert raised for test {test_suite}/{test_name} "
f"({category})")
non_alerts[category] += 1
if not no_status_update:
mark_as_handled(rds_data_client, key in last_notifications_map,
category, test_suite, test_name, result_hash,
datetime.datetime.now())
return alerts, non_alerts
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--stats",
action="store_true",
default=False,
help="Finish quickly for training.")
args = parser.parse_args()
maybe_fetch_slack_webhook()
rds_data_client = boto3.client("rds-data", region_name="us-west-2")
if args.stats:
# Only update last 24 hour stats
fetch_since = datetime.datetime.now() - datetime.timedelta(days=1)
alerts, non_alerts = handle_results_and_get_alerts(
rds_data_client,
fetch_since=fetch_since,
always_try_alert=True,
no_status_update=True)
post_statistics_to_slack(GLOBAL_CONFIG["SLACK_CHANNEL"], alerts,
non_alerts)
else:
alerts, non_alerts = handle_results_and_get_alerts(rds_data_client)
post_alerts_to_slack(GLOBAL_CONFIG["SLACK_CHANNEL"], alerts,
non_alerts)
| 34.724138
| 79
| 0.589658
| 0
| 0
| 3,227
| 0.228898
| 0
| 0
| 0
| 0
| 3,798
| 0.2694
|
4918f13347223ad7457de28e1dde690394ca0299
| 2,176
|
py
|
Python
|
bios-token.py
|
emahear/openusm
|
96bb62b91f4b5520e14d86ae86e1b320404134e6
|
[
"MIT"
] | 4
|
2019-08-04T05:50:46.000Z
|
2020-04-16T19:24:11.000Z
|
bios-token.py
|
emahear/openusm
|
96bb62b91f4b5520e14d86ae86e1b320404134e6
|
[
"MIT"
] | null | null | null |
bios-token.py
|
emahear/openusm
|
96bb62b91f4b5520e14d86ae86e1b320404134e6
|
[
"MIT"
] | 6
|
2019-08-03T12:57:47.000Z
|
2020-06-08T01:50:43.000Z
|
import os
import argparse
def _create_parser():
parser = argparse.ArgumentParser(description='Welcome to Universal Systems Manager '
'Bios Token Change')
parser.add_argument('--verbose',
help='Turn on verbose logging',
action='store_true')
parser.add_argument('-i', '--idrac',
help='iDRAC IP of the Host'
)
parser.add_argument('-n', '--nfs',
help='NFS server IP address',
default=None)
parser.add_argument('-s', '--share',
help='NFS Share folder'
)
parser.add_argument('-c', '--config',
help='XML File to be imported'
)
parser.add_argument('-f', '--ips',
help='IP files to be updated'
)
return parser
def main():
parser = _create_parser()
args = parser.parse_args()
nfs_server = args.nfs
idrac = args.idrac
nfs_share = args.share
config = args.config
os.system("docker build -t ajeetraina/usm_redfish . ")
if(args.ips):
ip_file = args.ips
ips_file = open(ip_file)
ips = ips_file.readlines()
for ip in ips:
print ("Iteration %s"%ip)
ip = ip.strip()
command = "docker run --rm --log-driver=syslog --log-opt syslog-address=tcp://0.0.0.0:5000 --log-opt syslog-facility=daemon -itd --name=%s_server -e IDRAC_IP=%s -e NFS_SERVER=%s -e NFS_SERVER_SHARE=%s -e CONFIG_FILE=%s ajeetraina/usm_redfish python import_scp.py &"%(ip,ip,nfs_server,nfs_share,config)
print(command)
os.system(command)
if (args.idrac):
os.system(
"docker run --rm --log-driver=syslog --log-opt syslog-address=tcp://0.0.0.0:5000 --log-opt syslog-facility=daemon -itd --name=%s_server -e IDRAC_IP=%s -e NFS_SERVER=%s -e NFS_SERVER_SHARE=%s -e CONFIG_FILE=%s ajeetraina/usm_redfish python import_scp.py &" % (
idrac,idrac, nfs_server, nfs_share, config))
if __name__ == '__main__':
main()
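# Example invocations (hypothetical addresses and paths):
#   python bios-token.py -i 192.0.2.10 -n 192.0.2.20 -s /nfs/scp_share -c config.xml
#   python bios-token.py -f idrac_ips.txt -n 192.0.2.20 -s /nfs/scp_share -c config.xml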
| 32.477612
| 306
| 0.552849
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 857
| 0.393842
|
491929694fdf621f13fc8f553f27ac207d5a59c5
| 333
|
py
|
Python
|
Examples/user_data/sharptrack.py
|
FedeClaudi/brainrender
|
b1d8adcef52615fcd86a083be4dc48c68a8b0bb9
|
[
"MIT"
] | null | null | null |
Examples/user_data/sharptrack.py
|
FedeClaudi/brainrender
|
b1d8adcef52615fcd86a083be4dc48c68a8b0bb9
|
[
"MIT"
] | null | null | null |
Examples/user_data/sharptrack.py
|
FedeClaudi/brainrender
|
b1d8adcef52615fcd86a083be4dc48c68a8b0bb9
|
[
"MIT"
] | null | null | null |
import brainrender
brainrender.SHADER_STYLE = 'cartoon'
from brainrender.scene import Scene
sharptrack_file = 'Examples/example_files/sharptrack_probe_points.mat'
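# Path to a SHARP-Track 'probe points' .mat output; substitute your own export here.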
scene = Scene(use_default_key_bindings=True)
scene.add_brain_regions('TH', alpha=.2, wireframe=True)
scene.add_probe_from_sharptrack(sharptrack_file)
scene.render()
| 23.785714
| 70
| 0.831832
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 65
| 0.195195
|
4919e36bd5888cd73891195c85651a0ab2da447b
| 7,910
|
py
|
Python
|
homeassistant/components/metoffice/sensor.py
|
basicpail/core
|
5cc54618c5af3f75c08314bf2375cc7ac40d2b7e
|
[
"Apache-2.0"
] | 7
|
2019-08-15T13:36:58.000Z
|
2020-03-18T10:46:29.000Z
|
homeassistant/components/metoffice/sensor.py
|
basicpail/core
|
5cc54618c5af3f75c08314bf2375cc7ac40d2b7e
|
[
"Apache-2.0"
] | 87
|
2020-07-06T22:22:54.000Z
|
2022-03-31T06:01:46.000Z
|
homeassistant/components/metoffice/sensor.py
|
winning1120xx/home-assistant
|
53d4c0ce2d374b5e97bbdc37742656c27adf8eea
|
[
"Apache-2.0"
] | 11
|
2020-12-16T13:48:14.000Z
|
2022-02-01T00:28:05.000Z
|
"""Support for UK Met Office weather service."""
from __future__ import annotations
from homeassistant.components.sensor import SensorEntity, SensorEntityDescription
from homeassistant.const import (
ATTR_ATTRIBUTION,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_TEMPERATURE,
LENGTH_KILOMETERS,
PERCENTAGE,
SPEED_MILES_PER_HOUR,
TEMP_CELSIUS,
UV_INDEX,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.typing import ConfigType
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import (
ATTRIBUTION,
CONDITION_CLASSES,
DOMAIN,
METOFFICE_COORDINATES,
METOFFICE_DAILY_COORDINATOR,
METOFFICE_HOURLY_COORDINATOR,
METOFFICE_NAME,
MODE_3HOURLY_LABEL,
MODE_DAILY,
MODE_DAILY_LABEL,
VISIBILITY_CLASSES,
VISIBILITY_DISTANCE_CLASSES,
)
ATTR_LAST_UPDATE = "last_update"
ATTR_SENSOR_ID = "sensor_id"
ATTR_SITE_ID = "site_id"
ATTR_SITE_NAME = "site_name"
SENSOR_TYPES: tuple[SensorEntityDescription, ...] = (
SensorEntityDescription(
key="name",
name="Station Name",
device_class=None,
native_unit_of_measurement=None,
icon="mdi:label-outline",
entity_registry_enabled_default=False,
),
SensorEntityDescription(
key="weather",
name="Weather",
device_class=None,
native_unit_of_measurement=None,
icon="mdi:weather-sunny", # but will adapt to current conditions
entity_registry_enabled_default=True,
),
SensorEntityDescription(
key="temperature",
name="Temperature",
device_class=DEVICE_CLASS_TEMPERATURE,
native_unit_of_measurement=TEMP_CELSIUS,
icon=None,
entity_registry_enabled_default=True,
),
SensorEntityDescription(
key="feels_like_temperature",
name="Feels Like Temperature",
device_class=DEVICE_CLASS_TEMPERATURE,
native_unit_of_measurement=TEMP_CELSIUS,
icon=None,
entity_registry_enabled_default=False,
),
SensorEntityDescription(
key="wind_speed",
name="Wind Speed",
device_class=None,
native_unit_of_measurement=SPEED_MILES_PER_HOUR,
icon="mdi:weather-windy",
entity_registry_enabled_default=True,
),
SensorEntityDescription(
key="wind_direction",
name="Wind Direction",
device_class=None,
native_unit_of_measurement=None,
icon="mdi:compass-outline",
entity_registry_enabled_default=False,
),
SensorEntityDescription(
key="wind_gust",
name="Wind Gust",
device_class=None,
native_unit_of_measurement=SPEED_MILES_PER_HOUR,
icon="mdi:weather-windy",
entity_registry_enabled_default=False,
),
SensorEntityDescription(
key="visibility",
name="Visibility",
device_class=None,
native_unit_of_measurement=None,
icon="mdi:eye",
entity_registry_enabled_default=False,
),
SensorEntityDescription(
key="visibility_distance",
name="Visibility Distance",
device_class=None,
native_unit_of_measurement=LENGTH_KILOMETERS,
icon="mdi:eye",
entity_registry_enabled_default=False,
),
SensorEntityDescription(
key="uv",
name="UV Index",
device_class=None,
native_unit_of_measurement=UV_INDEX,
icon="mdi:weather-sunny-alert",
entity_registry_enabled_default=True,
),
SensorEntityDescription(
key="precipitation",
name="Probability of Precipitation",
device_class=None,
native_unit_of_measurement=PERCENTAGE,
icon="mdi:weather-rainy",
entity_registry_enabled_default=True,
),
SensorEntityDescription(
key="humidity",
name="Humidity",
device_class=DEVICE_CLASS_HUMIDITY,
native_unit_of_measurement=PERCENTAGE,
icon=None,
entity_registry_enabled_default=False,
),
)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigType, async_add_entities
) -> None:
"""Set up the Met Office weather sensor platform."""
hass_data = hass.data[DOMAIN][entry.entry_id]
async_add_entities(
[
MetOfficeCurrentSensor(
hass_data[METOFFICE_HOURLY_COORDINATOR],
hass_data,
True,
description,
)
for description in SENSOR_TYPES
]
+ [
MetOfficeCurrentSensor(
hass_data[METOFFICE_DAILY_COORDINATOR],
hass_data,
False,
description,
)
for description in SENSOR_TYPES
],
False,
)
class MetOfficeCurrentSensor(CoordinatorEntity, SensorEntity):
"""Implementation of a Met Office current weather condition sensor."""
def __init__(
self,
coordinator,
hass_data,
use_3hourly,
description: SensorEntityDescription,
):
"""Initialize the sensor."""
super().__init__(coordinator)
self.entity_description = description
mode_label = MODE_3HOURLY_LABEL if use_3hourly else MODE_DAILY_LABEL
self._attr_name = f"{hass_data[METOFFICE_NAME]} {description.name} {mode_label}"
self._attr_unique_id = f"{description.name}_{hass_data[METOFFICE_COORDINATES]}"
if not use_3hourly:
self._attr_unique_id = f"{self._attr_unique_id}_{MODE_DAILY}"
self.use_3hourly = use_3hourly
@property
def native_value(self):
"""Return the state of the sensor."""
value = None
if self.entity_description.key == "visibility_distance" and hasattr(
self.coordinator.data.now, "visibility"
):
value = VISIBILITY_DISTANCE_CLASSES.get(
self.coordinator.data.now.visibility.value
)
if self.entity_description.key == "visibility" and hasattr(
self.coordinator.data.now, "visibility"
):
value = VISIBILITY_CLASSES.get(self.coordinator.data.now.visibility.value)
elif self.entity_description.key == "weather" and hasattr(
self.coordinator.data.now, self.entity_description.key
):
value = [
k
for k, v in CONDITION_CLASSES.items()
if self.coordinator.data.now.weather.value in v
][0]
elif hasattr(self.coordinator.data.now, self.entity_description.key):
value = getattr(self.coordinator.data.now, self.entity_description.key)
if hasattr(value, "value"):
value = value.value
return value
@property
def icon(self):
"""Return the icon for the entity card."""
value = self.entity_description.icon
if self.entity_description.key == "weather":
value = self.state
if value is None:
value = "sunny"
elif value == "partlycloudy":
value = "partly-cloudy"
value = f"mdi:weather-{value}"
return value
@property
def extra_state_attributes(self):
"""Return the state attributes of the device."""
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
ATTR_LAST_UPDATE: self.coordinator.data.now.date,
ATTR_SENSOR_ID: self.entity_description.key,
ATTR_SITE_ID: self.coordinator.data.site.id,
ATTR_SITE_NAME: self.coordinator.data.site.name,
}
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
return (
self.entity_description.entity_registry_enabled_default and self.use_3hourly
)
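To make the weather-code lookup inside native_value easier to follow, here is the same reverse-mapping pattern in isolation. TOY_CONDITION_CLASSES below is an invented subset, not the real CONDITION_CLASSES imported from .const.

# Toy illustration of the reverse lookup used for the "weather" sensor:
# each condition name maps to the weather codes it covers, and the first
# key whose code list contains the observed code is taken.
TOY_CONDITION_CLASSES = {
    "sunny": [1],
    "partlycloudy": [3],
    "rainy": [12, 15],
}
observed_code = 15
condition = [k for k, v in TOY_CONDITION_CLASSES.items() if observed_code in v][0]
print(condition)  # -> "rainy"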
| 30.898438
| 93
| 0.643236
| 3,102
| 0.392162
| 0
| 0
| 2,276
| 0.287737
| 756
| 0.095575
| 1,282
| 0.162073
|
491bbb7a18db6baa7c684edc4d966b84daa2ba53
| 3,513
|
py
|
Python
|
examples/advanced.py
|
ajrichardson/formlayout
|
4b267ad29cbbdab9baf1bef3fcc48f23c699eebc
|
[
"MIT"
] | null | null | null |
examples/advanced.py
|
ajrichardson/formlayout
|
4b267ad29cbbdab9baf1bef3fcc48f23c699eebc
|
[
"MIT"
] | null | null | null |
examples/advanced.py
|
ajrichardson/formlayout
|
4b267ad29cbbdab9baf1bef3fcc48f23c699eebc
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright © 2009 Pierre Raybaut
# Licensed under the terms of the MIT License
# (see formlayout.py for details)
"""
Simple formlayout example
Please take a look at formlayout.py for more examples
(at the end of the script, after the 'if __name__ == "__main__":' line)
"""
import datetime
# for normal usage
from formlayout import fedit
# for programming usage
from formlayout import QLineEdit
def create_datalist_example():
test = [('str *', 'this is a string'),
('str_m *', """this is a
MULTILINE
string"""),
('file *', 'file'),
('list *', [0, '1', '3', '4']),
('tuple *', (0, '1', '3', '4')),
('list2', ['--', ('none', 'None'), ('--', 'Dashed'),
('-.', 'DashDot'), ('-', 'Solid'),
('steps', 'Steps'), (':', 'Dotted')]),
('float', 1.2),
(None, [('fi&rst', first_function), ('s&econd', second_function)]),
(None, 'Other:'),
('slider to 30 at 20 with ticks', 'slider:30:@20'),
('slider from -100 to 50 at -10', 'slider:-100:50@-10'),
('int', 12),
('font', ('Arial', 10, False, True)),
('color', '#123409'),
('bool', True),
('date', datetime.date(2010, 10, 10)),
('time', datetime.time(12, 30)),
('datetime', datetime.datetime(2010, 10, 10)),
]
return test
def create_datagroup_example():
datalist = create_datalist_example()
return ((datalist, "Category 1", "Category 1 comment"),
(datalist, "Category 2", "Category 2 comment"),
(datalist, "Category 3", "Category 3 comment"))
def apply_function(result, widgets):
print('result:', result)
print('widgets:', widgets)
for widget in widgets:
if isinstance(widget, QLineEdit) and not widget.validator():
widget.setText(widget.text() + ' Apply !')
def first_function(result, widgets):
print('first')
print('result:', result)
print('widgets:', widgets)
for widget in widgets:
if isinstance(widget, QLineEdit) and not widget.validator():
widget.setText(widget.text() + ' First !')
def second_function(result, widgets):
print('second')
print('result:', result)
print('widgets:', widgets)
for widget in widgets:
if isinstance(widget, QLineEdit) and not widget.validator():
widget.setText(widget.text() + ' Second !')
#--------- datalist example
datalist = create_datalist_example()
print("result:", fedit(datalist, title="Example",
comment="This is just an <b>example</b>.",
apply=('Custom &Apply button', apply_function),
ok='Custom &OK button',
cancel='Custom &Cancel button',
result='dict',
type='questions',
scrollbar=True))
#--------- datagroup example
datagroup = create_datagroup_example()
print("result:", fedit(datagroup, "Global title", result='JSON'))
#--------- datagroup inside a datagroup example
datalist = create_datalist_example()
datagroup = create_datagroup_example()
print("result:", fedit(((datagroup, "Title 1", "Tab 1 comment"),
(datalist, "Title 2", "Tab 2 comment"),
(datalist, "Title 3", "Tab 3 comment")),
"Global title", result='XML'))
| 36.216495
| 79
| 0.542841
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,315
| 0.374217
|
491c2d65ca77e28affc08be455a8dcb0f85ffc8e
| 1,527
|
py
|
Python
|
PyBank/main.py
|
jackaloppy/python-challenge
|
71690ca4059fee9c31334347275866431f5d9155
|
[
"RSA-MD"
] | null | null | null |
PyBank/main.py
|
jackaloppy/python-challenge
|
71690ca4059fee9c31334347275866431f5d9155
|
[
"RSA-MD"
] | null | null | null |
PyBank/main.py
|
jackaloppy/python-challenge
|
71690ca4059fee9c31334347275866431f5d9155
|
[
"RSA-MD"
] | null | null | null |
# Import Modules
import os
import csv
# Set the path
filepath = os.path.join("Resources","budget_data.csv")
# Open the CSV file
with open(filepath) as csvfile:
csvreader = csv.reader(csvfile, delimiter=",")
# Skip the header row
next(csvreader)
# Set up some numbers
month = 0
total = 0
maxpro = 0
minpro = 0
    # csv.reader can only iterate the file once, so all output must come from this single loop.
for row in csvreader:
month += 1
total += int(row[1])
if maxpro < int(row[1]):
maxpro = int(row[1])
maxmon = row[0]
if minpro > int(row[1]):
minpro = int(row[1])
minmon = row[0]
# Direct print to txt file
f = open("analysis/output.txt", "a")
print("Financial Analysis", file =f)
print("----------------------------", file = f)
print("Total Months: " + str(month), file = f)
print("Total: $" + str(total), file=f)
print("Average Change: $" + str(total/month), file = f)
print("Greatest Increase in Profits: " + maxmon + " ($" + str(maxpro) +")", file =f)
print("Greatest Decrease in Profits: " + minmon + " ($" + str(minpro) +")", file =f)
f.close()
# Print out to terminal
print("Financial Analysis")
print("----------------------------")
print("Total Months: " + str(month))
print("Total: $" + str(total))
print("Average Change: $" + str(total/month))
print("Greatest Increase in Profits: " + maxmon + " ($" + str(maxpro) +")")
print("Greatest Decrease in Profits: " + minmon + " ($" + str(minpro) +")")
| 33.933333
| 92
| 0.579568
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 633
| 0.414538
|
491cf38094ed0cb56e1412d6daa74c8867a4538f
| 4,103
|
py
|
Python
|
odoo-13.0/addons/web/models/ir_http.py
|
VaibhavBhujade/Blockchain-ERP-interoperability
|
b5190a037fb6615386f7cbad024d51b0abd4ba03
|
[
"MIT"
] | 12
|
2021-03-26T08:39:40.000Z
|
2022-03-16T02:20:10.000Z
|
odoo-13.0/addons/web/models/ir_http.py
|
VaibhavBhujade/Blockchain-ERP-interoperability
|
b5190a037fb6615386f7cbad024d51b0abd4ba03
|
[
"MIT"
] | 13
|
2020-12-20T16:00:21.000Z
|
2022-03-14T14:55:30.000Z
|
odoo-13.0/addons/web/models/ir_http.py
|
VaibhavBhujade/Blockchain-ERP-interoperability
|
b5190a037fb6615386f7cbad024d51b0abd4ba03
|
[
"MIT"
] | 17
|
2020-08-31T11:18:49.000Z
|
2022-02-09T05:57:31.000Z
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import hashlib
import json
from odoo import api, models
from odoo.http import request
from odoo.tools import ustr
from odoo.addons.web.controllers.main import module_boot, HomeStaticTemplateHelpers
import odoo
class Http(models.AbstractModel):
_inherit = 'ir.http'
def webclient_rendering_context(self):
return {
'menu_data': request.env['ir.ui.menu'].load_menus(request.session.debug),
'session_info': self.session_info(),
}
def session_info(self):
user = request.env.user
version_info = odoo.service.common.exp_version()
user_context = request.session.get_context() if request.session.uid else {}
session_info = {
"uid": request.session.uid,
"is_system": user._is_system() if request.session.uid else False,
"is_admin": user._is_admin() if request.session.uid else False,
"user_context": request.session.get_context() if request.session.uid else {},
"db": request.session.db,
"server_version": version_info.get('server_version'),
"server_version_info": version_info.get('server_version_info'),
"name": user.name,
"username": user.login,
"partner_display_name": user.partner_id.display_name,
"company_id": user.company_id.id if request.session.uid else None, # YTI TODO: Remove this from the user context
"partner_id": user.partner_id.id if request.session.uid and user.partner_id else None,
"web.base.url": self.env['ir.config_parameter'].sudo().get_param('web.base.url', default=''),
}
if self.env.user.has_group('base.group_user'):
# the following is only useful in the context of a webclient bootstrapping
# but is still included in some other calls (e.g. '/web/session/authenticate')
# to avoid access errors and unnecessary information, it is only included for users
# with access to the backend ('internal'-type users)
mods = module_boot()
qweb_checksum = HomeStaticTemplateHelpers.get_qweb_templates_checksum(addons=mods, debug=request.session.debug)
lang = user_context.get("lang")
translation_hash = request.env['ir.translation'].get_web_translations_hash(mods, lang)
menu_json_utf8 = json.dumps(request.env['ir.ui.menu'].load_menus(request.session.debug), default=ustr, sort_keys=True).encode()
cache_hashes = {
"load_menus": hashlib.sha1(menu_json_utf8).hexdigest(),
"qweb": qweb_checksum,
"translations": translation_hash,
}
session_info.update({
# current_company should be default_company
"user_companies": {'current_company': (user.company_id.id, user.company_id.name), 'allowed_companies': [(comp.id, comp.name) for comp in user.company_ids]},
"currencies": self.get_currencies(),
"show_effect": True,
"display_switch_company_menu": user.has_group('base.group_multi_company') and len(user.company_ids) > 1,
"cache_hashes": cache_hashes,
})
return session_info
@api.model
def get_frontend_session_info(self):
return {
'is_admin': request.session.uid and self.env.user._is_admin() or False,
'is_system': request.session.uid and self.env.user._is_system() or False,
'is_website_user': request.session.uid and self.env.user._is_public() or False,
'user_id': request.session.uid and self.env.user.id or False,
'is_frontend': True,
}
def get_currencies(self):
Currency = request.env['res.currency']
currencies = Currency.search([]).read(['symbol', 'position', 'decimal_places'])
return {c['id']: {'symbol': c['symbol'], 'position': c['position'], 'digits': [69,c['decimal_places']]} for c in currencies}
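A standalone sketch of the cache-hash pattern used in session_info: serialize the structure deterministically (sort_keys), then hash the bytes so the client can detect stale cached menus. The menus payload and the default=str fallback below are stand-ins for the real ir.ui.menu data and odoo.tools.ustr.

# Sketch only: made-up menu payload, default=str instead of ustr.
import hashlib
import json

menus = {"root": [{"id": 1, "name": "Settings"}, {"id": 2, "name": "Apps"}]}
menu_json_utf8 = json.dumps(menus, default=str, sort_keys=True).encode()
print(hashlib.sha1(menu_json_utf8).hexdigest())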
| 48.845238
| 172
| 0.6427
| 3,788
| 0.923227
| 0
| 0
| 447
| 0.108945
| 0
| 0
| 1,150
| 0.280283
|
491db1b8d8dc21a65a486fcf67ecab8e646adeff
| 1,500
|
py
|
Python
|
anonymize-attributes.py
|
thormeier-fhnw-repos/sna-holaspirit-to-gephi
|
e83d44e887608d4d584ded825be9cd950d87e590
|
[
"MIT"
] | 2
|
2018-11-12T22:10:54.000Z
|
2021-08-30T10:13:53.000Z
|
anonymize-attributes.py
|
thormeier-fhnw-repos/sna-holaspirit-to-gephi
|
e83d44e887608d4d584ded825be9cd950d87e590
|
[
"MIT"
] | null | null | null |
anonymize-attributes.py
|
thormeier-fhnw-repos/sna-holaspirit-to-gephi
|
e83d44e887608d4d584ded825be9cd950d87e590
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import sys
import argparse
sys.path.append('./')
from src.utils.list_to_dict import list_to_dict
from src.utils.read_csv import read_csv
from src.utils.map_to_list_csv import map_to_list_csv
from src.gephi.write_csv import write_csv
print("")
print("-----------------------------")
print("Anonymize attributes")
print("-----------------------------")
print("")
parser = argparse.ArgumentParser(description="Anonymizes a given attributes CSV")
required_parser = parser.add_argument_group('required named arguments')
required_parser.add_argument("--attributes-file", dest="attrs", help="Attributes, a given file with attributes for Gephi", required=True)
required_parser.add_argument("--person-file", dest="persons", help="Personss, a list of persons and their anonymized tokens", required=True)
args = parser.parse_args()
attributes_file = args.attrs
persons_file = args.persons
print("Reading attributes file...")
attributes_raw = read_csv(attributes_file)
attributes = list_to_dict(attributes_raw[1:])
print("Reading persons file...")
persons = list_to_dict(read_csv(persons_file)[1:])
print("Anonymizing...")
anonymized_attributes = list()
for key, value in attributes.items():
name = persons[key][0]
row = value
row.insert(0, name)
anonymized_attributes.append(row)
print("Write anonymized attributes to attributes file again")
anonymized_attributes.insert(0, attributes_raw[0])
write_csv(anonymized_attributes, attributes_file)
print("All done!")
| 28.846154
| 140
| 0.744
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 463
| 0.308667
|
491f6d6d11d857e14ccaa0306230176130993c29
| 5,487
|
py
|
Python
|
custom_laboratory/custom_laboratory/doctype/vaccination/vaccination.py
|
panhavad/custom_laboratory
|
a86d24bd955dc078ded044e714955cdf0c257176
|
[
"MIT"
] | null | null | null |
custom_laboratory/custom_laboratory/doctype/vaccination/vaccination.py
|
panhavad/custom_laboratory
|
a86d24bd955dc078ded044e714955cdf0c257176
|
[
"MIT"
] | 1
|
2021-01-12T08:27:54.000Z
|
2021-01-12T08:27:54.000Z
|
custom_laboratory/custom_laboratory/doctype/vaccination/vaccination.py
|
panhavad/custom_laboratory
|
a86d24bd955dc078ded044e714955cdf0c257176
|
[
"MIT"
] | 1
|
2021-01-12T08:34:12.000Z
|
2021-01-12T08:34:12.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Duk Panhavad and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
from frappe.utils import getdate, cstr
from datetime import timedelta
class Vaccination(Document):
def on_submit(self):
frappe.db.set_value(self.doctype,self.name,"submitted_date", getdate())
insert_vaccination_to_medical_record(self)
frappe.db.set_value("Vaccination", self.name, "status", "Completed")
def on_cancel(self):
delete_vaccination_from_medical_record(self)
frappe.db.set_value("Vaccination", self.name, "status", "Cancelled")
self.reload()
@frappe.whitelist()
def create_multiple(doctype, docname):
vaccination_created = False
if doctype == "Sales Invoice":
vaccination_created = create_vaccination_from_invoice(docname)
if vaccination_created:
frappe.msgprint(_("Vaccination(s) {0} created".format(vaccination_created)))
else:
frappe.msgprint(_("No Vaccinations created"))
def create_vaccination_from_invoice(invoice_name):
vaccination_names = list()
vaccinations_created = False
invoice = frappe.get_doc("Sales Invoice", invoice_name)
if invoice.patient:
patient = frappe.get_doc("Patient", invoice.patient)
for item in invoice.items:
vaccination_created = 0
print('-------', item.item_group)
print('=======', item.reference_dt)
if item.reference_dt == "Vaccination Ordered": #check if the invoice created already
vaccination_created = 1
vaccinations_created = "Already create before!! Cannot"
if vaccination_created != 1:
if item.item_group == "Vaccination":
template = get_vaccination_template(item.item_code)
if template:
if template.vaccination_dosage_items:
dosage_durations = [0]#today
for num_day in template.vaccination_dosage_items.split('-'):
dosage_durations.append(int(num_day))
dosage_dates = [getdate() + timedelta(days=each_duration) for each_duration in dosage_durations]
for dosage_number, each_dosage_date in enumerate(dosage_dates):
vaccination = create_vaccination_doc(True, patient, template, invoice.company, each_dosage_date, dosage_number+1)
vaccination.save(ignore_permissions = True)
vaccinations_created = True
vaccination_names.append(vaccination.name)
if not vaccinations_created:
vaccinations_created = vaccination.name
else:
vaccinations_created = ", ".join(vaccination_names)
else:
vaccination = create_vaccination_doc(True, patient, template, invoice.company, getdate())
vaccination.save(ignore_permissions = True)
vaccinations_created = vaccination.name
if item.reference_dt != "Vaccination Ordered":
frappe.db.set_value("Sales Invoice Item", item.name, "reference_dt", "Vaccination Ordered")
frappe.db.set_value("Sales Invoice Item", item.name, "reference_dn", vaccination.name)
return vaccinations_created
def get_vaccination_template(item):
template_id = check_template_exists(item)
if template_id:
return frappe.get_doc("Vaccination Template", template_id)
return False
def check_template_exists(item):
template_exists = frappe.db.exists(
"Vaccination Template",
{
'item': item
}
)
if template_exists:
return template_exists
return False
# dosage_number defaults to 1 so the single-dose branch (no dosage schedule) can omit it
def create_vaccination_doc(invoiced, patient, template, company, each_dosage_date, dosage_number=1):
vaccination = frappe.new_doc("Vaccination")
vaccination.invoiced = invoiced
vaccination.patient = patient.name
vaccination.patient_age = patient.get_age()
vaccination.patient_sex = patient.sex
vaccination.email = patient.email
vaccination.mobile = patient.mobile
vaccination.report_preference = patient.report_preference
vaccination.vaccination_template = template.name
vaccination.vaccination_name = template.name
vaccination.dosage_date = each_dosage_date
vaccination.dosage_number = dosage_number
vaccination.company = company
return vaccination
def insert_vaccination_to_medical_record(doc):
if doc.vaccination_name:
vac_name = frappe.bold(_("Vaccination Conducted: ")) + cstr(doc.vaccination_name)
else:
vac_name = ""
if doc.dosage_number:
dos_number = frappe.bold(_("Dosage Number: ")) + cstr(doc.dosage_number)
else:
dos_number = ""
if doc.dosage_date:
planed_date = frappe.bold(_("Planed Dosage Date: ")) + cstr(doc.dosage_date)
else:
planed_date = ""
if doc.vaccination_comment:
comment = frappe.bold(_("Comment: ")) + cstr(doc.vaccination_comment)
else:
comment = ""
actual_date = frappe.bold(_("Actual Dosage Date: ")) + cstr(getdate())
subject = vac_name + "<br>" + dos_number + "<br>" + planed_date + "<br>" + actual_date + "<br>" + comment
medical_record = frappe.new_doc("Patient Medical Record")
medical_record.patient = doc.patient
medical_record.subject = subject
medical_record.status = "Open"
medical_record.communication_date = getdate()
medical_record.reference_doctype = "Vaccination"
medical_record.reference_name = doc.name
medical_record.reference_owner = doc.owner
medical_record.save(ignore_permissions=True)
def delete_vaccination_from_medical_record(self):
medical_record_id = frappe.db.sql("select name from `tabPatient Medical Record` where reference_name=%s",(self.name))
if medical_record_id and medical_record_id[0][0]:
frappe.delete_doc("Patient Medical Record", medical_record_id[0][0])
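For clarity, here is the dosage-schedule expansion from create_vaccination_from_invoice in isolation, using a made-up template value and datetime.date in place of frappe's getdate().

# Illustration only: "21-90" is a hypothetical vaccination_dosage_items value,
# meaning extra doses 21 and 90 days after today, on top of day 0.
from datetime import date, timedelta

vaccination_dosage_items = "21-90"
dosage_durations = [0]
for num_day in vaccination_dosage_items.split('-'):
    dosage_durations.append(int(num_day))
dosage_dates = [date.today() + timedelta(days=d) for d in dosage_durations]
print(dosage_dates)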
| 38.104167
| 121
| 0.755786
| 398
| 0.072535
| 0
| 0
| 344
| 0.062694
| 0
| 0
| 868
| 0.158192
|
491fd8107e4c9d56c5674e827165e680a0067c06
| 4,445
|
py
|
Python
|
FinalRound_ImprovedAccuracy_Functionality/training/utils/detectingCARColor.py
|
tejasmagia/DetectCarParkingSlot_Contest
|
e9e38f12347aa3cc40234efb000959df20d28f21
|
[
"MIT"
] | 9
|
2019-10-20T15:15:06.000Z
|
2020-09-07T09:44:37.000Z
|
FinalRound_ImprovedAccuracy_Functionality/training/utils/detectingCARColor.py
|
tejasmagia/DetectCarParkingSlot_Contest
|
e9e38f12347aa3cc40234efb000959df20d28f21
|
[
"MIT"
] | null | null | null |
FinalRound_ImprovedAccuracy_Functionality/training/utils/detectingCARColor.py
|
tejasmagia/DetectCarParkingSlot_Contest
|
e9e38f12347aa3cc40234efb000959df20d28f21
|
[
"MIT"
] | null | null | null |
from sklearn.cluster import KMeans
import cv2
import PIL
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from matplotlib import image as img1
import pandas as pd
from scipy.cluster.vq import whiten
import os
class DominantColors:
CLUSTERS = None
IMAGEPATH = None
IMAGE = None
COLORS = None
LABELS = None
BASEWIDTH = 256
def __init__(self, image, clusters=3):
self.CLUSTERS = clusters
self.IMAGEPATH = image
def dominantColors(self):
# read image
img = cv2.imread(self.IMAGEPATH)
# resize image
imgh, imgw, _ = img.shape
wpercent = (self.BASEWIDTH / float(imgw))
hsize = int((float(imgh) * float(wpercent)))
        img = cv2.resize(img, (self.BASEWIDTH, hsize), interpolation=cv2.INTER_AREA)  # PIL.Image.ANTIALIAS is not a cv2 interpolation flag; INTER_AREA suits downscaling
# convert to rgb from bgr
img = cv2.cvtColor(img, cv2.COLOR_RGB2Luv)
# reshaping to a list of pixels
img = img.reshape((img.shape[0] * img.shape[1], 3))
# save image after operations
self.IMAGE = img
# using k-means to cluster pixels
kmeans = KMeans(n_clusters=self.CLUSTERS)
kmeans.fit(img)
# the cluster centers are our dominant colors.
self.COLORS = kmeans.cluster_centers_
# save labels
self.LABELS = kmeans.labels_
# returning after converting to integer from float
return self.COLORS.astype(int)
def rgb_to_hex(self, rgb):
return '#%02x%02x%02x' % (int(rgb[0]), int(rgb[1]), int(rgb[2]))
def analyseRGB(self):
r = []
g = []
b = []
image = img1.imread(self.IMAGEPATH)
for line in image:
for pixel in line:
# print(pixel)
temp_r, temp_g, temp_b = pixel
r.append(temp_r)
g.append(temp_g)
b.append(temp_b)
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(r, g, b)
plt.show()
df = pd.DataFrame({'red': r, 'blue': b, 'green': g})
df['scaled_red'] = whiten(df['red'])
df['scaled_blue'] = whiten(df['blue'])
df['scaled_green'] = whiten(df['green'])
df.sample(n=10)
from scipy.cluster.vq import kmeans
cluster_centers, distortion = kmeans(df[['scaled_red', 'scaled_green', 'scaled_blue']], 2)
print(cluster_centers)
colors = []
r_std, g_std, b_std = df[['red', 'green', 'blue']].std()
for cluster_center in cluster_centers:
scaled_r, scaled_g, scaled_b = cluster_center
colors.append((scaled_r * r_std / 255, scaled_g * g_std / 255, scaled_b * b_std / 255))
plt.imshow([colors])
plt.show()
def plotClusters(self):
# plotting
fig = plt.figure()
ax = Axes3D(fig)
for label, pix in zip(self.LABELS, self.IMAGE):
ax.scatter(pix[0], pix[1], pix[2], color=self.rgb_to_hex(self.COLORS[label]))
plt.show()
def plotHistogram(self):
# labels form 0 to no. of clusters
numLabels = np.arange(0, self.CLUSTERS + 1)
# create frequency count tables
(hist, _) = np.histogram(self.LABELS, bins=numLabels)
hist = hist.astype("float")
hist /= hist.sum()
# appending frequencies to cluster centers
colors = self.COLORS
# descending order sorting as per frequency count
colors = colors[(-hist).argsort()]
hist = hist[(-hist).argsort()]
# creating empty chart
chart = np.zeros((50, 500, 3), np.uint8)
start = 0
# creating color rectangles
for i in range(self.CLUSTERS):
end = start + hist[i] * 500
# getting rgb values
r = colors[i][0]
g = colors[i][1]
b = colors[i][2]
# using cv2.rectangle to plot colors
cv2.rectangle(chart, (int(start), 0), (int(end), 50), (r, g, b), -1)
start = end
# display chart
plt.figure()
plt.axis("off")
plt.imshow(chart)
plt.show()
def _main_():
clusters = 8
for img in sorted(os.listdir('output\\predicted\\')):
print(img)
dc = DominantColors('..\\..\\data\\output\\predicted\\{0}'.format(img), clusters)
colors = dc.dominantColors()
dc.analyseRGB()
if __name__ == '__main__':
_main_()
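A minimal, self-contained version of the k-means step inside dominantColors(), run on synthetic pixels so no image file or OpenCV call is needed.

# Sketch only: cluster fake (N, RGB) pixels and read the cluster centres back
# as the dominant colours.
import numpy as np
from sklearn.cluster import KMeans

rng = np.random.default_rng(0)
pixels = rng.integers(0, 256, size=(500, 3)).astype(float)
kmeans = KMeans(n_clusters=3, n_init=10).fit(pixels)
print(kmeans.cluster_centers_.astype(int))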
| 28.49359
| 99
| 0.566479
| 3,889
| 0.874916
| 0
| 0
| 0
| 0
| 0
| 0
| 781
| 0.175703
|
49206ede4930182521d4ce9c6b49dda8aef894c6
| 97
|
py
|
Python
|
cash/settings/product.py
|
anshengme/cash
|
7b24338ea2f3f92fe82f668335bb2eb6e6479f9e
|
[
"MIT"
] | 18
|
2019-01-04T01:58:03.000Z
|
2021-06-25T09:03:58.000Z
|
cash/settings/product.py
|
anshengme/cash
|
7b24338ea2f3f92fe82f668335bb2eb6e6479f9e
|
[
"MIT"
] | 3
|
2019-02-26T16:24:40.000Z
|
2020-04-04T10:41:38.000Z
|
cash/settings/product.py
|
anshengme/cash
|
7b24338ea2f3f92fe82f668335bb2eb6e6479f9e
|
[
"MIT"
] | 5
|
2019-02-26T15:32:52.000Z
|
2019-04-22T09:35:27.000Z
|
from .base import *
DEBUG = False
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)
| 16.166667
| 60
| 0.752577
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 19
| 0.195876
|
492186225f16d44bf92c164978829eca7c8d9540
| 2,807
|
py
|
Python
|
seqpos/lib/python2.7/site-packages/mercurial/lsprofcalltree.py
|
guanjue/seqpos
|
ab9308ad128547ca968a1d944490710e583703bc
|
[
"MIT"
] | null | null | null |
seqpos/lib/python2.7/site-packages/mercurial/lsprofcalltree.py
|
guanjue/seqpos
|
ab9308ad128547ca968a1d944490710e583703bc
|
[
"MIT"
] | null | null | null |
seqpos/lib/python2.7/site-packages/mercurial/lsprofcalltree.py
|
guanjue/seqpos
|
ab9308ad128547ca968a1d944490710e583703bc
|
[
"MIT"
] | null | null | null |
"""
lsprofcalltree.py - lsprof output which is readable by kcachegrind
Authors:
* David Allouche <david <at> allouche.net>
* Jp Calderone & Itamar Shtull-Trauring
* Johan Dahlin
This software may be used and distributed according to the terms
of the GNU General Public License, incorporated herein by reference.
"""
from __future__ import absolute_import
from . import (
pycompat,
)
def label(code):
if isinstance(code, str):
# built-in functions ('~' sorts at the end)
return '~' + pycompat.sysbytes(code)
else:
return '%s %s:%d' % (pycompat.sysbytes(code.co_name),
pycompat.sysbytes(code.co_filename),
code.co_firstlineno)
class KCacheGrind(object):
def __init__(self, profiler):
self.data = profiler.getstats()
self.out_file = None
def output(self, out_file):
self.out_file = out_file
out_file.write(b'events: Ticks\n')
self._print_summary()
for entry in self.data:
self._entry(entry)
def _print_summary(self):
max_cost = 0
for entry in self.data:
totaltime = int(entry.totaltime * 1000)
max_cost = max(max_cost, totaltime)
self.out_file.write(b'summary: %d\n' % max_cost)
def _entry(self, entry):
out_file = self.out_file
code = entry.code
if isinstance(code, str):
out_file.write(b'fi=~\n')
else:
out_file.write(b'fi=%s\n' % pycompat.sysbytes(code.co_filename))
out_file.write(b'fn=%s\n' % label(code))
inlinetime = int(entry.inlinetime * 1000)
if isinstance(code, str):
out_file.write(b'0 %d\n' % inlinetime)
else:
out_file.write(b'%d %d\n' % (code.co_firstlineno, inlinetime))
# recursive calls are counted in entry.calls
if entry.calls:
calls = entry.calls
else:
calls = []
if isinstance(code, str):
lineno = 0
else:
lineno = code.co_firstlineno
for subentry in calls:
self._subentry(lineno, subentry)
out_file.write(b'\n')
def _subentry(self, lineno, subentry):
out_file = self.out_file
code = subentry.code
out_file.write(b'cfn=%s\n' % label(code))
if isinstance(code, str):
out_file.write(b'cfi=~\n')
out_file.write(b'calls=%d 0\n' % subentry.callcount)
else:
out_file.write(b'cfi=%s\n' % pycompat.sysbytes(code.co_filename))
out_file.write(b'calls=%d %d\n' % (
subentry.callcount, code.co_firstlineno))
totaltime = int(subentry.totaltime * 1000)
out_file.write(b'%d %d\n' % (lineno, totaltime))
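One plausible way to drive this converter (not part of the module itself): cProfile.Profile exposes getstats(), which is all KCacheGrind needs, and output() writes bytes, so the target file is opened in binary mode. The import path below assumes the module is importable as mercurial.lsprofcalltree, matching its location in this tree.

# Hedged usage sketch; the import path and output filename are assumptions.
import cProfile
from mercurial.lsprofcalltree import KCacheGrind

def work():
    return sum(i * i for i in range(10000))

profiler = cProfile.Profile()
profiler.enable()
work()
profiler.disable()

with open('callgrind.out.example', 'wb') as out:
    KCacheGrind(profiler).output(out)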
| 29.861702
| 77
| 0.586035
| 2,069
| 0.737086
| 0
| 0
| 0
| 0
| 0
| 0
| 589
| 0.209833
|
4921f463d2a5ff012c886046b237e9741fc7a1a8
| 2,328
|
py
|
Python
|
nats/aio/errors.py
|
sr34/asyncio-nats
|
347a8e4b3eab275085858a6c8016feb3457905a3
|
[
"Apache-2.0"
] | null | null | null |
nats/aio/errors.py
|
sr34/asyncio-nats
|
347a8e4b3eab275085858a6c8016feb3457905a3
|
[
"Apache-2.0"
] | null | null | null |
nats/aio/errors.py
|
sr34/asyncio-nats
|
347a8e4b3eab275085858a6c8016feb3457905a3
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016-2018 The NATS Authors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import asyncio
STALE_CONNECTION = b"'Stale Connection'"
AUTHORIZATION_VIOLATION = b"'Authorization Violation'"
class NatsError(Exception):
pass
class ErrConnectionClosed(NatsError):
def __str__(self):
return "nats: Connection Closed"
class ErrSecureConnRequired(NatsError):
def __str__(self):
return "nats: Secure Connection required"
class ErrSecureConnWanted(NatsError):
def __str__(self):
return "nats: Secure Connection not available"
class ErrSecureConnFailed(NatsError):
def __str__(self):
return "nats: Secure Connection failed"
class ErrBadSubscription(NatsError):
def __str__(self):
return "nats: Invalid Subscription"
class ErrBadSubject(NatsError):
def __str__(self):
return "nats: Invalid Subject"
class ErrSlowConsumer(NatsError):
def __init__(self, subject=None, sid=None):
self.subject = subject
self.sid = sid
def __str__(self):
return "nats: Slow Consumer, messages dropped"
class ErrTimeout(asyncio.TimeoutError):
def __str__(self):
return "nats: Timeout"
class ErrBadTimeout(NatsError):
def __str__(self):
return "nats: Timeout Invalid"
class ErrAuthorization(NatsError):
def __str__(self):
return "nats: Authorization Failed"
class ErrNoServers(NatsError):
def __str__(self):
return "nats: No servers available for connection"
class ErrJsonParse(NatsError):
def __str__(self):
return "nats: Connect message, json parse err"
class ErrStaleConnection(NatsError):
def __str__(self):
return "nats: Stale Connection"
class ErrMaxPayload(NatsError):
def __str__(self):
return "nats: Maximum Payload Exceeded"
| 24.25
| 74
| 0.717354
| 1,590
| 0.68299
| 0
| 0
| 0
| 0
| 0
| 0
| 1,041
| 0.447165
|
492207457366b7b786ff7beb4e298c9226ecc040
| 1,912
|
py
|
Python
|
sqlite/app/elephant_queries.py
|
CurdtMillion/DS-Unit-3-Sprint-2-SQL-and-Databases
|
ba300b2da1e5fff153d8db76fdf1f67e82cefb9b
|
[
"MIT"
] | null | null | null |
sqlite/app/elephant_queries.py
|
CurdtMillion/DS-Unit-3-Sprint-2-SQL-and-Databases
|
ba300b2da1e5fff153d8db76fdf1f67e82cefb9b
|
[
"MIT"
] | null | null | null |
sqlite/app/elephant_queries.py
|
CurdtMillion/DS-Unit-3-Sprint-2-SQL-and-Databases
|
ba300b2da1e5fff153d8db76fdf1f67e82cefb9b
|
[
"MIT"
] | null | null | null |
import os
import psycopg2
import sqlite3
from psycopg2.extras import DictCursor
from dotenv import load_dotenv
DB_FILEPATH = os.path.join(os.path.dirname(__file__), "..", "data", "rpg.db")
connection = sqlite3.connect(DB_FILEPATH)
load_dotenv()
DB_NAME = os.getenv("DB_NAME", default="OOPS")
DB_USER = os.getenv("DB_USER", default="OOPS")
DB_PASSWORD = os.getenv("DB_PASSWORD", default="OOPS")
DB_HOST = os.getenv("DB_HOST", default="OOPS")
connection_pg = psycopg2.connect(dbname=DB_NAME, user=DB_USER, password=DB_PASSWORD, host=DB_HOST)
print("CONNECTION_PG: ", type(connection_pg))
print("CONNECTION: ", type(connection))
cursor = connection.cursor()
cursor_pg = connection_pg.cursor()
print("CURSOR: ", type(cursor))
print("CURSOR_PG: ", type(cursor_pg))
## Connecting to SQLite3 DB for RPG data ##
characters = cursor.execute('SELECT * FROM charactercreator_character').fetchall()
print(characters)
## Create Character Table in Postgres ##
create_character_table_query = '''
CREATE TABLE IF NOT EXISTS rpg_characters (
character_id SERIAL PRIMARY KEY,
name VARCHAR(30),
level INT,
exp INT,
hp INT,
strength INT,
intelligence INT,
dexterity INT,
wisdom INT
)
'''
add_data_query = '''
INSERT INTO rpg_characters (name, level, exp, hp, strength, intelligence, dexterity, wisdom) VALUES
(
'Mr. Wizard', 45, 55, 76, 100, 1000, 50, 1000
),
(
'Honey-Boo-Boo', 15, 2, 3, 5, 1, 1, 1
),
(
'Igor', 10, 43, 54, 123, 345, 66, 100
)
'''
cursor_pg.execute(create_character_table_query)
cursor_pg.execute(add_data_query)
for character in characters:
insert_query = f''' INSERT INTO rpg_characters
(character_id, name, level, exp, hp, strength, intelligence, dexterity, wisdom) VALUES
{character}
'''
cursor_pg.execute(insert_query)
connection.commit()
connection.close()
connection_pg.commit()
connection_pg.close()
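The f-string interpolation of {character} above works because sqlite3 returns plain tuples, but the safer equivalent is to let psycopg2 bind the values itself. A sketch of that variant follows; execution is left commented out since the connections are already closed above.

# Sketch only: parameterized insert instead of string interpolation.
insert_query_parameterized = """
    INSERT INTO rpg_characters
    (character_id, name, level, exp, hp, strength, intelligence, dexterity, wisdom)
    VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)
"""
# for character in characters:
#     cursor_pg.execute(insert_query_parameterized, character)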
| 25.157895
| 99
| 0.710251
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 911
| 0.476464
|
49227b0882dd3d460d38e78e915ad62615d5837a
| 12,852
|
py
|
Python
|
models/EmbracementLayer.py
|
gcunhase/EmbraceBERT
|
01f04f8e2362c5425359c5758f22794937708095
|
[
"MIT"
] | 7
|
2020-10-30T06:36:23.000Z
|
2022-01-07T11:08:49.000Z
|
models/EmbracementLayer.py
|
gcunhase/EmbraceBERT
|
01f04f8e2362c5425359c5758f22794937708095
|
[
"MIT"
] | null | null | null |
models/EmbracementLayer.py
|
gcunhase/EmbraceBERT
|
01f04f8e2362c5425359c5758f22794937708095
|
[
"MIT"
] | null | null | null |
import torch
from torch import nn
import numpy as np
from models.AttentionLayer import AttentionLayer
from models.SelfAttentionLayer import SelfAttention, SelfAttentionPytorch,\
BertSelfAttentionScores, BertSelfAttentionScoresP, BertMultiSelfAttentionScoresP,\
BertMultiAttentionScoresP, BertAttentionClsQuery
from pytorch_transformers.modeling_bert import BertAttention, BertSelfAttention
from utils import visualize_attention
__author__ = "Gwena Cunha"
class EmbracementLayer(nn.Module):
def __init__(self, config, hidden_size, p, max_seq_length):
super(EmbracementLayer, self).__init__()
self.p = p
self.hidden_size = hidden_size # 768
self.max_seq_length = max_seq_length # 128
if self.p == 'selfattention':
self.self_attention = SelfAttention(self.hidden_size) #self.max_seq_length) # AttentionLayer(self.hidden_size)
elif self.p == 'multihead_bertselfattention':
self.self_attention = BertSelfAttention(config)
elif self.p == 'multihead_bertselfattention_in_p':
config.num_attention_heads = 1
self.self_attention = BertSelfAttentionScoresP(config)
elif self.p == 'multihead_bertattention':
self.self_attention = BertAttention(config)
elif self.p == 'multihead_bertattention_clsquery':
config.output_attentions = True
self.self_attention = BertAttentionClsQuery(config, hidden_size)
self.softmax = nn.Softmax()
elif self.p == 'attention_clsquery':
self.self_attention = AttentionLayer(self.hidden_size)
elif self.p == 'attention_clsquery_weights':
self.self_attention = AttentionLayer(self.hidden_size, return_att_weights=True)
self.softmax = nn.Softmax(dim=-1)
elif self.p == 'multiheadattention':
config_att = config
config_att.output_attentions = True
self.self_attention = BertSelfAttentionScores(config_att)
elif self.p == 'selfattention_pytorch':
self.self_attention = SelfAttentionPytorch(self.max_seq_length) # 128
elif self.p == 'multiple_multihead_bertselfattention_in_p':
config.num_attention_heads = 1
self.self_attention = BertMultiSelfAttentionScoresP(config)
elif self.p == 'multiple_multihead_bertattention_in_p':
config.num_attention_heads = 1
config.output_attentions = True
self.self_attention = BertMultiAttentionScoresP(config, max_seq_length)
self.softmax = nn.Softmax(dim=-1)
def forward(self, output_tokens_from_bert, cls_token=None):
# pooled_enc_output = bs x 768
# output_tokens_from_bert = bert_output[0]
# cls_output = bert_output[1] # CLS
# Note: Docking layer not needed given that all features have the same size
# tokens_to_embrace = output_tokens_from_bert[:, 1:, :] # (8, 128, 768) = (bs, sequence_length (where the first index is CLS), embedding_size)
tokens_to_embrace = output_tokens_from_bert[:, :, :] # (8, 128, 768) = (bs, sequence_length, embedding_size)
[bs, seq_len, emb_size] = tokens_to_embrace.size()
tokens_to_embrace = tokens_to_embrace.cpu().detach().numpy()
# Note: Consider each token in the sequence of 128 as one modality.
embraced_features_token = []
for i_bs in range(bs):
# 1. Choose feature indexes to be considered in the Embrace vector
if self.p == 'multinomial':
# A. Multinomial distribution: randomly choose features from all 128 with same probability for each index feature
probability = torch.tensor(np.ones(seq_len), dtype=torch.float)
embraced_features_index = torch.multinomial(probability, emb_size, replacement=True) # shape = [768]
embraced_features_index = embraced_features_index.cpu().detach().numpy() # shape = 768
elif self.p == 'multihead_bertselfattention' or self.p == 'multihead_bertattention':
tokens_to_embrace_bs = tokens_to_embrace[i_bs, :, :]
head_mask = torch.ones([1, 1, 1, np.shape(tokens_to_embrace_bs)[0]]).cuda()
attention_mask = torch.zeros([1, 1, 1, np.shape(tokens_to_embrace_bs)[0]]).cuda()
tokens_to_embrace_bs_tensor = torch.tensor(tokens_to_embrace_bs, dtype=torch.float).unsqueeze(0).cuda()
#selfattention_scores = self.self_attention(tokens_to_embrace_bs_tensor, attention_mask, head_mask=None)
selfattention_scores = self.self_attention(tokens_to_embrace_bs_tensor, attention_mask, head_mask=head_mask)
selfattention_scores = selfattention_scores[0]
elif self.p == 'multihead_bertattention_clsquery':
print("TODO. Use cls_token - Come back to this")
tokens_to_embrace_bs = tokens_to_embrace[i_bs, :, :]
cls_token_bs = cls_token[i_bs, :]
#cls_token_bs = torch.tensor(cls_token_bs, dtype=torch.float).unsqueeze(0).unsqueeze(0).cuda()
attention_mask = torch.ones([1, 1, 1, np.shape(tokens_to_embrace_bs)[0]]).cuda()
tokens_to_embrace_bs_tensor = torch.tensor(tokens_to_embrace_bs, dtype=torch.float).unsqueeze(0).cuda()
cls_token_bs = torch.tensor(cls_token_bs, dtype=torch.float).unsqueeze(0)
cls_token_bs = cls_token_bs.repeat(self.max_seq_length, 1)
cls_token_bs = cls_token_bs.unsqueeze(0).cuda()
selfattention_scores = self.self_attention(tokens_to_embrace_bs_tensor, head_mask=attention_mask, cls_query=cls_token_bs)
selfattention_scores = self.softmax(selfattention_scores[1])
print("")
elif self.p == 'attention_clsquery':
tokens_to_embrace_bs = tokens_to_embrace[i_bs, :, :]
cls_token_bs = cls_token[i_bs, :]
tokens_to_embrace_bs_tensor = torch.tensor(tokens_to_embrace_bs, dtype=torch.float).cuda()
cls_token_bs = torch.tensor(cls_token_bs, dtype=torch.float).unsqueeze(0).cuda()
selfattention_scores = self.self_attention(cls_token_bs, tokens_to_embrace_bs_tensor, unsqueeze_idx=0)
selfattention_scores = selfattention_scores[0]
elif self.p == 'multiple_multihead_bertselfattention_in_p' or self.p == 'multiple_multihead_bertattention_in_p':
tokens_to_embrace_bs = tokens_to_embrace[i_bs, :, :]
attention_mask = torch.ones([1, 1, 1, np.shape(tokens_to_embrace_bs)[0]]).cuda()
tokens_to_embrace_bs_tensor = torch.tensor(tokens_to_embrace_bs, dtype=torch.float).unsqueeze(0).cuda()
selfattention_scores = self.self_attention(tokens_to_embrace_bs_tensor, head_mask=attention_mask,
is_visualize_attention=False)
if self.p == 'multiple_multihead_bertattention_in_p':
selfattention_scores = selfattention_scores.squeeze()
selfattention_scores = self.softmax(selfattention_scores)
# Choose features using information from self-attention
multiple_embrace_vectors = []
for i in range(self.max_seq_length): # 128
score = selfattention_scores[i, :]
#attention_probs_img = score.unsqueeze(0).cpu().detach().numpy()
#visualize_attention(attention_probs_img)
embraced_features_index = torch.multinomial(score, emb_size, replacement=True) # shape = [768]
embraced_features_index = embraced_features_index.cpu().detach().numpy() # shape = 768
embraced_features_token_bs = []
for i_emb, e in enumerate(embraced_features_index):
embraced_features_token_bs.append(tokens_to_embrace[i_bs, e, i_emb])
multiple_embrace_vectors.append(embraced_features_token_bs)
multiple_embrace_vectors = torch.tensor(multiple_embrace_vectors, dtype=torch.float)
else:
# B. Self-attention used to choose most important indexes -> p = softmax(mean(self_att))
# 'selfattention_scores' shape -> (bs, 128)
tokens_to_embrace_bs = tokens_to_embrace[i_bs, :, :]
# ADD THE NEXT 2 LINES TO CONDENSED
# attention_mask_bs = attention_mask[i_bs, :]
# _, selfattention_scores = self.self_attention(tokens_to_embrace_bs, attention_mask_bs)
# Original attention_mask ranges from 0 to -1000
# If we want to mask the scores by multiplying between 0 and 1, we should give the attention_mask
# as head_mask
if self.p == 'selfattention':
_, selfattention_scores = self.self_attention(tokens_to_embrace_bs)
elif self.p == 'attention_clsquery_weights':
tokens_to_embrace_bs = tokens_to_embrace[i_bs, :, :]
cls_token_bs = cls_token[i_bs, :]
tokens_to_embrace_bs_tensor = torch.tensor(tokens_to_embrace_bs, dtype=torch.float).cuda()
cls_token_bs = torch.tensor(cls_token_bs, dtype=torch.float).unsqueeze(0).cuda()
selfattention_scores = self.self_attention(cls_token_bs, tokens_to_embrace_bs_tensor, unsqueeze_idx=0)
selfattention_scores = selfattention_scores[1]
selfattention_scores = self.softmax(selfattention_scores).squeeze()
elif self.p == 'multiheadattention': # BertAttention
attention_mask = torch.ones([1, 1, 1, np.shape(tokens_to_embrace_bs)[0]]).cuda()
tokens_to_embrace_bs_tensor = torch.tensor(tokens_to_embrace_bs, dtype=torch.float).unsqueeze(0).cuda()
#selfattention_scores = self.self_attention(tokens_to_embrace_bs_tensor, attention_mask, head_mask=None)
selfattention_scores = self.self_attention(tokens_to_embrace_bs_tensor, head_mask=attention_mask)
elif self.p == 'multihead_bertselfattention_in_p':
attention_mask = torch.ones([1, 1, 1, np.shape(tokens_to_embrace_bs)[0]]).cuda()
tokens_to_embrace_bs_tensor = torch.tensor(tokens_to_embrace_bs, dtype=torch.float).unsqueeze(0).cuda()
#selfattention_scores = self.self_attention(tokens_to_embrace_bs_tensor, attention_mask, head_mask=None)
selfattention_scores = self.self_attention(tokens_to_embrace_bs_tensor, head_mask=attention_mask)
else: # 'selfattention_pytorch'
tokens_to_embrace_bs_tensor = torch.tensor(tokens_to_embrace_bs, dtype=torch.float).unsqueeze(
0).cuda()
selfattention_scores = self.self_attention(tokens_to_embrace_bs_tensor)
# Choose features using information from self-attention
selfattention_scores = torch.tensor(selfattention_scores, dtype=torch.float)
embraced_features_index = torch.multinomial(selfattention_scores, emb_size, replacement=True) # shape = [768]
embraced_features_index = embraced_features_index.cpu().detach().numpy() # shape = 768
# 2. Add features into one of size (bs, embedding_size)
embraced_features_token_bs = []
if self.p == 'multihead_bertselfattention' or self.p == 'multihead_bertattention':
embraced_features_index = torch.sum(selfattention_scores, dim=1)
embraced_features_token_bs = embraced_features_index.squeeze()
embraced_features_token_bs = embraced_features_token_bs.cpu().detach().numpy()
elif self.p == 'multiple_multihead_bertselfattention_in_p' or self.p == 'multiple_multihead_bertattention_in_p':
embraced_features_token_bs = torch.sum(multiple_embrace_vectors, dim=0)
embraced_features_token_bs = embraced_features_token_bs.squeeze()
embraced_features_token_bs = embraced_features_token_bs.cpu().detach().numpy()
elif self.p == 'attention_clsquery':
embraced_features_token_bs = selfattention_scores.cpu().detach().numpy()
else:
for i_emb, e in enumerate(embraced_features_index):
embraced_features_token_bs.append(tokens_to_embrace[i_bs, e, i_emb])
embraced_features_token.append(embraced_features_token_bs) # (768)
embraced_features_token = torch.tensor(embraced_features_token, dtype=torch.float) # (bs, 768)
return embraced_features_token
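The core embracement step in forward() — sampling, for every embedding dimension, which token supplies that dimension — can be seen in isolation with toy shapes. This sketch mirrors only the 'multinomial' branch; the shapes and values are made up.

if __name__ == "__main__":
    import torch  # already imported above; repeated here so the sketch stands alone
    seq_len, emb_size = 5, 8
    tokens = torch.randn(seq_len, emb_size)
    probability = torch.ones(seq_len)                  # uniform over tokens ('multinomial' mode)
    chosen = torch.multinomial(probability, emb_size, replacement=True)   # (emb_size,)
    embraced = tokens[chosen, torch.arange(emb_size)]  # one value per embedding dimension
    print(embraced.shape)                              # torch.Size([8])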
| 69.847826
| 151
| 0.662621
| 12,384
| 0.963585
| 0
| 0
| 0
| 0
| 0
| 0
| 2,851
| 0.221833
|
4922ee0740ccdfdc837d0e1e02e2f0bf4fe6c81f
| 8,227
|
py
|
Python
|
tests/test_csv2bufr.py
|
tomkralidis/CSV2BUFR
|
ba7ce4ed2bb41e42fcb9d03f10049ffc6a2073f8
|
[
"Apache-2.0"
] | null | null | null |
tests/test_csv2bufr.py
|
tomkralidis/CSV2BUFR
|
ba7ce4ed2bb41e42fcb9d03f10049ffc6a2073f8
|
[
"Apache-2.0"
] | 8
|
2021-11-04T12:44:46.000Z
|
2021-11-23T02:23:05.000Z
|
tests/test_csv2bufr.py
|
tomkralidis/CSV2BUFR
|
ba7ce4ed2bb41e42fcb9d03f10049ffc6a2073f8
|
[
"Apache-2.0"
] | 2
|
2021-11-10T14:43:08.000Z
|
2021-11-23T01:54:46.000Z
|
###############################################################################
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
###############################################################################
import csv
from io import StringIO
import logging
from eccodes import (codes_bufr_new_from_samples, codes_release)
import pytest
from csv2bufr import (validate_mapping, apply_scaling, validate_value,
transform, SUCCESS)
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel("DEBUG")
# test data
@pytest.fixture
def mapping_dict():
return {
"inputDelayedDescriptorReplicationFactor": [],
"number_header_rows": 1,
"names_on_row": 1,
"header": [
{"eccodes_key": "edition", "value": 4}, # noqa
{"eccodes_key": "masterTableNumber", "value": 0}, # noqa
{"eccodes_key": "bufrHeaderCentre", "value": 0}, # noqa
{"eccodes_key": "bufrHeaderSubCentre", "value": 0}, # noqa
{"eccodes_key": "updateSequenceNumber", "value": 0}, # noqa
{"eccodes_key": "dataCategory", "value": 0}, # noqa
{"eccodes_key": "internationalDataSubCategory", "value": 6}, # noqa
{"eccodes_key": "masterTablesVersionNumber", "value": 36}, # noqa
{"eccodes_key": "numberOfSubsets", "value": 1}, # noqa
{"eccodes_key": "observedData", "value": 1}, # noqa
{"eccodes_key": "compressedData", "value": 0}, # noqa
{"eccodes_key": "typicalYear", "csv_column": "year"}, # noqa
{"eccodes_key": "typicalMonth", "csv_column": "month"}, # noqa
{"eccodes_key": "typicalDay", "csv_column": "day"}, # noqa
{"eccodes_key": "typicalHour", "csv_column": "hour"}, # noqa
{"eccodes_key": "typicalMinute", "csv_column": "minute"}, # noqa
{"eccodes_key": "unexpandedDescriptors","value": [301021, 301011, 301012, 10051, 12101]} # noqa
],
"data": [
{"eccodes_key": "#1#year", "csv_column": "year"}, # noqa
{"eccodes_key": "#1#month", "csv_column": "month"}, # noqa
{"eccodes_key": "#1#day", "csv_column": "day"}, # noqa
{"eccodes_key": "#1#hour", "csv_column": "hour"}, # noqa
{"eccodes_key": "#1#minute", "csv_column": "minute"}, # noqa
{"eccodes_key": "#1#latitude", "csv_column": "latitude"}, # noqa
{"eccodes_key": "#1#longitude", "csv_column": "longitude"}, # noqa
{"eccodes_key": "#1#pressureReducedToMeanSeaLevel", "csv_column": "pressure"}, # noqa
{"eccodes_key": "#1#airTemperature", "csv_column": "air_temperature"} # noqa
]
}
@pytest.fixture
def data_dict():
return {
"air_temperature": 290.31,
"pressure": 100130,
"latitude": 55.154,
"longitude": 0.0,
"year": 2021,
"month": 11,
"day": 18,
"hour": 18,
"minute": 0
}
@pytest.fixture
def data_to_encode():
return {
"edition": 4,
"masterTableNumber": 0,
"bufrHeaderCentre": 0,
"bufrHeaderSubCentre": 0,
"updateSequenceNumber": 0,
"section1Flags": 0,
"dataCategory": 0,
"internationalDataSubCategory": 6,
"masterTablesVersionNumber": 36,
"numberOfSubsets": 1,
"observedData": 1,
"compressedData": 0,
"typicalYear": 2021.0,
"typicalMonth": 11.0,
"typicalDay": 18.0,
"typicalHour": 18.0,
"typicalMinute": 0.0,
"unexpandedDescriptors": [
301021,
301011,
301012,
10051,
12101
],
"#1#year": 2021.0,
"#1#month": 11.0,
"#1#day": 18.0,
"#1#hour": 18.0,
"#1#minute": 0.0,
"#1#latitude": 55.154,
"#1#longitude": 0.0,
"#1#pressureReducedToMeanSeaLevel": 100130.0,
"#1#airTemperature": 290.31
}
@pytest.fixture
def station_dict():
return {
"metadata": {
"last-sync": "2021-10-22"
},
"data": {
"station-name": "test data"
},
"wigosIds": [
{"wid": "0-1-2-ABCD"}
]
}
# test to check whether eccodes is installed
def test_eccodes():
# call to eccodes library to test if accessible
bufr_msg = codes_bufr_new_from_samples('BUFR4')
# call to release the BUFR message
codes_release(bufr_msg)
assert True
# test to check validate_mapping is not broken
def test_validate_mapping_pass(mapping_dict):
success = validate_mapping(mapping_dict)
assert success == SUCCESS
# test to check validate_mapping fails when we expect it to
def test_validate_mapping_fail():
# not sure on this one, do we need this test and if so have many
# different exceptions do we want to test?
test_data = {
"inputDelayedDescriptorReplicationFactor": [],
"header": [],
"data": [
{"eccodes_key": "abc", "value": 1, "offset": 1}, # noqa
{"eccodes_key": "def", "csv_column": "col1", "valid-min": 0, "valid-max": 10}, # noqa
{"eccodes_key": "ghi", "csv_column": "col2", "valid-min": 250.0, "valid-max": 350.0, "scale": 0.0, "offset": 273.15}, # noqa
{"eccodes_key": "jkl", "csv_column": "col3", "valid-min": 90000.0, "valid-max": 120000.0, "scale": 2.0, "offset": 0.0} # noqa
]
}
try:
success = validate_mapping(test_data)
except Exception:
success = False
assert success != SUCCESS
# test to make sure apply_scaling works as expected
def test_apply_scaling():
scale = 1
offset = 20.0
test_value = 10.0
assert 120.0 == apply_scaling(test_value, scale, offset)
# test to check that valid_value works
def test_validate_value_pass():
input_value = 10.0
try:
value = validate_value("test value", input_value, 0.0, 100.0, False)
except Exception:
assert False
assert value == input_value
# test to check that valid_value fails when we expect it to
def test_validate_value_fail():
input_value = 10.0
try:
_ = validate_value("test value", input_value, 0.0, 9.9, False)
except Exception:
return
assert False
# test to check that valid_value returns null value when we expect it to
def test_validate_value_nullify():
input_value = 10.0
try:
value = validate_value("test value", input_value, 0.0, 9.9, True)
except Exception:
assert False
assert value is None
# check that test transform works
def test_transform(data_dict, station_dict, mapping_dict):
# create CSV
output = StringIO()
writer = csv.DictWriter(output, quoting=csv.QUOTE_NONNUMERIC,
fieldnames=data_dict.keys())
writer.writeheader()
writer.writerow(data_dict)
data = output.getvalue()
result = transform(data, station_dict, mapping_dict)
for item in result:
assert isinstance(item, dict)
assert "_meta" in item
item_meta_keys = ['data_category', 'data_date', 'identifier',
'md5', 'originating_centre', 'wigos_id']
assert sorted(item["_meta"].keys()) == item_meta_keys
assert item["_meta"]["md5"] == "981938dbd97be3e5adc8e7b1c6eb642c"
| 34.422594
| 138
| 0.579312
| 0
| 0
| 0
| 0
| 3,847
| 0.467607
| 0
| 0
| 4,021
| 0.488757
|
49233a1586edace305f6bd1e93ae01acb6ace60f
| 4,381
|
py
|
Python
|
map_methods_to_mach_ports/parseXcodeLogs.py
|
malus-security/kobold
|
2ae23b75ec503ef3702ad297ddbb7824ac4da53c
|
[
"BSD-3-Clause"
] | 3
|
2020-06-26T19:44:46.000Z
|
2021-03-25T07:00:04.000Z
|
map_methods_to_mach_ports/parseXcodeLogs.py
|
malus-security/kobold
|
2ae23b75ec503ef3702ad297ddbb7824ac4da53c
|
[
"BSD-3-Clause"
] | null | null | null |
map_methods_to_mach_ports/parseXcodeLogs.py
|
malus-security/kobold
|
2ae23b75ec503ef3702ad297ddbb7824ac4da53c
|
[
"BSD-3-Clause"
] | 1
|
2020-09-14T23:46:31.000Z
|
2020-09-14T23:46:31.000Z
|
#Input
#Xcode logs output by autogenerated method invocations
#Requirements
#Infer which invocation numbers failed/succeeded
#Infer apparent entitlement requirements based on error message from completion handlers
#Detect which invocation numbers should have had completion handlers
#Map new information to mach-port and method declaration
import pickle
import re
#TODO: this code needs to get a lot smarter as we collect more entitlement samples
def lookForEntReqs(id, thisInvocation, knownEntKeyList):
errorString = "NSError * var"
for line in thisInvocation["logLines"]:
if errorString in line:
if "entitlement" in line:
#We can use a corpus of known entitlement keys and search for those values in the error message
for key in knownEntKeyList:
if key in line:
thisInvocation["entitlementRequirement"].add(key)
#If the word entitlement is in the error message, but we don't find a key then something went wrong.
if len(thisInvocation["entitlementRequirement"]) == 0:
print "ERROR: No known entitlement key found in message that mentions the word 'entitlement'. Likely false negative."
#If we find more than one key in the error message, then something went wrong.
if len(thisInvocation["entitlementRequirement"]) > 1:
print "ERROR: More than one known entitlement key was detected in the same message. Likely false positive."
#this is some old code that assumed entitlement keys would be surrounded by fancy quotes, but that is not always the case.
"""
fancyQuoteSlices= line.split('\xe2\x80\x9c')[1:]
for slice in fancyQuoteSlices:
thisInvocation["entitlementRequirement"].add(slice.split('\xe2\x80\x9d')[0])
"""
def parseRelevantLogs(id, thisInvocation, logLines):
#logLines = raw_xcode_logs.split("\n")
for line in logLines:
idTag = "id "+id+": "
if idTag in line:
#break off the text before the id tag
#TODO this system will miss array output that uses multiple lines.
thisInvocation["logLines"].append(line.split(idTag)[-1])
def checkForCompletionStatus(id, thisInvocation):
completionString = "Completion message"
if completionString in thisInvocation["logLines"]:
thisInvocation["valid"] = True
raw_xcode_logs = open("input_data/xcode_results.txt", "r").read().strip().split("\n")
sanitized_logs = []
for log in raw_xcode_logs:
    if re.match(r'^[0-9][0-9][0-9][0-9]-.*$', log):
sanitized_logs.append(log)
else:
sanitized_logs[-1] += " " + log
#TODO I need to know how many invocations to search for.
# Perhaps it would be easier to deal with this if I could import a pickle file with data from map_potential_methods_to_mach-ports.py
with open('./input_data/invocationDictionary.pk', 'rb') as handle:
invocationDictionary = pickle.load(handle)
#print invocationDictionary
knownEntKeyList = open('./input_data/ent_key_corpus.txt', 'r').read().strip().split('\n')
for id in invocationDictionary:
thisInvocation = invocationDictionary[id]
id = str(id)
thisProtocol = thisInvocation["protocol"]
thisMethod = thisInvocation["method"]
thisMachPort = thisInvocation["mach-port"]
thisInvocation["valid"] = False
thisInvocation["entitlementRequirement"] = set()
thisInvocation["logLines"] = []
#The invocation is the correct combination of accessible mach-port and remote method
#Note that a valid invocation could still be inaccessible due to decentralized checks
#run various tests to see if the connection should be considered valid or not
parseRelevantLogs(id, thisInvocation, sanitized_logs)
#block based tests
hasBlockString = "Invocation has a completion handler"
if hasBlockString in thisInvocation["logLines"]:
checkForCompletionStatus(id, thisInvocation)
lookForEntReqs(id, thisInvocation, knownEntKeyList)
#if thisInvocation["valid"] and len(thisInvocation["entitlementRequirement"]) == 0:
if thisInvocation["valid"]:
        print(id)
        #print(" " + thisProtocol)
        print(" " + thisMethod)
        print(" " + thisMachPort)
        #print(" " + str(thisInvocation["valid"]))
        print(" " + str(thisInvocation["entitlementRequirement"]))
        for line in thisInvocation["logLines"]:
            print("****" + line)
        #for line in thisInvocation["logLines"]:
        #    print(" " + line)
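A minimal, self-contained sketch of the entitlement-key matching idea implemented in lookForEntReqs above, assuming the log line has already been reduced to the text after its 'id N: ' tag and that the key corpus is a list of strings (the sample key below is illustrative, not taken from the corpus):
def match_entitlement_keys(log_line, known_keys):
    # Return every known entitlement key mentioned in an NSError log line.
    if "entitlement" not in log_line:
        return set()
    return {key for key in known_keys if key in log_line}
sample = 'NSError * var: "operation requires the com.example.private-entitlement entitlement"'
print(match_entitlement_keys(sample, ["com.example.private-entitlement", "com.example.other"]))
# -> {'com.example.private-entitlement'}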
| 40.564815
| 132
| 0.716047
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,547
| 0.581374
|
49245578c9299525db939b3fe79e00703d3dcb25
| 10,298
|
py
|
Python
|
inferelator_ng/tests/test_design_response.py
|
asistradition/inferelator_ng
|
56ef2ce3b1ace35b9b2b2821a0e78746563c309a
|
[
"BSD-2-Clause"
] | 1
|
2019-01-10T17:04:43.000Z
|
2019-01-10T17:04:43.000Z
|
inferelator_ng/tests/test_design_response.py
|
asistradition/inferelator_ng
|
56ef2ce3b1ace35b9b2b2821a0e78746563c309a
|
[
"BSD-2-Clause"
] | 1
|
2019-01-21T21:05:19.000Z
|
2019-01-21T21:05:19.000Z
|
inferelator_ng/tests/test_design_response.py
|
asistradition/inferelator_ng
|
56ef2ce3b1ace35b9b2b2821a0e78746563c309a
|
[
"BSD-2-Clause"
] | null | null | null |
import unittest, os
import pandas as pd
import numpy as np
import pdb
from .. import design_response_translation
from .. import utils
my_dir = os.path.dirname(__file__)
class TestDR(unittest.TestCase):
"""
Superclass for common methods
"""
def calculate_design_and_response(self):
#drd = design_response_R.DRDriver()
drd = design_response_translation.PythonDRDriver()
target = drd.target_directory = os.path.join(my_dir, "artifacts")
if not os.path.exists(target):
os.makedirs(target)
drd.delTmin = self.delT_min
drd.delTmax = self.delT_max
drd.tau = self.tau
(self.design, self.response) = drd.run(self.exp, self.meta)
@unittest.skip("I don't know why the special character thing is necessary so I didn't reimplement it")
class TestSpecialCharacter(TestDR):
def setUp(self):
spchrs='~!@#$%^&*()_-+=|\}]{[;:/?.><\\'
self.meta = pd.DataFrame()
self.meta['isTs']=[True, True, True, True, False]
self.meta['is1stLast'] = ['f','m','m','l','e']
self.meta['prevCol'] = ['NA','ts1'+spchrs,'ts2'+spchrs,'ts3'+spchrs, 'NA']
self.meta['del.t'] = ['NA', 3, 2, 5, 'NA']
self.meta['condName'] = ['ts1'+spchrs,'ts2'+spchrs,'ts3'+spchrs,'ts4'+spchrs,'ss']
self.exp = pd.DataFrame(np.reshape(range(10), (2,5)) + 1,
index = ['gene' + str(i + 1) + spchrs for i in range(2)],
columns = ['ts' + str(i + 1) + spchrs for i in range(4)] + ['ss'])
self.delT_min = 2
self.delT_max = 4
self.tau = 2
self.calculate_design_and_response()
def testspecialcharacter(self):
spchrs='~!@#$%^&*()_-+=|\}]{[;:/?.><\\'
ds, resp = (self.design, self.response)
expression_1 = np.array(list(self.exp['ts1' + spchrs]))
expression_2 = np.array(list(self.exp['ts2' + spchrs]))
expected_response_1 = (expression_1 + self.tau * (expression_2 - expression_1) / (float(self.meta['del.t'][1])))
expression_3 = np.array(list(self.exp['ts3' + spchrs]))
expected_response_2 = expression_2 + self.tau * (expression_3 - expression_2) / (float(self.meta['del.t'][2]))
np.testing.assert_almost_equal(np.array(resp['ts1' + spchrs]), expected_response_1)
np.testing.assert_almost_equal(np.array(resp['ts2' + spchrs]), expected_response_2)
@unittest.skip("Need to redo this to match the current drd")
class TestDRModelOrganisms(TestDR):
def test_on_bsubtilis(self):
self.exp = utils.df_from_tsv('data/bsubtilis/expression.tsv')
self.meta = utils.df_from_tsv('data/bsubtilis/meta_data.tsv', has_index=False)
expected_design = utils.df_from_tsv('data/bsubtilis/bsubtilis_design_matrix.tsv')
expected_response = utils.df_from_tsv('data/bsubtilis/bsubtilis_response_matrix.tsv')
self.delT_min = 0
self.delT_max = 110
self.tau = 45
self.calculate_design_and_response()
np.testing.assert_allclose(self.response.values, expected_response.values, atol=1e-15)
self.assertEqual(len(set(expected_response.columns)), len(set(self.response.columns)))
self.assertEqual(expected_response.columns.tolist(), self.response.columns.tolist())
self.assertEqual(expected_response.index.tolist(), self.response.index.tolist())
self.assertTrue(pd.DataFrame.equals(expected_design, self.design))
class TestDRAboveDeltMax(TestDR):
def setUp(self):
self.meta = pd.DataFrame()
self.meta['isTs']=[True, True, True, True, False]
self.meta['is1stLast'] = ['f','m','m','l','e']
self.meta['prevCol'] = ['NA','ts1','ts2','ts3', 'NA']
self.meta['del.t'] = ['NA', 3, 2, 5, 'NA']
self.meta['condName'] = ['ts1','ts2','ts3','ts4','ss']
self.exp = pd.DataFrame(np.reshape(range(10), (2,5)) + 1,
index = ['gene' + str(i + 1) for i in range(2)],
columns = ['ts' + str(i + 1) for i in range(4)] + ['ss'])
self.delT_min = 2
self.delT_max = 4
self.tau = 2
self.calculate_design_and_response()
def test_design_matrix_above_delt_max(self):
# Set up variables
ds, resp = (self.design, self.response)
self.assertEqual(ds.shape, (2, 4))
self.assertEqual(list(ds.columns), ['ts1-ts2', 'ts2-ts3', 'ss', 'ts4'],
msg = "Guarantee that the ts3-ts4 condition is dropped, "
"since its delT of 5 is greater than delt_max of 4")
self.assertEqual(list(ds['ss']), [5, 10])
self.assertEqual(list(ds['ss']), list(resp['ss']),
msg = 'Steady State design and response should be equal')
self.assertTrue((resp['ts2-ts3'].values == [3, 8]).all())
def test_response_matrix_steady_state_above_delt_max(self):
ds, resp = (self.design, self.response)
self.assertEqual(list(resp.columns), ['ts1-ts2', 'ts2-ts3', 'ss', 'ts4'])
self.assertEqual(list(resp['ts4']), list(self.exp['ts4']))
self.assertEqual(list(resp['ss']), list(self.exp['ss']))
def test_response_matrix_time_series_above_delt_max(self):
ds, resp = (self.design, self.response)
expression_1 = np.array(list(self.exp['ts1']))
expression_2 = np.array(list(self.exp['ts2']))
expected_response_1 = (expression_1 + self.tau * (expression_2 - expression_1) / (
float(self.meta['del.t'][1])))
expression_3 = np.array(list(self.exp['ts3']))
expected_response_2 = expression_2 + self.tau * (expression_3 - expression_2) / (
float(self.meta['del.t'][2]))
np.testing.assert_almost_equal(np.array(resp['ts1-ts2']), expected_response_1)
np.testing.assert_almost_equal(np.array(resp['ts2-ts3']), expected_response_2)
class TestDRMicro(TestDR):
def setUp(self):
self.meta = pd.DataFrame()
self.meta['isTs']=[False, False]
self.meta['is1stLast'] = ['e','e']
self.meta['prevCol'] = ['NA','NA']
self.meta['del.t'] = ['NA', 'NA']
self.meta['condName'] = ['ss1', 'ss2']
self.exp = pd.DataFrame(
np.reshape(range(4), (2, 2)) + 1,
index=['gene' + str(i + 1) for i in range(2)],
columns=['ss1', 'ss2'])
self.delT_min = 2
self.delT_max = 4
self.tau = 2
self.calculate_design_and_response()
def test_micro(self):
ds, resp = (self.design, self.response)
self.assertEqual(ds.shape, (2, 2))
self.assertTrue((ds['ss1'].values == [1, 3]).all())
self.assertTrue((ds['ss2'].values == [2, 4]).all())
# In steady state, expect design and response to be identical
self.assertTrue(ds.equals(resp))
class TestDRBelowDeltMin(TestDR):
def setUp(self):
self.meta = pd.DataFrame()
self.meta['isTs']=[True, True, True, True, False]
self.meta['is1stLast'] = ['f','m','m','l','e']
self.meta['prevCol'] = ['NA','ts1','ts2','ts3', 'NA']
self.meta['del.t'] = ['NA', 1, 2, 3, 'NA']
self.meta['condName'] = ['ts1', 'ts2', 'ts3', 'ts4', 'ss']
self.exp = pd.DataFrame(
np.reshape(range(10), (2, 5)) + 1,
index=['gene' + str(i + 1) for i in range(2)],
columns=['ts' + str(i + 1) for i in range(4)] + ['ss'])
self.delT_min = 2
self.delT_max = 4
self.tau = 2
self.calculate_design_and_response()
@unittest.skip("I'm not sure this is the behavior I want")
def test_response_matrix_below_delt_min(self):
ds, resp = (self.design, self.response)
expression_1 = np.array(list(self.exp['ts1']))
expression_3 = np.array(list(self.exp['ts3']))
expected_response_1 = expression_1 + self.tau * (expression_3 - expression_1) / (float(self.meta['del.t'][1]) + float(self.meta['del.t'][2]))
np.testing.assert_almost_equal(np.array(resp['ts1-ts3']), expected_response_1)
#pdb.set_trace()
@unittest.skip("skipping until we've determined if we want to modify the legacy R code")
def test_design_matrix_headers_below_delt_min(self):
ds, resp = (self.design, self.response)
print(ds.columns)
self.assertEqual(list(ds.columns), ['ss', 'ts1', 'ts2', 'ts3'],
msg = "Guarantee that the ts4 condition is dropped, since its the last in the time series")
class TestBranchingTimeSeries(TestDR):
def setUp(self):
self.meta = pd.DataFrame()
self.meta['isTs']=[True, True, True]
self.meta['is1stLast'] = ['f','l','l']
self.meta['prevCol'] = ['NA','ts1','ts1']
self.meta['del.t'] = ['NA', 2, 2]
self.meta['condName'] = ['ts1','ts2','ts3']
self.exp = pd.DataFrame(np.reshape(range(9), (3,3)) + 1,
index = ['gene' + str(i + 1) for i in range(3)],
columns = ['ts' + str(i + 1) for i in range(3)])
self.delT_min = 1
self.delT_max = 4
self.tau = 1
self.calculate_design_and_response()
def test_design_matrix_branching_time_series(self):
ds, resp = (self.design, self.response)
self.assertEqual(ds.shape, (3, 2))
self.assertEqual(list(ds.columns), ['ts1-ts2', 'ts1-ts3'],
msg = 'This is how the R code happens to name branching time series')
for col in ds:
self.assertEqual(list(ds[col]), list(self.exp['ts1']),
msg = '{} column in the design matrix should be equal to the branching source, ts1, in the exp matrix'.format(col))
def test_response_matrix_branching_time_series(self):
ds, resp = (self.design, self.response)
self.assertEqual(resp.shape, (3, 2))
expression_1 = np.array(list(self.exp['ts1']))
expression_2 = np.array(list(self.exp['ts2']))
expected_response_1 = (expression_1 + self.tau * (expression_2 - expression_1) /
float(self.meta['del.t'][1]))
expression_3 = np.array(list(self.exp['ts3']))
expected_response_2 = (expression_1 + self.tau * (expression_3 - expression_1) /
float(self.meta['del.t'][2]))
np.testing.assert_almost_equal(np.array(resp['ts1-ts2']), expected_response_1)
np.testing.assert_almost_equal(np.array(resp['ts1-ts3']), expected_response_2)
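All of the expected responses asserted in these tests follow the same finite-difference rule: for a time-series condition with expression x_t, follow-up expression x_{t+dt} at delay del.t, and time constant tau, the response is x_t + tau * (x_{t+dt} - x_t) / del.t, while the design column keeps x_t (and design equals response for steady-state conditions). A standalone check of that formula:
import numpy as np
tau, del_t = 2.0, 3.0
x_t = np.array([1.0, 6.0])        # e.g. the 'ts1' expression column
x_next = np.array([2.0, 7.0])     # e.g. the 'ts2' expression column
response = x_t + tau * (x_next - x_t) / del_t
np.testing.assert_almost_equal(response, np.array([1.0 + 2.0 / 3.0, 6.0 + 2.0 / 3.0]))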
| 46.809091
| 150
| 0.604098
| 9,950
| 0.966207
| 0
| 0
| 3,628
| 0.352301
| 0
| 0
| 1,917
| 0.186153
|
4924946192a3e01f6cc5df5f86d6a37b39e0f8e7
| 94
|
py
|
Python
|
flights/urls.py
|
olubiyiontheweb/travelworld
|
ca9d2206108bd59fd222e384bcaab7efd6832e24
|
[
"MIT"
] | null | null | null |
flights/urls.py
|
olubiyiontheweb/travelworld
|
ca9d2206108bd59fd222e384bcaab7efd6832e24
|
[
"MIT"
] | null | null | null |
flights/urls.py
|
olubiyiontheweb/travelworld
|
ca9d2206108bd59fd222e384bcaab7efd6832e24
|
[
"MIT"
] | null | null | null |
from django.urls import path
from flights import views
urlpatterns = [path("", views.index)]
| 18.8
| 37
| 0.755319
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2
| 0.021277
|
4924d0f3858273f23eb72e262ac3af691158f5e6
| 835
|
py
|
Python
|
invenio_app_ils/ill/loaders/jsonschemas/borrowing_request.py
|
equadon/invenio-app-ils
|
42ba282968d0aa28fb1bfc71d0709685165aaec4
|
[
"MIT"
] | null | null | null |
invenio_app_ils/ill/loaders/jsonschemas/borrowing_request.py
|
equadon/invenio-app-ils
|
42ba282968d0aa28fb1bfc71d0709685165aaec4
|
[
"MIT"
] | null | null | null |
invenio_app_ils/ill/loaders/jsonschemas/borrowing_request.py
|
equadon/invenio-app-ils
|
42ba282968d0aa28fb1bfc71d0709685165aaec4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019-2020 CERN.
#
# invenio-app-ils is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""BorrowingRequest schema for marshmallow loader."""
from invenio_records_rest.schemas import RecordMetadataSchemaJSONV1
from marshmallow import EXCLUDE, fields
class BorrowingRequestSchemaV1(RecordMetadataSchemaJSONV1):
"""BorrowingRequest schema."""
class Meta:
"""Meta attributes for the schema."""
unknown = EXCLUDE
cancel_reason = fields.Str()
document_pid = fields.Str(required=True)
name = fields.Str(required=True)
notes = fields.Str()
library_pid = fields.Str(required=True) # TODO: validate
status = fields.Str(required=True) # TODO: this should be an enum
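The two behaviours this loader relies on, Meta.unknown = EXCLUDE and required=True, are plain marshmallow features; a small standalone illustration with a throwaway schema (this is not the invenio class above, whose base class adds its own handling):
from marshmallow import EXCLUDE, Schema, fields
class _SketchSchema(Schema):
    class Meta:
        unknown = EXCLUDE                      # silently drop undeclared keys
    document_pid = fields.Str(required=True)   # missing key -> ValidationError
    notes = fields.Str()
print(_SketchSchema().load({"document_pid": "docid-1", "surprise": "ignored"}))
# -> {'document_pid': 'docid-1'}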
| 29.821429
| 76
| 0.718563
| 458
| 0.548503
| 0
| 0
| 0
| 0
| 0
| 0
| 370
| 0.443114
|
4925282781ec37ab8cc1089001ebc7822dc2c473
| 366
|
py
|
Python
|
systemtest/users/apps.py
|
IBM-Power-SystemTest/systemtest
|
a29e6d54500ca13f554073cc66a4a2d403ea5b14
|
[
"BSD-3-Clause"
] | 1
|
2022-03-09T18:07:11.000Z
|
2022-03-09T18:07:11.000Z
|
systemtest/users/apps.py
|
IBM-Power-SystemTest/systemtest
|
a29e6d54500ca13f554073cc66a4a2d403ea5b14
|
[
"BSD-3-Clause"
] | null | null | null |
systemtest/users/apps.py
|
IBM-Power-SystemTest/systemtest
|
a29e6d54500ca13f554073cc66a4a2d403ea5b14
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Users app config
References:
https://docs.djangoproject.com/en/3.1/ref/applications/
"""
from django.apps import AppConfig
class UsersConfig(AppConfig):
name = "systemtest.users"
verbose_name = "Users"
def ready(self):
try:
import systemtest.users.signals # noqa F401
except ImportError:
pass
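The ready() hook above only does something if systemtest.users.signals registers receivers at import time. As an illustration of what such a module commonly looks like (hypothetical handler, not taken from this project):
# illustrative sketch of a signals module wired up via AppConfig.ready()
from django.contrib.auth import get_user_model
from django.db.models.signals import post_save
from django.dispatch import receiver
@receiver(post_save, sender=get_user_model())
def on_user_saved(sender, instance, created, **kwargs):
    if created:
        # e.g. create a related profile record or send a welcome notification
        pass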
| 19.263158
| 63
| 0.628415
| 223
| 0.60929
| 0
| 0
| 0
| 0
| 0
| 0
| 140
| 0.382514
|
49252e8b63616c3d05ff08bd21ef0e85c4a7e7b9
| 1,675
|
py
|
Python
|
setup.py
|
grigi/configy
|
86f6bdd3164f39e83e82e3527f5863032c0ed1e7
|
[
"MIT"
] | 3
|
2015-09-18T13:06:04.000Z
|
2021-08-10T16:37:21.000Z
|
setup.py
|
grigi/configy
|
86f6bdd3164f39e83e82e3527f5863032c0ed1e7
|
[
"MIT"
] | null | null | null |
setup.py
|
grigi/configy
|
86f6bdd3164f39e83e82e3527f5863032c0ed1e7
|
[
"MIT"
] | null | null | null |
import sys
from setuptools import setup, find_packages
def get_version(fname):
import re
verstrline = open(fname, "rt").read()
mo = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", verstrline, re.M)
if mo:
return mo.group(1)
else:
raise RuntimeError("Unable to find version string in %s." % (fname,))
def get_test_requirements():
requirements = []
if sys.version_info[0:2] == (2, 6):
requirements.append('unittest2')
return requirements
setup(
name='configy',
version=get_version('configy/__init__.py'),
description='Simple Configuration manager, plays well with testing',
long_description=open('README.rst').read(),
author='Nickolas Grigoriadis',
author_email='nagrigoriadis@gmail.com',
url='https://github.com/grigi/configy',
zip_safe=False,
test_suite='configy.test_suite',
# Dependencies
install_requires=[
'PyYAML',
],
tests_require=get_test_requirements(),
# Packages
packages=find_packages(),
include_package_data=True,
# Scripts
scripts=[],
# Classifiers
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
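get_version() above works by scanning configy/__init__.py for a module-level version assignment; the regular expression matches a line of this shape (single or double quotes, value shown here is illustrative):
# configy/__init__.py (illustrative)
__version__ = '0.2.0'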
| 28.389831
| 77
| 0.62209
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 770
| 0.459701
|
4925bdb182506624c0e0646cabaacc310b61faa3
| 2,755
|
py
|
Python
|
opentelemetry-sdk/tests/trace/export/test_in_memory_span_exporter.py
|
vmihailenco/opentelemetry-python
|
0a9eba3bb62f4ddf686b55b68286979a5ec84de5
|
[
"Apache-2.0"
] | 2
|
2020-08-13T21:10:48.000Z
|
2020-09-30T00:55:05.000Z
|
opentelemetry-sdk/tests/trace/export/test_in_memory_span_exporter.py
|
vmihailenco/opentelemetry-python
|
0a9eba3bb62f4ddf686b55b68286979a5ec84de5
|
[
"Apache-2.0"
] | 1
|
2021-02-24T01:32:32.000Z
|
2021-02-24T01:32:32.000Z
|
opentelemetry-sdk/tests/trace/export/test_in_memory_span_exporter.py
|
vmihailenco/opentelemetry-python
|
0a9eba3bb62f4ddf686b55b68286979a5ec84de5
|
[
"Apache-2.0"
] | 2
|
2020-06-01T12:38:23.000Z
|
2021-01-07T10:55:47.000Z
|
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from unittest import mock
from opentelemetry import trace as trace_api
from opentelemetry.sdk import trace
from opentelemetry.sdk.trace import export
from opentelemetry.sdk.trace.export.in_memory_span_exporter import (
InMemorySpanExporter,
)
class TestInMemorySpanExporter(unittest.TestCase):
def setUp(self):
self.tracer_provider = trace.TracerProvider()
self.tracer = self.tracer_provider.get_tracer(__name__)
self.memory_exporter = InMemorySpanExporter()
span_processor = export.SimpleExportSpanProcessor(self.memory_exporter)
self.tracer_provider.add_span_processor(span_processor)
self.exec_scenario()
def exec_scenario(self):
with self.tracer.start_as_current_span("foo"):
with self.tracer.start_as_current_span("bar"):
with self.tracer.start_as_current_span("xxx"):
pass
def test_get_finished_spans(self):
span_list = self.memory_exporter.get_finished_spans()
spans_names_list = [span.name for span in span_list]
self.assertListEqual(["xxx", "bar", "foo"], spans_names_list)
def test_clear(self):
self.memory_exporter.clear()
span_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(span_list), 0)
def test_shutdown(self):
span_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(span_list), 3)
self.memory_exporter.shutdown()
# after shutdown no new spans are accepted
self.exec_scenario()
span_list = self.memory_exporter.get_finished_spans()
self.assertEqual(len(span_list), 3)
def test_return_code(self):
span = trace.Span("name", mock.Mock(spec=trace_api.SpanContext))
span_list = (span,)
memory_exporter = InMemorySpanExporter()
ret = memory_exporter.export(span_list)
self.assertEqual(ret, export.SpanExportResult.SUCCESS)
memory_exporter.shutdown()
# after shutdown export should fail
ret = memory_exporter.export(span_list)
self.assertEqual(ret, export.SpanExportResult.FAILURE)
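The same exporter is just as useful outside unittest; a short sketch using the identical (pre-1.0) SDK API exercised in this file:
from opentelemetry.sdk import trace
from opentelemetry.sdk.trace import export
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
provider = trace.TracerProvider()
exporter = InMemorySpanExporter()
provider.add_span_processor(export.SimpleExportSpanProcessor(exporter))
with provider.get_tracer(__name__).start_as_current_span("unit-of-work"):
    pass
assert [s.name for s in exporter.get_finished_spans()] == ["unit-of-work"]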
| 36.25
| 79
| 0.717604
| 1,903
| 0.690744
| 0
| 0
| 0
| 0
| 0
| 0
| 684
| 0.248276
|
49289bb9e651ac9eb5d083a6d7aaea07feb28a79
| 2,417
|
py
|
Python
|
lib/acli/commands/ec2.py
|
jonhadfield/acli
|
d14abf3ad67bb8cb5ecac93c380544a16eddc7fb
|
[
"MIT"
] | 9
|
2015-10-06T01:33:39.000Z
|
2017-08-23T22:32:50.000Z
|
lib/acli/commands/ec2.py
|
jonhadfield/acli
|
d14abf3ad67bb8cb5ecac93c380544a16eddc7fb
|
[
"MIT"
] | 6
|
2016-05-06T07:30:01.000Z
|
2020-06-22T08:11:40.000Z
|
lib/acli/commands/ec2.py
|
jonhadfield/acli
|
d14abf3ad67bb8cb5ecac93c380544a16eddc7fb
|
[
"MIT"
] | 1
|
2020-06-01T10:44:23.000Z
|
2020-06-01T10:44:23.000Z
|
# -*- coding: utf-8 -*-
"""Usage:
acli ec2 (ls | list | summary) [options] [--region=<region>]
acli ec2 (start | stop | reboot | terminate | info | cpu | vols | net) <instance_id> [options]
-f, --filter=<term> filter results by term
-s, --start=<start_date> metrics start-date
-e, --end=<end_date> metrics end-date
-p, --period=<period> metrics period
-i, --intervals=<intervals> metrics intervals
-h, --help
"""
from __future__ import (absolute_import, print_function, unicode_literals)
from docopt import docopt
from acli.services import (ec2, cloudwatch)
def ec2_command(argv=None, aws_config=None):
ec2_res = docopt(__doc__, argv=argv)
if any((ec2_res.get('ls'), ec2_res.get('list'))):
ec2.ec2_list(aws_config, filter_term=ec2_res.get('--filter'))
elif ec2_res.get('info'):
ec2.ec2_info(aws_config, instance_id=ec2_res.get('<instance_id>'))
elif ec2_res.get('stop'):
ec2.ec2_manage(aws_config, instance_id=ec2_res.get('<instance_id>'), action="stop")
elif ec2_res.get('reboot'):
ec2.ec2_manage(aws_config, instance_id=ec2_res.get('<instance_id>'), action="reboot")
elif ec2_res.get('start'):
ec2.ec2_manage(aws_config, instance_id=ec2_res.get('<instance_id>'), action="start")
elif ec2_res.get('terminate'):
ec2.ec2_manage(aws_config, instance_id=ec2_res.get('<instance_id>'), action="terminate")
elif ec2_res.get('cpu'):
cloudwatch.ec2_cpu(aws_config=aws_config, instance_id=ec2_res.get('<instance_id>'))
elif ec2_res.get('net'):
cloudwatch.ec2_net(aws_config=aws_config,
instance_id=ec2_res.get('<instance_id>'),
start=ec2_res.get('--start'),
period=ec2_res.get('--end'),
                           intervals=ec2_res.get('--intervals')
)
elif ec2_res.get('vols'):
cloudwatch.ec2_vol(aws_config=aws_config,
instance_id=ec2_res.get('<instance_id>'),
start=ec2_res.get('--start'),
period=ec2_res.get('--end'),
                           intervals=ec2_res.get('--intervals')
)
elif ec2_res.get('summary'):
ec2.ec2_summary(aws_config=aws_config)
if __name__ == '__main__':
print(docopt(__doc__))
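Since docopt parses argv against the usage string at the top of the module, the dispatch above only ever inspects a dict of flags and values. A reduced, hedged example of that parsing step (simplified usage string, not the module's own):
from docopt import docopt
usage = """Usage:
  acli ec2 (ls | cpu) [<instance_id>] [--start=<start_date>]
"""
args = docopt(usage, argv=["ec2", "cpu", "i-0123cafe", "--start=2020-01-01"])
assert args["cpu"] and not args["ls"]
assert args["<instance_id>"] == "i-0123cafe"
assert args["--start"] == "2020-01-01"   # long options are keyed with their dashes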
| 45.603774
| 98
| 0.595366
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 787
| 0.32561
|
4929eddd19df20bbf2ed2c78fc54f5d2edf96f51
| 7,630
|
py
|
Python
|
pharedox/gui/gui.py
|
omarvaneer/pharynx_redox
|
ffcd5733fd0823244f50590951e9af0bc9ae2518
|
[
"MIT"
] | 2
|
2018-06-08T12:45:03.000Z
|
2018-07-13T04:17:01.000Z
|
pharedox/gui/gui.py
|
omarvaneer/pharynx_redox
|
ffcd5733fd0823244f50590951e9af0bc9ae2518
|
[
"MIT"
] | 17
|
2020-03-18T11:43:39.000Z
|
2020-07-21T18:04:25.000Z
|
pharedox/gui/gui.py
|
half-adder/pharynx_redox
|
a5b99f6afb4a36a021d0439bb15d2c826de605b1
|
[
"MIT"
] | 3
|
2021-07-21T16:14:28.000Z
|
2021-07-27T15:38:39.000Z
|
"""
A Basic GUI based on napari
"""
import logging
import multiprocessing as mp
import sys
from multiprocessing import Process
from pathlib import Path
from typing import Optional
import matplotlib
import napari
import numpy as np
import xarray as xr
from napari.qt.threading import thread_worker
from PyQt5.QtCore import pyqtSignal
from qtpy.QtWidgets import QMessageBox, QWidget
from skimage import morphology
from skimage.measure import label
from pharedox import experiment, plots, utils
from pharedox.gui.qt_py_files.pipeline_buttons import Ui_Form
# set matplotlib to use headless backend, otherwise it crashes the app when it tries to save
matplotlib.use("agg")
def remove_small_objects(label_data, min_obj_size):
return xr.apply_ufunc(
lambda x: morphology.remove_small_objects(x, min_obj_size),
label_data,
vectorize=True,
input_core_dims=[["y", "x"]],
output_core_dims=[["y", "x"]],
)
def run_experiment(exp_: experiment.Experiment):
exp_.full_pipeline()
@thread_worker
def run_neuron_analysis(exp_: experiment.Experiment,):
exp_.full_pipeline()
class PipelineButtonsWidget(QWidget):
t_slider_changed = pyqtSignal()
def __init__(self, *args, **kwargs):
super(PipelineButtonsWidget, self).__init__(*args, **kwargs)
self.ui = Ui_Form()
self.ui.setupUi(self)
self.ui.thresholdSlider.setMinimum(np.iinfo(np.uint16).min)
self.ui.thresholdSlider.setMaximum(np.iinfo(np.uint16).max)
self.ui.thresholdSlider.valueChanged.connect(self.handle_t_slider_changed)
self.ui.thresholdSpinBox.valueChanged.connect(
self.handle_threshold_spin_box_changed
)
def handle_t_slider_changed(self):
self.ui.thresholdSpinBox.setValue(self.ui.thresholdSlider.value())
self.t_slider_changed.emit()
def handle_threshold_spin_box_changed(self):
self.ui.thresholdSlider.setValue(self.ui.thresholdSpinBox.value())
self.t_slider_changed.emit()
class App:
viewer = None
buttons = None
def __init__(self, exp_):
self.experiment = exp_
def set_up_viewer(self):
self.viewer = napari.Viewer()
self.buttons = PipelineButtonsWidget()
self.viewer.window.add_dock_widget(self.buttons, name="pipeline", area="left")
# connect signals/slots
self.buttons.ui.runNeuronsButton.pressed.connect(self.run_neuron_analysis)
self.buttons.ui.runPharynxButton.pressed.connect(self.run_pharynx_analysis)
self.buttons.t_slider_changed.connect(self.handle_t_slider_changed)
self.buttons.ui.removeObjectsButton.pressed.connect(
self.handle_remove_objects_pressed
)
def run_pharynx_analysis(self):
if self.experiment.seg_images is None:
self.show_simple_dialog("no masks")
else:
p = Process(target=run_experiment, args=(self.experiment,))
p.start()
self.show_simple_dialog("analysis running in background (check log)")
@thread_worker
def run_neuron_analysis(self):
if self.experiment.seg_images is None:
self.show_simple_dialog("no masks")
else:
worker = run_neuron_analysis(self.experiment)
worker.errored.connect(
lambda e: self.show_simple_dialog(f"The pipeline had an error {e}")
)
worker.start()
self.show_simple_dialog("analysis running in background (check log)")
def show_dialog(self, message, title=""):
msg_box = QMessageBox()
msg_box.setIcon(QMessageBox.Information)
msg_box.setText(message)
msg_box.setWindowTitle(title)
msg_box.setStandardButtons(QMessageBox.Open | QMessageBox.Ok)
return_value = msg_box.exec()
if return_value == QMessageBox.Open:
utils.open_folder(self.experiment.analysis_dir)
@staticmethod
def show_simple_dialog(message, title=""):
msg_box = QMessageBox()
msg_box.setIcon(QMessageBox.Information)
msg_box.setText(message)
msg_box.setWindowTitle(title)
msg_box.setStandardButtons(QMessageBox.Ok)
msg_box.exec()
def get_layer(self, name):
for layer in self.viewer.layers:
if layer.name == name:
return layer
return None
def handle_remove_objects_pressed(self):
if self.experiment.seg_images is None:
return
layer = self.get_layer("masks")
min_obj_size = self.buttons.ui.smallObjectSizeSpinBox.value()
self.experiment.seg_images = remove_small_objects(
self.experiment.seg_images, min_obj_size
)
layer.data = self.experiment.seg_images.values
layer.refresh()
def get_current_wvl(self) -> Optional[str]:
"""
Get the wavelength of the active layer in Napari, or `None` if the active layer
name does not correspond to a wavelength in the experiment's images.
"""
wvl_candidate = self.viewer.active_layer.name
true_wvls = self.experiment.images.wavelength.values
if wvl_candidate in true_wvls:
return wvl_candidate
else:
return None
def segment_pharynxes(self, t) -> xr.DataArray:
wvl = self.get_current_wvl()
if wvl is None:
self.show_simple_dialog(
message="The active layer does not correspond to a wavelength in the data set.",
title="Invalid Wavelength Selected",
)
return
masks = self.experiment.images.sel(wavelength=wvl) > t
masks = xr.apply_ufunc(
lambda x: label(x),
masks,
vectorize=True,
input_core_dims=[["y", "x"]],
output_core_dims=[["y", "x"]],
)
return masks
def handle_t_slider_changed(self):
t = self.buttons.ui.thresholdSlider.value()
self.update_threshold(t)
def update_threshold(self, t):
masks = self.segment_pharynxes(t)
if masks is None:
return
self.experiment.seg_images = masks
try:
self.get_layer("masks").data = masks
except AttributeError:
current_layer = self.viewer.active_layer
self.viewer.add_labels(self.experiment.seg_images, name="masks")
self.viewer.active_layer = current_layer
self.get_layer("masks").refresh()
def run(self):
with napari.gui_qt():
self.set_up_viewer()
if self.experiment.images is not None:
self.viewer.add_image(
plots.imgs_to_rgb(
self.experiment.images, r_min=0.9, r_max=1.9, i_max=1500,
),
name="R (population-normalized)",
)
for wvl in self.experiment.images.wavelength.values:
self.viewer.add_image(
self.experiment.images.sel(wavelength=wvl),
name=wvl,
visible=False,
)
if self.experiment.seg_images is not None:
self.viewer.add_labels(
self.experiment.seg_images, name="masks",
)
if __name__ == "__main__":
logging.basicConfig(
format="%(asctime)s %(levelname)s:%(message)s",
level=logging.INFO,
datefmt="%I:%M:%S",
)
exp_dir = sys.argv[1]
exp = experiment.Experiment(Path(exp_dir))
app = App(exp_=exp)
app.run()
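remove_small_objects() and segment_pharynxes() above both use the same xarray.apply_ufunc idiom: declare ["y", "x"] as core dimensions and set vectorize=True so a 2-D image operation is broadcast over every leading dimension of the stack. A small self-contained illustration with synthetic data (not this project's images):
import numpy as np
import xarray as xr
from skimage.measure import label
stack = xr.DataArray(np.random.rand(3, 32, 32) > 0.95, dims=("frame", "y", "x"))
labeled = xr.apply_ufunc(
    label,                          # applied frame by frame
    stack,
    vectorize=True,
    input_core_dims=[["y", "x"]],
    output_core_dims=[["y", "x"]],
)
print(labeled.dims)                 # ('frame', 'y', 'x')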
| 31.399177
| 96
| 0.636697
| 6,217
| 0.81481
| 0
| 0
| 839
| 0.109961
| 0
| 0
| 740
| 0.096986
|
4929f7cf615e61de5c4f61ef44c5340e9ac4492a
| 3,290
|
py
|
Python
|
python/paddle/v2/fluid/tests/book/test_understand_sentiment_conv.py
|
QingshuChen/Paddle
|
25a92be3e123ed21fd98c7be6bd7e3a6320756a3
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/v2/fluid/tests/book/test_understand_sentiment_conv.py
|
QingshuChen/Paddle
|
25a92be3e123ed21fd98c7be6bd7e3a6320756a3
|
[
"Apache-2.0"
] | 9
|
2017-09-13T07:39:31.000Z
|
2017-10-18T05:58:23.000Z
|
python/paddle/v2/fluid/tests/book/test_understand_sentiment_conv.py
|
QingshuChen/Paddle
|
25a92be3e123ed21fd98c7be6bd7e3a6320756a3
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import paddle.v2 as paddle
import paddle.v2.fluid.core as core
import paddle.v2.fluid.evaluator as evaluator
import paddle.v2.fluid.framework as framework
import paddle.v2.fluid.layers as layers
import paddle.v2.fluid.nets as nets
from paddle.v2.fluid.executor import Executor
from paddle.v2.fluid.optimizer import AdamOptimizer
def convolution_net(input_dim, class_dim=2, emb_dim=32, hid_dim=32):
data = layers.data(name="words", shape=[1], data_type="int64")
label = layers.data(name="label", shape=[1], data_type="int64")
emb = layers.embedding(input=data, size=[input_dim, emb_dim])
conv_3 = nets.sequence_conv_pool(
input=emb,
num_filters=hid_dim,
filter_size=3,
act="tanh",
pool_type="sqrt")
conv_4 = nets.sequence_conv_pool(
input=emb,
num_filters=hid_dim,
filter_size=4,
act="tanh",
pool_type="sqrt")
prediction = layers.fc(input=[conv_3, conv_4],
size=class_dim,
act="softmax")
cost = layers.cross_entropy(input=prediction, label=label)
avg_cost = layers.mean(x=cost)
adam_optimizer = AdamOptimizer(learning_rate=0.002)
opts = adam_optimizer.minimize(avg_cost)
accuracy, acc_out = evaluator.accuracy(input=prediction, label=label)
return avg_cost, accuracy, acc_out
def to_lodtensor(data, place):
seq_lens = [len(seq) for seq in data]
cur_len = 0
lod = [cur_len]
for l in seq_lens:
cur_len += l
lod.append(cur_len)
flattened_data = np.concatenate(data, axis=0).astype("int64")
flattened_data = flattened_data.reshape([len(flattened_data), 1])
res = core.LoDTensor()
res.set(flattened_data, place)
res.set_lod([lod])
return res
def main():
BATCH_SIZE = 100
PASS_NUM = 5
word_dict = paddle.dataset.imdb.word_dict()
dict_dim = len(word_dict)
class_dim = 2
cost, accuracy, acc_out = convolution_net(
input_dim=dict_dim, class_dim=class_dim)
train_data = paddle.batch(
paddle.reader.shuffle(
paddle.dataset.imdb.train(word_dict), buf_size=1000),
batch_size=BATCH_SIZE)
place = core.CPUPlace()
exe = Executor(place)
exe.run(framework.default_startup_program())
    for pass_id in range(PASS_NUM):
accuracy.reset(exe)
for data in train_data():
            tensor_words = to_lodtensor([x[0] for x in data], place)
            label = np.array([x[1] for x in data]).astype("int64")
label = label.reshape([BATCH_SIZE, 1])
tensor_label = core.LoDTensor()
tensor_label.set(label, place)
outs = exe.run(framework.default_main_program(),
feed={"words": tensor_words,
"label": tensor_label},
fetch_list=[cost, acc_out])
cost_val = np.array(outs[0])
acc_val = np.array(outs[1])
pass_acc = accuracy.eval(exe)
print("cost=" + str(cost_val) + " acc=" + str(acc_val) +
" pass_acc=" + str(pass_acc))
if cost_val < 1.0 and pass_acc > 0.8:
exit(0)
exit(1)
if __name__ == '__main__':
main()
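to_lodtensor() builds the level-of-detail (LoD) index by accumulating sequence lengths into offsets into the flattened data; a worked example of just that step, independent of Paddle:
seqs = [[1, 2, 3], [4, 5], [6, 7, 8, 9]]
lod, cur = [0], 0
for s in seqs:
    cur += len(s)
    lod.append(cur)
assert lod == [0, 3, 5, 9]   # offsets delimiting each sequence in the flattened tensor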
| 32.254902
| 73
| 0.620669
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 125
| 0.037994
|
492b44bce7d8334fd49f7ee0f9eda198e717c6cb
| 6,847
|
py
|
Python
|
sem.py
|
sree314/simple-abstract-interpreter
|
6445db9ea2c8418ece4ec1436e552fb427d7ae2f
|
[
"CC0-1.0"
] | 3
|
2020-05-04T20:09:30.000Z
|
2022-01-29T20:34:03.000Z
|
sem.py
|
sree314/simple-abstract-interpreter
|
6445db9ea2c8418ece4ec1436e552fb427d7ae2f
|
[
"CC0-1.0"
] | null | null | null |
sem.py
|
sree314/simple-abstract-interpreter
|
6445db9ea2c8418ece4ec1436e552fb427d7ae2f
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python3
#
# sem.py
#
# An implementation of the concrete semantics, including an
# interpreter
#
# Author: Sreepathi Pai
#
# Written for CSC2/455 Spring 2020
#
# To the extent possible under law, Sreepathi Pai has waived all
# copyright and related or neighboring rights to sem.py. This work
# is published from: United States.
from typing import Dict, List
from tinyast import *
import random
import logging
logger = logging.getLogger(__name__)
# map of variables (here str, instead of Var) -> values
#TODO: we could use var if we defined hash to be on the name of Var?
Memory = Dict[str, int]
def f_binop(op: BinaryOps, left: Scalar, right: Scalar) -> Scalar:
if op == '+':
return left + right
elif op == '-':
return left - right
elif op == '*':
return left * right
elif op == '/':
return left // right
else:
raise NotImplementedError(f"Unknown operator: {op}")
def f_cmpop(op: ComparisonOps, left: Scalar, right: Scalar) -> bool:
if op == '<':
return left < right
elif op == '>':
return left > right
elif op == '<=':
return left <= right
elif op == '>=':
return left >= right
elif op == '!=':
return left != right
else:
raise NotImplementedError(f"Unknown comparison operator: {op}")
def evaluate_Expr(E: Expr, m: Memory) -> Scalar:
if isinstance(E, Scalar):
return E
elif isinstance(E, Var):
return m[E.name]
elif isinstance(E, BinOp):
return f_binop(E.op,
evaluate_Expr(E.left, m),
evaluate_Expr(E.right, m))
def evaluate_BoolExpr(B: BoolExpr, m: Memory) -> bool:
return f_cmpop(B.op, m[B.left.name], B.right)
def filter_memory(B: BoolExpr, M: List[Memory], res = True) -> List[Memory]:
out = [m for m in M if evaluate_BoolExpr(B, m) == res]
return list(out) #TODO: why materialize this generator?
def union_memories(M0: List[Memory], M1: List[Memory]) -> List[Memory]:
# this is, of course, ridiculous
# convert everything to sets
M0_set = set([frozenset(m.items()) for m in M0])
M1_set = set([frozenset(m.items()) for m in M1])
M_set = M0_set.union(M1_set)
# convert back to lists of dicts
return list([dict(m) for m in M_set])
# M is a set of memory states, it belongs to Powerset(Memory)
# We're using List, because set would choke on Dict and we don't have a frozendict type...
def evaluate_Cmd(C: Cmd, M: List[Memory]) -> List[Memory]:
def update_memories(var, value_lambda):
out = []
for m in M:
# not sure using dicts is gaining us anything when we're copying dicts around...
m_out = dict(m)
m_out[var] = value_lambda(m)
out.append(m_out)
return out
if isinstance(C, Skip):
return M
elif isinstance(C, Program):
return evaluate_Cmd(C.program, M)
elif isinstance(C, Assign):
return update_memories(C.left.name, lambda m: evaluate_Expr(C.right, m))
elif isinstance(C, Input):
n = random.randint(0, 100) # could be anything, actually
return update_memories(C.var.name, lambda _: n)
elif isinstance(C, Seq):
return evaluate_Cmd(C.cmd1, evaluate_Cmd(C.cmd0, M))
elif isinstance(C, IfThenElse):
then_memory = evaluate_Cmd(C.then_, filter_memory(C.cond, M))
else_memory = evaluate_Cmd(C.else_, filter_memory(C.cond, M, res = False))
return union_memories(then_memory, else_memory)
elif isinstance(C, While):
# L0 but we apply filter at the end
out = [m for m in M] # copy all input states
# the next loop computes L1, L2, L3, ....
# identify those memories where condition is true
pre_iter_memories = filter_memory(C.cond, out)
accum: List[Memory] = []
while len(pre_iter_memories):
logger.debug(f"pre_iter_memories: {pre_iter_memories}")
after_iter_memories = evaluate_Cmd(C.body, pre_iter_memories)
logger.debug(f"after_iter_memories: {after_iter_memories}")
accum = union_memories(accum, after_iter_memories)
logger.debug(f"accum: {accum}")
# only keep memories where the condition is true for the next iteration
pre_iter_memories = filter_memory(C.cond, after_iter_memories)
# This computes L0 U (L1 U L2...) and retains only those memory states where the loop has
# terminated.
#
# we have exited the loop, so only keep those memories where condition is false
out = filter_memory(C.cond, union_memories(out, accum), res=False)
return out
else:
raise NotImplementedError(f"Don't know how to interpret {type(C).__name__}({C})")
def test_evaluate_Expr():
x = Var('x')
y = Var('y')
m = {'x': 5, 'y': 6}
x1 = BinOp('+', x, y)
ex1 = evaluate_Expr(x1, m)
assert ex1 == 11, ex1
def test_evaluate_BoolExpr():
x = Var('x')
y = Var('y')
m = {'x': 5, 'y': 6}
b1 = BoolExpr('<', x, 6)
eb1 = evaluate_BoolExpr(b1, m)
assert eb1 == True, eb1
def test_evaluate_Cmd():
#TODO: actually put in asserts for testing. Right now, rely on visual inspection...
x = Var('x')
y = Var('y')
m1 = {'x': 5, 'y': 6}
m2 = {'x': 8, 'y': 7}
M_in = [m1, m2]
s = Program(Skip())
M_out = evaluate_Cmd(s, M_in)
print(M_out)
pasgn = Program(Assign(x, 9))
M_out = evaluate_Cmd(pasgn, M_in)
print(M_out)
pinput = Program(Input(y))
M_out = evaluate_Cmd(pinput, M_in)
print(M_out)
pseq = Program(sequence([Assign(x, 10), Assign(y, 11)]))
M_out = evaluate_Cmd(pseq, M_in)
print(M_out)
pite = Program(IfThenElse(BoolExpr('>', x, 7),
Assign(y, BinOp('-', x, 7)),
Assign(y, BinOp('-', 7, x))
)
)
M_out = evaluate_Cmd(pite, M_in)
print(M_out)
ploop = Program(While(BoolExpr('<', x, 7),
Seq(Assign(y, BinOp('-', y, 1)),
Assign(x, BinOp('+', x, 1)))
))
M_out = evaluate_Cmd(ploop, M_in)
print(M_in, M_out)
def test_While():
x = Var('x')
y = Var('y')
m1 = {x.name: 4, y.name: 0}
m2 = {x.name: 8, y.name: 0}
m3 = {x.name: 5, y.name: 0}
M_in = [m1, m2, m3]
print(M_in)
p = Program(While(BoolExpr('<', x, 7),
Seq(Assign(y, BinOp('+', y, 1)),
Assign(x, BinOp('+', x, 1)))))
print(p)
M_out = evaluate_Cmd(p, M_in)
print(M_out)
if __name__ == "__main__":
logging.basicConfig(level = logging.DEBUG)
test_evaluate_Expr()
test_evaluate_BoolExpr()
test_evaluate_Cmd()
test_While()
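A worked trace may make the While case above concrete. With M = [{x: 5}, {x: 8}] and the loop `while x < 7: x := x + 1`, the pre-iteration filter keeps only {x: 5}; one body evaluation gives {x: 6}, which still satisfies the guard, and the next gives {x: 7}, so the accumulator holds {x: 6} and {x: 7}; filtering the union {x: 5}, {x: 8}, {x: 6}, {x: 7} by the negated guard leaves {x: 7} and {x: 8}. The same can be checked directly against this module's definitions:
M = [{'x': 5}, {'x': 8}]
loop = Program(While(BoolExpr('<', Var('x'), 7),
                     Assign(Var('x'), BinOp('+', Var('x'), 1))))
print(evaluate_Cmd(loop, M))   # memories with x == 7 and x == 8, in some order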
| 30.162996
| 97
| 0.590916
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,665
| 0.243172
|
492e98f0563190adbf1aba90431f35623fa73162
| 4,925
|
py
|
Python
|
python_script/neutron.py
|
namptit307/openstack_upgrade_test
|
a6a4ce57a931ce6959f85b7bafa95e10a0d0ed52
|
[
"MIT"
] | 1
|
2018-06-26T03:37:17.000Z
|
2018-06-26T03:37:17.000Z
|
python_script/neutron.py
|
namptit307/openstack_upgrade_test
|
a6a4ce57a931ce6959f85b7bafa95e10a0d0ed52
|
[
"MIT"
] | null | null | null |
python_script/neutron.py
|
namptit307/openstack_upgrade_test
|
a6a4ce57a931ce6959f85b7bafa95e10a0d0ed52
|
[
"MIT"
] | null | null | null |
import json
import time
from requests import ConnectionError
from config import *
from utils import *
from get_auth import TOKEN
# Create network
create_network_url = "http://{}:9696/v2.0/networks".format(IP)
token_headers = {
'X-Auth-Token': TOKEN,
'Content-Type': 'application/json'
}
# Create router
create_router_url = "http://{}:9696/v2.0/routers".format(IP)
# Get network for DELETE
get_network_list_url = create_network_url
future = send_request(get_network_list_url, 'GET', headers=token_headers)
result = future.result().content
result = json.loads(result)
list_networks = result.get("networks")
list_networks = [network for network in list_networks if "testing" in network.get('name')]
# Get routers for DELETE
get_router_list_url = create_router_url
future = send_request(get_router_list_url, 'GET', headers=token_headers)
result = future.result().content
result = json.loads(result)
list_routers = result.get("routers")
list_routers = [router for router in list_routers if "testing" in router.get('name')]
# Update network
# We should have a separate network for updating --> make sure a network exists to update.
NETWORK_ID = "f6e3556e-29ab-4ee7-ba64-7fab0c423e26"
# Update router
# We should have a separate router for updating --> make sure a router exists to update.
ROUTER_ID = "b0e19990-d9ba-4981-9da7-5aeec2957c77"
if __name__ == '__main__':
i = 1
while continue_test:
time.sleep(0.3)
try:
# Create network
create_network_data = {
"network": {
"name": "new_network_{}".format(i)
}
}
i += 1
future = send_request(create_network_url, 'POST',
headers=token_headers,
data=json.JSONEncoder().encode(create_network_data))
try:
result = future.result().content
result = json.loads(result)
network = result.get('network')
if type(network) is dict:
network_id = result['network']['id']
create_subnet_data = {
"subnet": {
"network_id": network_id,
"ip_version": 4,
"cidr": "192.168.199.0/24"
}
}
create_subnet_url = "http://{}:9696/v2.0/subnets".format(IP)
send_request(create_subnet_url, 'POST',
headers=token_headers,
data=json.JSONEncoder().encode(create_subnet_data))
except:
pass
# Get and delete network
if not (len(list_networks) == 0):
delete_network = list_networks.pop()
delete_network_id = delete_network.get("id")
get_network_url = "http://{}:9696/v2.0/networks/{}".format(IP, delete_network_id)
send_request(get_network_url, 'GET', headers=token_headers)
send_request(get_network_url, 'DELETE', headers=token_headers)
# Update network name
update_network_data = {
"network": {
"name": "new_name_{}".format(i)
}
}
update_network_url = "http://{}:9696/v2.0/networks/{}".format(IP, NETWORK_ID)
send_request(update_network_url, 'PUT',
headers=token_headers,
data=json.JSONEncoder().encode(update_network_data))
# Create router
create_router_data = {
"router": {
"name": "new_router_{}".format(i)
}
}
future = send_request(create_router_url, 'POST',
headers=token_headers,
data=json.JSONEncoder().encode(create_router_data))
            # Get and delete router
if not (len(list_routers) == 0):
delete_router = list_routers.pop()
delete_router_id = delete_router.get("id")
get_router_url = "http://{}:9696/v2.0/routers/{}".format(IP, delete_router_id)
send_request(get_router_url, 'GET', headers=token_headers)
send_request(get_router_url, 'DELETE', headers=token_headers)
# Update router name
update_router_data = {
"router": {
"name": "new_name_{}".format(i)
}
}
update_router_url = "http://{}:9696/v2.0/routers/{}".format(IP, ROUTER_ID)
send_request(update_router_url, 'PUT',
headers=token_headers,
data=json.JSONEncoder().encode(update_router_data))
except ConnectionError:
pass
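For readers unfamiliar with the Neutron v2.0 payload shapes this script depends on: a create-network POST sends {"network": {...}} and the reply carries the same envelope plus server-side fields, which is why the code reads result['network']['id'] before creating the subnet. A minimal hedged sketch of that round trip with plain requests (endpoint and token are placeholders):
import json
import requests
headers = {"X-Auth-Token": "<token>", "Content-Type": "application/json"}
body = {"network": {"name": "new_network_1"}}
resp = requests.post("http://controller:9696/v2.0/networks", headers=headers, data=json.dumps(body))
network_id = resp.json()["network"]["id"]   # reused as network_id in the subnet request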
| 39.087302
| 97
| 0.552081
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,074
| 0.218071
|
492f40da8141b4f392fd82f2242b755c0bb7c8b7
| 1,406
|
py
|
Python
|
app/view/forms.py
|
weizy1981/WatsonRobot
|
bb718a589a8f2d394fbc86582bff29c1015e79fc
|
[
"Apache-2.0"
] | null | null | null |
app/view/forms.py
|
weizy1981/WatsonRobot
|
bb718a589a8f2d394fbc86582bff29c1015e79fc
|
[
"Apache-2.0"
] | null | null | null |
app/view/forms.py
|
weizy1981/WatsonRobot
|
bb718a589a8f2d394fbc86582bff29c1015e79fc
|
[
"Apache-2.0"
] | null | null | null |
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, FileField
from wtforms.validators import DataRequired
from app.view import errormessage
class LoginForm(FlaskForm):
username = StringField(label='username', validators=[DataRequired(errormessage.LOGIN002)])
password = PasswordField(label='password', validators=[DataRequired(errormessage.LOGIN003)])
submit = SubmitField(label='GO')
class FileForm(FlaskForm):
file = FileField(label='File')
submit = SubmitField(label='GO')
class QuestionForm(FlaskForm):
question = StringField(label='Question',
validators=[DataRequired(errormessage.ONLINE_SERVICE001)])
submit = SubmitField(label='GO')
class QuerySNForm(FlaskForm):
customer_id = StringField(label='Customer_Id', validators=[DataRequired(errormessage.QUERY_SN001)])
submit = SubmitField(label='GO')
action = ''
class CustomerForm(FlaskForm):
customer_id = StringField(label='Customer_Id', validators=[DataRequired(errormessage.CUSTOMER_INPUT001)])
name = StringField(label='Name', validators=[DataRequired(errormessage.CUSTOMER_INPUT002)])
age = StringField(label='Age', validators=[DataRequired(errormessage.CUSTOMER_INPUT003)])
sex = StringField(label='Sex', validators=[DataRequired(errormessage.CUSTOMER_INPUT004)])
submit = SubmitField(label='GO')
action = ''
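These FlaskForm subclasses are normally consumed in a view via validate_on_submit(), which runs the DataRequired validators declared above; a minimal hedged sketch (route, template name and secret key are illustrative):
from flask import Flask, redirect, render_template
app = Flask(__name__)
app.config["SECRET_KEY"] = "change-me"          # needed by Flask-WTF for CSRF
@app.route("/login", methods=["GET", "POST"])
def login():
    form = LoginForm()
    if form.validate_on_submit():               # POST with all DataRequired fields present
        # ... check form.username.data / form.password.data here ...
        return redirect("/")
    return render_template("login.html", form=form)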
| 43.9375
| 109
| 0.753201
| 1,215
| 0.864154
| 0
| 0
| 0
| 0
| 0
| 0
| 102
| 0.072546
|
492f4214ee6f18acfc6f5dd9c2a40c4761fa8d61
| 2,519
|
py
|
Python
|
setup.py
|
alexweav/nisystemlink-clients-python
|
f19a30907a7fef536043ecbddc5a755e5fedf846
|
[
"MIT"
] | null | null | null |
setup.py
|
alexweav/nisystemlink-clients-python
|
f19a30907a7fef536043ecbddc5a755e5fedf846
|
[
"MIT"
] | null | null | null |
setup.py
|
alexweav/nisystemlink-clients-python
|
f19a30907a7fef536043ecbddc5a755e5fedf846
|
[
"MIT"
] | null | null | null |
from setuptools import find_namespace_packages, find_packages, setup # type: ignore
from setuptools.command.test import test as TestCommand # type: ignore
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest # type: ignore
pytest.main(self.test_args)
pypi_name = "nisystemlink-clients"
packages = find_namespace_packages(include=["systemlink.*"]) + find_packages(
exclude=["systemlink", "examples", "tests"]
)
def _get_version(name):
import os
version = None
script_dir = os.path.dirname(os.path.realpath(__file__))
script_dir = os.path.join(script_dir, name)
if not os.path.exists(os.path.join(script_dir, "VERSION")):
version = "0.1.3"
else:
with open(os.path.join(script_dir, "VERSION"), "r") as version_file:
version = version_file.read().rstrip()
return version
def _read_contents(file_to_read):
with open(file_to_read, "r") as f:
return f.read()
setup(
name=pypi_name,
version=_get_version(pypi_name),
description="NI-SystemLink Python API",
long_description=_read_contents("README.rst"),
author="National Instruments",
maintainer="Paul Spangler, Alex Weaver",
maintainer_email="paul.spangler@ni.com, alex.weaver@ni.com",
keywords=["nisystemlink", "systemlink"],
license="MIT",
packages=packages,
install_requires=[
'aenum;python_version<"3.6"',
"events",
'httpx;python_version>="3.6"',
'requests;python_version<"3.6"',
"typing-extensions",
],
tests_require=["pytest", "pytest-asyncio", "mypy"],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Manufacturing",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: System :: Hardware :: Hardware Drivers",
],
cmdclass={"test": PyTest},
package_data={"": ["VERSION", "*.pyi", "py.typed"]},
)
| 31.4875
| 84
| 0.639143
| 261
| 0.103613
| 0
| 0
| 0
| 0
| 0
| 0
| 1,006
| 0.399365
|
492f9041b41552075a47cd3267bbbce97f7550d5
| 13,534
|
py
|
Python
|
shell_ui/__init__.py
|
conducto/conducto
|
b480780905f5a25e8c803b60ca7cdf6976ce5ef6
|
[
"Apache-2.0"
] | 25
|
2020-05-07T22:51:11.000Z
|
2021-11-17T16:14:42.000Z
|
shell_ui/__init__.py
|
conducto/conducto
|
b480780905f5a25e8c803b60ca7cdf6976ce5ef6
|
[
"Apache-2.0"
] | 3
|
2020-04-21T06:38:02.000Z
|
2020-05-31T01:57:19.000Z
|
shell_ui/__init__.py
|
conducto/conducto
|
b480780905f5a25e8c803b60ca7cdf6976ce5ef6
|
[
"Apache-2.0"
] | 2
|
2020-05-14T01:47:32.000Z
|
2020-06-03T21:58:12.000Z
|
import os
import sys
import signal
import asyncio
import json
import time
import traceback
import typing
import socket
import re
import select
import websockets
if sys.platform != "win32":
import termios
import tty
else:
import msvcrt
import win32api
from .. import api
from ..shared import constants, log, types as t
from ..shared.constants import State
import conducto.internal.host_detection as hostdet
if sys.version_info < (3, 7):
# create_task is stdlib in 3.7, but we can declare it as a synonym for the
# 3.6 ensure_future
asyncio.create_task = asyncio.ensure_future
STATE_TO_COLOR = {
State.PENDING: log.Color.TRUEWHITE,
State.QUEUED: log.Color.GRAY,
State.RUNNING: log.Color.BLUE,
State.DONE: log.Color.GREEN,
State.ERROR: log.Color.RED,
State.WORKER_ERROR: log.Color.PURPLE,
}
class Listener(object):
def update_node(self, name, data):
pass
async def background_task(self, title):
pass
async def key_press(self, char):
# Listeners are passed the quit_func so that they can decide when to exit
pass
def render(self):
pass
def shutdown(self):
pass
def connect(token: t.Token, pipeline_id: t.PipelineId, starthelp: str):
pipeline = api.Pipeline().get(pipeline_id, token=token)
ui = ShellUI(token, pipeline, starthelp)
if sys.platform == "win32":
win32api.SetConsoleCtrlHandler(ui.ctrl_c, True)
try:
asyncio.get_event_loop().run_until_complete(ui.run())
except Exception:
ui.reset_stdin()
traceback.print_exc()
class ShellUI(object):
def __init__(self, token, pipeline: dict, starthelp: str):
self.pipeline = pipeline
self.quitting = False
self.loop = asyncio.get_event_loop()
self.gw_socket = None
self.start_func_complete = None
self.starthelp = starthelp
from . import one_line, full_screen
self.listeners: typing.List[Listener] = [one_line.OneLineDisplay(self)]
@property
def allow_sleep(self):
# TODO: This is an ugly time-out to avoid shutting down the shell UI
# because the NS cache still believes the pipeline is sleeping.
return self.start_func_complete and time.time() > self.start_func_complete + 3.0
async def view_loop(self):
"""
Every 0.25 seconds render the pipeline
"""
log.info("[view] starting")
while True:
await asyncio.sleep(0.25)
for listener in self.listeners:
listener.render()
def set_gw(self, gw_socket):
self.gw_socket = gw_socket
async def wait_gw(self):
while self.gw_socket is None:
await asyncio.sleep(0.1)
async def start_pipeline(self):
if self.gw_socket is None:
pipeline_id = self.pipeline["pipeline_id"]
api.Manager().launch(pipeline_id)
await self.wait_gw()
payload = {"type": "SET_AUTORUN", "payload": {"value": True}}
await self.gw_socket.send(json.dumps(payload))
async def sleep_pipeline(self):
if self.gw_socket is None:
pipeline_id = self.pipeline["pipeline_id"]
api.Pipeline().sleep_standby(pipeline_id)
else:
payload = {"type": "CLOSE_PROGRAM", "payload": None}
await self.gw_socket.send(json.dumps(payload))
async def reset(self):
if self.gw_socket is None:
pipeline_id = self.pipeline["pipeline_id"]
api.Manager().launch(pipeline_id)
await self.wait_gw()
payload = {"type": "RESET", "payload": ["/"]}
await self.gw_socket.send(json.dumps(payload))
async def gw_socket_loop(self):
"""
Loop and listen for socket messages
"""
start_tasks = await self.run_start_func()
pl = constants.PipelineLifecycle
while True:
if (
self.pipeline is None
or self.pipeline.get("status", None) not in pl.active
):
await asyncio.sleep(0.5)
continue
if start_tasks is not None:
tasks = start_tasks
# we clear the start_tasks now since later reconnects should
# show reconnecting.
start_tasks = None
else:
msg = "Connection lost. Reconnecting"
pretasks = [xx.background_task(msg) for xx in self.listeners]
tasks = [asyncio.create_task(task) for task in pretasks]
try:
websocket = await api.connect_to_pipeline(self.pipeline["pipeline_id"])
except PermissionError:
print()
print("You are not permitted to connect to this pipeline.")
self.quit()
break
except ConnectionError:
self.quit()
break
for task in tasks:
task.cancel()
for listener in self.listeners:
listener.install_normal_key_mode()
self.set_gw(websocket)
was_slept = False
try:
await websocket.send(
json.dumps({"type": "RENDER_NODE", "payload": "/"})
)
log.info("[gw_socket_loop] starting")
async for msg_text in websocket:
msg = json.loads(msg_text)
if msg["type"] in ("NODES_STATE_UPDATE", "RENDER_NODE"):
log.debug(f"incoming gw message {msg['type']}")
for name, data in msg["payload"].items():
for listener in self.listeners:
listener.update_node(name, data)
elif msg["type"] == "SLEEP":
was_slept = True
# we are done here, do not try to reconnect.
break
except websockets.ConnectionClosedError as e:
log.debug(f"ConnectionClosedError {e.code} {e.reason}")
self.set_gw(None)
if was_slept:
break
def get_ns_url(self):
url = api.Config().get_url()
url = re.sub("^http", "ws", url) + "/ns/"
return url
async def reconnect_ns(self):
ns_url = self.get_ns_url()
log.debug("[run] Connecting to", ns_url)
header = {"Authorization": f"bearer {api.Config().get_token(refresh=False)}"}
# we retry connection for roughly 2 minutes
for i in range(45):
try:
websocket = await websockets.connect(ns_url, extra_headers=header)
break
except (
websockets.ConnectionClosedError,
websockets.InvalidStatusCode,
socket.gaierror,
):
log.debug(f"cannot connect to ns ... waiting {i}")
await asyncio.sleep(min(3.0, (2 ** i) / 8))
else:
self.quit()
return None
log.debug("[run] ns Connected")
return websocket
async def ns_socket_loop(self):
"""
Loop and listen for socket messages
"""
while True:
msg = "Connection lost. Reconnecting"
if self.start_func_complete is not None:
pretasks = [xx.background_task(msg) for xx in self.listeners]
else:
pretasks = []
tasks = [asyncio.create_task(task) for task in pretasks]
websocket = await self.reconnect_ns()
for task in tasks:
task.cancel()
if websocket is None:
if self.start_func_complete is not None:
for listener in self.listeners:
listener.install_disconnect_mode()
self.quit()
break
if self.start_func_complete is not None:
for listener in self.listeners:
listener.install_normal_key_mode()
subscribe = {
"type": "SUBSCRIBE",
"payload": {"pipeline_id": self.pipeline["pipeline_id"]},
}
await websocket.send(json.dumps(subscribe))
try:
log.info("[ns_socket_loop] starting")
async for msg_text in websocket:
msg = json.loads(msg_text)
if msg["type"] in ("FULL_INFO_UPDATE",):
log.debug(f"incoming ns message {msg['type']}")
progs = msg["payload"]["programIdToInfo"]
try:
self.pipeline = progs[self.pipeline["pipeline_id"]]
except KeyError:
# TODO: the NS cache may not yet have the pipeline,
# this is to allow for that.
if self.allow_sleep:
raise
else:
continue
if "state" not in self.pipeline["meta"]:
self.pipeline["meta"] = {
"state": "pending",
"stateCounts": {x: 0 for x in STATE_TO_COLOR.keys()},
}
pl = constants.PipelineLifecycle
if self.pipeline["status"] in pl.sleeping and self.allow_sleep:
self.quit()
elif self.pipeline["status"] not in pl.active:
for listener in self.listeners:
listener.update_node("/", self.pipeline["meta"])
except websockets.ConnectionClosedError:
pass
def ctrl_c(self, a, b=None):
# This is the windows control C handler
self.quit()
return True
async def key_loop(self):
"""
Loop and listen for key inputs
"""
log.info("[key_loop] starting")
if sys.platform != "win32":
self.old_settings = termios.tcgetattr(sys.stdin.fileno())
tty.setraw(sys.stdin.fileno())
async for char in stream_as_char_generator(self.loop, sys.stdin):
if ord(char) in (3, 4):
# Ctrl+c (sigint) & Ctrl+d (eof) get captured as a non-printing
# characters with ASCII code 3 & 4 respectively. Quit
# gracefully.
self.quit()
elif ord(char) == 26:
# Ctrl+z gets captured as a non-printing character with ASCII
# code 26. Send SIGSTOP and reset the terminal.
self.reset_stdin()
os.kill(os.getpid(), signal.SIGSTOP)
if sys.platform != "win32":
self.old_settings = termios.tcgetattr(sys.stdin.fileno())
tty.setraw(sys.stdin.fileno())
for listener in self.listeners:
await listener.key_press(char)
self.reset_stdin()
def reset_stdin(self):
if hasattr(self, "old_settings"):
termios.tcsetattr(sys.stdin.fileno(), termios.TCSADRAIN, self.old_settings)
async def run_start_func(self):
pretasks = [
xx.background_task(self.starthelp, immediate=True) for xx in self.listeners
]
tasks = [asyncio.create_task(task) for task in pretasks]
self.start_func_complete = time.time()
return tasks
async def run(self):
        # Start all the loops. The view and socket loops are nonblocking. The
        # key_loop needs to be run separately because it blocks on user input.
tasks = [
self.loop.create_task(self.view_loop()),
self.loop.create_task(self.gw_socket_loop()),
self.loop.create_task(self.ns_socket_loop()),
self.loop.create_task(self.key_loop()),
]
        # Wait on all of them. The gather handle can be cancelled in quit()
        # (triggered from key_loop when the user presses Ctrl+c), which will
        # cause the other loops to be cancelled gracefully.
self.gather_handle = asyncio.gather(*tasks)
try:
await self.gather_handle
except asyncio.CancelledError:
return
except websockets.ConnectionClosedError:
self.reset_stdin()
return
else:
log.error("gather_handle returned but it shouldn't have!")
raise Exception("gather_handle returned but it shouldn't have!")
finally:
for listener in self.listeners:
listener.shutdown()
def disconnect(self):
self.quit()
def quit(self):
"""
Make all event loops quit
"""
self.reset_stdin()
self.quitting = True
self.gather_handle.cancel()
def stdin_data():
return select.select([sys.stdin], [], [], 0) == ([sys.stdin], [], [])
async def stream_as_char_generator(loop, stream):
if sys.platform != "win32":
has_key = stdin_data
read_key = lambda: stream.read(1)
else:
has_key = msvcrt.kbhit
read_key = lambda: msvcrt.getch().decode("ascii")
while True:
await asyncio.sleep(0.05)
if has_key():
char = read_key()
if not char: # EOF.
break
yield char
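# Illustrative sketch (not part of the original module): one way the character
# generator above might be consumed on its own. It assumes asyncio and sys are
# already imported near the top of this file, as the code above relies on them.
async def _example_read_keys():
    loop = asyncio.get_event_loop()
    async for char in stream_as_char_generator(loop, sys.stdin):
        if ord(char) in (3, 4):  # Ctrl+c / Ctrl+d, mirroring key_loop above
            break
        print(repr(char))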
| 33.417284
| 88
| 0.547362
| 11,737
| 0.867223
| 432
| 0.03192
| 275
| 0.020319
| 10,537
| 0.778558
| 2,472
| 0.182651
|
492f97e1b2f70e9fa3789d450a9a566094a9d2fe
| 8,772
|
py
|
Python
|
processing_components/simulation/ionospheric_screen.py
|
cnwangfeng/algorithm-reference-library
|
9605eb01652fbfcb9ff003cc12b44c84093b7fb1
|
[
"Apache-2.0"
] | null | null | null |
processing_components/simulation/ionospheric_screen.py
|
cnwangfeng/algorithm-reference-library
|
9605eb01652fbfcb9ff003cc12b44c84093b7fb1
|
[
"Apache-2.0"
] | null | null | null |
processing_components/simulation/ionospheric_screen.py
|
cnwangfeng/algorithm-reference-library
|
9605eb01652fbfcb9ff003cc12b44c84093b7fb1
|
[
"Apache-2.0"
] | null | null | null |
""" Functions for ionospheric modelling: see SDP memo 97
"""
import astropy.units as u
import numpy
from astropy.coordinates import SkyCoord
from data_models.memory_data_models import BlockVisibility
from processing_components.calibration.operations import create_gaintable_from_blockvisibility, \
create_gaintable_from_rows
from processing_components.calibration.iterators import gaintable_timeslice_iter
from processing_components.image.operations import copy_image, create_empty_image_like
from processing_components.visibility.base import create_visibility_from_rows
from processing_components.visibility.iterators import vis_timeslice_iter
from processing_library.util.coordinate_support import xyz_to_uvw, skycoord_to_lmn
import logging
log = logging.getLogger(__name__)
def find_pierce_points(station_locations, ha, dec, phasecentre, height):
"""Find the pierce points for a flat screen at specified height
:param station_locations: All station locations [:3]
:param ha: Hour angle
:param dec: Declination
:param phasecentre: Phase centre
:param height: Height of screen
:return:
"""
source_direction = SkyCoord(ra=ha, dec=dec, frame='icrs', equinox='J2000')
local_locations = xyz_to_uvw(station_locations, ha, dec)
local_locations -= numpy.average(local_locations, axis=0)
lmn = numpy.array(skycoord_to_lmn(source_direction, phasecentre))
lmn[2] += 1.0
pierce_points = local_locations + height * numpy.array(lmn)
return pierce_points
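# Illustrative sketch (not part of the original module): pierce points for a
# toy two-station layout and a 300 km screen. The station coordinates, hour
# angle and directions are made up purely to show the call signature; real
# usage passes vis.configuration.xyz and component directions as in the
# functions below.
def _example_pierce_points():
    stations = numpy.array([[0.0, 0.0, 0.0], [1000.0, 0.0, 0.0]])
    phasecentre = SkyCoord(ra=15.0 * u.deg, dec=-45.0 * u.deg, frame='icrs', equinox='J2000')
    return find_pierce_points(stations, ha=0.0 * u.rad, dec=-44.5 * u.deg,
                              phasecentre=phasecentre, height=3e5)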
def create_gaintable_from_screen(vis, sc, screen, height=3e5, vis_slices=None, scale=1.0, **kwargs):
""" Create gaintables from a screen calculated using ARatmospy
:param vis:
:param sc: Sky components for which pierce points are needed
:param screen:
:param height: Height (in m) of screen above telescope e.g. 3e5
:param scale: Multiply the screen by this factor
:return:
"""
assert isinstance(vis, BlockVisibility)
station_locations = vis.configuration.xyz
nant = station_locations.shape[0]
t2r = numpy.pi / 43200.0
gaintables = [create_gaintable_from_blockvisibility(vis, **kwargs) for i in sc]
# The time in the Visibility is hour angle in seconds!
for iha, rows in enumerate(vis_timeslice_iter(vis, vis_slices=vis_slices)):
v = create_visibility_from_rows(vis, rows)
ha = numpy.average(v.time)
number_bad = 0
number_good = 0
for icomp, comp in enumerate(sc):
pp = find_pierce_points(station_locations, (comp.direction.ra.rad + t2r * ha) * u.rad, comp.direction.dec,
height=height, phasecentre=vis.phasecentre)
scr = numpy.zeros([nant])
for ant in range(nant):
pp0 = pp[ant][0:2]
worldloc = [pp0[0], pp0[1], ha, 1e8]
try:
pixloc = screen.wcs.wcs_world2pix([worldloc], 0)[0].astype('int')
scr[ant] = scale * screen.data[pixloc[3], pixloc[2], pixloc[1], pixloc[0]]
number_good += 1
except:
number_bad += 1
scr[ant] = 0.0
gaintables[icomp].gain[iha, :, :, :] = numpy.exp(1j * scr[:, numpy.newaxis, numpy.newaxis, numpy.newaxis])
gaintables[icomp].phasecentre = comp.direction
if number_bad > 0:
log.warning("create_gaintable_from_screen: %d pierce points are inside the screen image" % (number_good))
log.warning("create_gaintable_from_screen: %d pierce points are outside the screen image" % (number_bad))
return gaintables
def grid_gaintable_to_screen(vis, gaintables, screen, height=3e5, gaintable_slices=None, scale=1.0, **kwargs):
""" Grid a gaintable to a screen image
The phases are just average per grid cell, no phase unwrapping is performed.
:param vis:
:param sc: Sky components for which pierce points are needed
:param screen:
:param height: Height (in m) of screen above telescope e.g. 3e5
:param scale: Multiply the screen by this factor
:return: gridded screen image, weights image
"""
assert isinstance(vis, BlockVisibility)
station_locations = vis.configuration.xyz
nant = station_locations.shape[0]
t2r = numpy.pi / 43200.0
newscreen = create_empty_image_like(screen)
weights = create_empty_image_like(screen)
nchan, ntimes, ny, nx = screen.shape
# The time in the Visibility is hour angle in seconds!
number_no_weight = 0
for gaintable in gaintables:
for iha, rows in enumerate(gaintable_timeslice_iter(gaintable, gaintable_slices=gaintable_slices)):
gt = create_gaintable_from_rows(gaintable, rows)
ha = numpy.average(gt.time)
pp = find_pierce_points(station_locations,
(gt.phasecentre.ra.rad + t2r * ha) * u.rad,
gt.phasecentre.dec,
height=height,
phasecentre=vis.phasecentre)
scr = numpy.angle(gt.gain[0, :, 0, 0, 0])
wt = gt.weight[0, :, 0, 0, 0]
for ant in range(nant):
pp0 = pp[ant][0:2]
worldloc = [pp0[0], pp0[1], ha, 1e8]
pixloc = newscreen.wcs.wcs_world2pix([worldloc], 0)[0].astype('int')
assert pixloc[0] >= 0
assert pixloc[0] < nx
assert pixloc[1] >= 0
assert pixloc[1] < ny
newscreen.data[pixloc[3], pixloc[2], pixloc[1], pixloc[0]] += wt[ant] * scr[ant]
weights.data[pixloc[3], pixloc[2], pixloc[1], pixloc[0]] += wt[ant]
if wt[ant] == 0.0:
number_no_weight += 1
    if number_no_weight > 0:
        print("grid_gaintable_to_screen: %d pierce points have no weight" % (number_no_weight))
        log.warning("grid_gaintable_to_screen: %d pierce points have no weight" % (number_no_weight))
newscreen.data[weights.data > 0.0] = newscreen.data[weights.data > 0.0] / weights.data[weights.data > 0.0]
return newscreen, weights
def calculate_sf_from_screen(screen):
""" Calculate structure function image from screen
Screen axes are ['XX', 'YY', 'TIME', 'FREQ']
:param screen:
:return:
"""
from scipy.signal import fftconvolve
nchan, ntimes, ny, nx = screen.data.shape
sf = numpy.zeros([nchan, 1, 2 * ny - 1, 2 * nx - 1])
for chan in range(nchan):
sf[chan, 0, ...] = fftconvolve(screen.data[chan, 0, ...], screen.data[chan, 0, ::-1, ::-1])
for itime in range(ntimes):
sf += fftconvolve(screen.data[chan, itime, ...], screen.data[chan, itime, ::-1, ::-1])
sf[chan, 0, ...] /= numpy.max(sf[chan, 0, ...])
sf[chan, 0, ...] = 1.0 - sf[chan, 0, ...]
sf_image = copy_image(screen)
sf_image.data = sf[:, :, (ny - ny // 4):(ny + ny // 4), (nx - nx // 4):(nx + nx // 4)]
sf_image.wcs.wcs.crpix[0] = ny // 4 + 1
sf_image.wcs.wcs.crpix[1] = ny // 4 + 1
sf_image.wcs.wcs.crpix[2] = 1
return sf_image
def plot_gaintable_on_screen(vis, gaintables, height=3e5, gaintable_slices=None, plotfile=None):
""" Plot a gaintable on an ionospheric screen
:param vis:
:param sc: Sky components for which pierce points are needed
:param height: Height (in m) of screen above telescope e.g. 3e5
:param scale: Multiply the screen by this factor
:return: gridded screen image, weights image
"""
import matplotlib.pyplot as plt
assert isinstance(vis, BlockVisibility)
station_locations = vis.configuration.xyz
t2r = numpy.pi / 43200.0
# The time in the Visibility is hour angle in seconds!
plt.clf()
for gaintable in gaintables:
for iha, rows in enumerate(gaintable_timeslice_iter(gaintable, gaintable_slices=gaintable_slices)):
gt = create_gaintable_from_rows(gaintable, rows)
ha = numpy.average(gt.time)
pp = find_pierce_points(station_locations,
(gt.phasecentre.ra.rad + t2r * ha) * u.rad,
gt.phasecentre.dec,
height=height,
phasecentre=vis.phasecentre)
phases = numpy.angle(gt.gain[0, :, 0, 0, 0])
plt.scatter(pp[:,0],pp[:,1], c=phases, cmap='hsv', alpha=0.75, s=0.1)
plt.title('Pierce point phases')
plt.xlabel('X (m)')
plt.ylabel('Y (m)')
if plotfile is not None:
plt.savefig(plotfile)
plt.show()
| 40.611111
| 118
| 0.621181
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,991
| 0.226972
|
4930f596737a00ba148f8145ee070ebfb4b9133d
| 7,311
|
py
|
Python
|
build.py
|
Slaals/narval
|
3b811fb3854760a34a3875b35bd6088d4299ce8f
|
[
"CNRI-Python"
] | null | null | null |
build.py
|
Slaals/narval
|
3b811fb3854760a34a3875b35bd6088d4299ce8f
|
[
"CNRI-Python"
] | null | null | null |
build.py
|
Slaals/narval
|
3b811fb3854760a34a3875b35bd6088d4299ce8f
|
[
"CNRI-Python"
] | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
'''
This file is part of Narval:
an open-source and free-rights static blog generator.
'''
import os, filecmp, random, webbrowser
from shutil import copyfile
params = __import__('config')
helpers = __import__('helpers')
tmps = __import__('template')
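# Descriptive note (not part of the original script): build() below expects each
# file in posts/ to start with a small header block, one "key : value" line per
# field (the separator must be surrounded by spaces, since the value is rebuilt
# with ' '.join(lineElts[2:])), terminated by a line containing only "---".
# A hypothetical post file could look like:
#
#   title : My first post
#   date : 2016-01-01
#   cats : travel+food
#   intro : A short teaser
#   author : Jane
#   ---
#   The post body follows here.
#
# Files in pages/ use the same layout but only the "title" and "desc" fields.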
def build(params, helpers, tmps, isLocal=False):
if isLocal == True:
params.folder = params.folder + '-local'
params.blogUrl = os.path.abspath('') + '/' + params.folder
pathPosts, pathPages, myPosts, myPages, nbPosts, catsList = 'posts/', 'pages/', [], [], 0, []
### Build vars (categories, myPosts, myPages) using the source files
# posts folder
listing = os.listdir(pathPosts)
for infile in listing:
fileNum = int(infile.split('.')[0])
with open(os.path.join(pathPosts, infile), 'r') as post:
myPost = {'title': '', 'date': '', 'cats': '', 'intro': '', 'desc': '', 'author': '', 'content': '', 'id': fileNum, 'titleHead': '', 'thumb': ''}
while 1:
line = post.readline().rstrip('\n\r')
if line != '---':
lineElts = line.split()
if lineElts[0] == 'title': myPost['title'] = ' '.join(lineElts[2:])
elif lineElts[0] == 'date': myPost['date'] = ' '.join(lineElts[2:])
elif lineElts[0] == 'cats': myPost['cats'] = ' '.join(lineElts[2:]).split('+')
elif lineElts[0] == 'intro': myPost['intro'] = ' '.join(lineElts[2:])
elif lineElts[0] == 'author': myPost['author'] = ' '.join(lineElts[2:])
elif lineElts[0] == 'desc': myPost['desc'] = ' '.join(lineElts[2:])
elif lineElts[0] == 'thumb': myPost['thumb'] = ' '.join(lineElts[2:])
else:
break
myPost['content'] = post.read()
myPosts.append(myPost)
for cat in myPost['cats']: catsList.append(cat)
myPosts = sorted(myPosts, key=lambda k: k['id'], reverse=True)
catsList = sorted(set(catsList))
catsList2 = []
for cat in catsList: catsList2.append({"name": cat, "path": helpers.niceURL(cat, '/')})
categories = tuple(catsList2)
for post in myPosts:
myList = []
for cat in post['cats']:
match = next((c for c in categories if c['name'] == cat), None)
myList.append(match)
post['cats'] = tuple(myList)
# pages folder
listing = os.listdir(pathPages)
for infile in listing:
with open(os.path.join(pathPages, infile), 'r') as page:
myPage = {'title': '', 'desc': '', 'content': ''}
while 1:
line = page.readline().rstrip('\n\r')
if line != '---':
lineElts = line.split()
if lineElts[0] == 'title': myPage['title'] = ' '.join(lineElts[2:])
elif lineElts[0] == 'desc': myPage['desc'] = ' '.join(lineElts[2:])
else:
break
myPage['content'] = page.read()
myPages.append(myPage)
    header = tmps.TMP_header(params, helpers, tmps, categories, myPages, 0) # 0 for a list
footer = tmps.TMP_footer(params, helpers, tmps)
### Build tree (folders & files)
def copyFolderFiles(src, dest): # source folder, destination folder
srcFiles = os.listdir(src) # all files in source folder
for srcFile in srcFiles:
if os.path.isfile(dest + srcFile) == True:
# exists
if filecmp.cmp(src + srcFile, dest + srcFile) == False:
# not same files
copyfile(src + srcFile, dest + srcFile)
else:
# not exists
copyfile(src + srcFile, dest + srcFile)
        # clean folder (remove all files that are not in the source folder)
destFiles = os.listdir(dest)
for destFile in destFiles:
if os.path.isfile(src + destFile) == False:
os.remove(dest + destFile)
    # if the generated blog folder does not exist, create it
if os.path.isdir(params.folder) == False: os.mkdir(params.folder)
if os.path.isdir(params.folder + '/posts') == False: os.mkdir(params.folder + '/posts')
if os.path.isdir(params.folder + '/theme') == False: os.mkdir(params.folder + '/theme')
if os.path.isdir(params.folder + '/cats') == False: os.mkdir(params.folder + '/cats')
if os.path.isdir(params.folder + '/medias') == False: os.mkdir(params.folder + '/medias')
if os.path.isdir(params.folder + '/thumbs') == False: os.mkdir(params.folder + '/thumbs')
with open(params.folder + '/RSSfeed.xml', 'w') as rssRender:
rssRender.write(tmps.TMP_rss(params, helpers, tmps, myPosts))
with open(params.folder + '/README.md', 'w') as rmRender:
rmRender.write(tmps.TMP_readme(params, helpers, tmps, myPosts))
copyFolderFiles('theme/', params.folder + '/theme/')
copyFolderFiles('medias/', params.folder + '/medias/')
copyFolderFiles('thumbs/', params.folder + '/thumbs/')
copyfile('favicon.ico', params.folder + '/favicon.ico')
copyfile('ui.js', params.folder + '/ui.js')
for categorie in categories:
path = params.folder + '/cats/' + categorie['path']
if os.path.isdir(path) == False: os.mkdir(path)
myPostsFiltred = []
for post in myPosts:
if categorie in post['cats']: myPostsFiltred.append(post)
array = [myPostsFiltred[i:i+params.blogNbPostsByPage] for i in range(0, len(myPostsFiltred), params.blogNbPostsByPage)]
i, nbLots = 1, len(array)
for lot in array:
result = tmps.TMP_posts(params, helpers, tmps, categorie['name'], header, footer, lot, (i, nbLots), '../../')
if i == 1:
with open(path + 'index.html', 'w') as r: r.write(result)
with open(path + 'page1.html', 'w') as r: r.write(result)
else:
with open(path + 'page' + str(i) + '.html', 'w') as r: r.write(result)
i += 1
array = [myPosts[i:i+params.blogNbPostsByPage] for i in range(0, len(myPosts), params.blogNbPostsByPage)]
i = 1
nbLots = len(array)
for lot in array:
if i == 1:
result = tmps.TMP_posts(params, helpers, tmps, '', header, footer, lot, (i, nbLots))
with open(params.folder + '/index.html', 'w') as r: r.write(result)
with open(params.folder + '/page1.html', 'w') as r: r.write(result)
else:
with open(params.folder + '/page' + str(i) + '.html', 'w') as r:
r.write(tmps.TMP_posts(params, helpers, tmps, '', header, footer, lot, (i, nbLots)))
i += 1
    header = tmps.TMP_header(params, helpers, tmps, categories, myPages, 1) # 1 for a post or a page
for post in myPosts:
with open(params.folder + '/posts/' + helpers.niceURL(post['title'], '.html'), 'w') as r:
r.write(tmps.TMP_post(params, helpers, tmps, header, post, footer))
for page in myPages:
with open(params.folder + '/' + helpers.niceURL(page['title'], '.html'), 'w') as r:
r.write(tmps.TMP_page(params, helpers, tmps, header, page, footer))
build(params, helpers, tmps)
build(params, helpers, tmps, True)
exprs = [
'Nom d\'une corne !',
'Bien joué !', 'Bravo !', 'Hihihi !', 'Félicitations !',
'La corne du narval est une dent !',
'Les femelles narval, sauf exceptions, n\'ont pas de corne.',
'Une corne de narval peut mesurer 3 mètres !',
'Une corne de narval peut peser jusqu\'à 10 kg !',
'Le narval vivrait en moyenne une cinquantaine d\'années.',
'Le narval est un cétacé à dents.',
'Outre l\'humain, le narval a 2 prédateurs : l\'orque et l\'ours polaire.',
'Le narval raffole des flétans, des raies et des morues.',
'Le narval peut descendre à 1500 mètres de profondeur.',
'Le narval peut rester en apnée près d\'une demi heure.'
]
print('\033[92m>>> ' + random.choice(exprs) + '\033[0m')
resp = input('Le blog est consultable hors ligne dans "' + params.folder + '".\nVoir dans un navigateur ? (O/n)').lower()
if resp != 'n':
webbrowser.open(params.folder + '/index.html', new=2)
| 39.518919
| 148
| 0.646286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,052
| 0.279984
|
493212d8687f50b52ca98a00b02e9f83e3d17403
| 247
|
py
|
Python
|
examples/simple/regression/sample_skipped.py
|
jonwesneski/end2
|
708c7b96c1086959565e2889a0818451e6e2c931
|
[
"MIT"
] | null | null | null |
examples/simple/regression/sample_skipped.py
|
jonwesneski/end2
|
708c7b96c1086959565e2889a0818451e6e2c931
|
[
"MIT"
] | 1
|
2022-03-12T19:43:00.000Z
|
2022-03-12T19:43:00.000Z
|
examples/simple/regression/sample_skipped.py
|
jonwesneski/end2
|
708c7b96c1086959565e2889a0818451e6e2c931
|
[
"MIT"
] | null | null | null |
from src import (
RunMode,
setup
)
__run_mode__ = RunMode.PARALLEL
@setup
def my_setup(logger):
assert False, "FAILING SETUP ON PURPOSE"
def test_skipped(logger):
assert False, "THIS TEST SHOULD NOT RUN BECAUSE SETUP FAILED"
| 14.529412
| 65
| 0.712551
| 0
| 0
| 0
| 0
| 73
| 0.295547
| 0
| 0
| 73
| 0.295547
|
493289a9cede0d970c6e0473eaec236052421c6b
| 1,276
|
py
|
Python
|
structure/DoublyLinkedList.py
|
Jaidev810/Data-Structures-package
|
f651615275817f182662892b2b57b200310d3dba
|
[
"MIT"
] | 2
|
2021-02-27T06:13:11.000Z
|
2021-02-27T06:15:03.000Z
|
structure/DoublyLinkedList.py
|
Jaidev810/Data-Structures-package
|
f651615275817f182662892b2b57b200310d3dba
|
[
"MIT"
] | null | null | null |
structure/DoublyLinkedList.py
|
Jaidev810/Data-Structures-package
|
f651615275817f182662892b2b57b200310d3dba
|
[
"MIT"
] | null | null | null |
class Node:
def __init__(self, val: int):
self.val = val
self.prev = None
self.next = None
class DoublyLinkedList:
def takeinput(self) -> Node:
inputlist = [int(x) for x in input().split()]
head = None
temp = None
for curr in inputlist:
if curr == -1:
break
Newnode = Node(curr)
if head is None:
head = Newnode
temp = head
else:
temp.next = Newnode
Newnode.prev = temp
temp = temp.next
return head
def printLL(self, head: Node) -> None:
temp = head
while temp is not None:
print(temp.val, end='->')
temp = temp.next
print("None")
def getLength(self, head: Node) -> int:
count = 0
temp = head
while temp is not None:
count += 1
temp = temp.next
        return count
def getMiddle(self, head: Node) -> int:
slow = head
fast = head
while fast and fast.next is not None:
slow = slow.next
fast = fast.next.next
return slow.val
def reverseLL(self, head: Node) -> Node:
pass
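# Illustrative sketch (not part of the original module): a possible
# implementation of the unfinished reverseLL above, plus a tiny usage demo.
# The list is built by hand here because takeinput() reads from stdin.
def _example_reverse(head):
    current, new_head = head, None
    while current is not None:
        current.prev, current.next = current.next, current.prev
        new_head = current
        current = current.prev  # prev now points to the old next node
    return new_head


def _example_usage():
    a, b, c = Node(1), Node(2), Node(3)
    a.next, b.prev = b, a
    b.next, c.prev = c, b
    dll = DoublyLinkedList()
    dll.printLL(a)                    # 1->2->3->None
    print(dll.getLength(a))           # 3
    print(dll.getMiddle(a))           # 2
    dll.printLL(_example_reverse(a))  # 3->2->1->None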
| 23.62963
| 53
| 0.471787
| 1,273
| 0.997649
| 0
| 0
| 0
| 0
| 0
| 0
| 10
| 0.007837
|
49328cd4da5fd3b0c69d9699b45e853d2628cbd7
| 9,297
|
py
|
Python
|
reference_parsing/scripts/reference_script.py
|
ScholarIndex/LinkedBooks
|
0cae008427ed1eb34a882e9d85f24b42b3ee3a28
|
[
"MIT"
] | null | null | null |
reference_parsing/scripts/reference_script.py
|
ScholarIndex/LinkedBooks
|
0cae008427ed1eb34a882e9d85f24b42b3ee3a28
|
[
"MIT"
] | 6
|
2020-03-20T18:10:01.000Z
|
2021-09-29T17:31:17.000Z
|
reference_parsing/scripts/reference_script.py
|
ScholarIndex/LinkedBooks
|
0cae008427ed1eb34a882e9d85f24b42b3ee3a28
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Used una tantum (one-off) to refactor the journal_references collection.
Note that the old references collection (monograph reference lists) is discarded: monographs are going to be parsed again.
This script:
1- copies the journal_references collection to another collection: sand, test and production databases
2- unifies the data model in so doing
3- updates Processing
4- validates everything using mongoengine
"""
__author__ = """Giovanni Colavizza"""
from collections import OrderedDict
import logging
logging.basicConfig(filename="logs/xml_parser.log", level=logging.INFO)
logger = logging.getLogger(__name__)
from configparser import ConfigParser
from datetime import datetime
# Mongo
from pymongo import MongoClient, TEXT, ASCENDING
from mongoengine import connect as engineconnect
# Test models
from commons.dbmodels import *
# Establish Mongo connections
config = ConfigParser(allow_no_value=False)
config.read("config.conf")
logger.info('Read configuration file.')
# SANDBOX the playground
db = "mongo_sand"
mongo_user = config.get(db, 'username')
mongo_pwd = config.get(db, 'password')
mongo_auth = config.get(db, 'auth-db')
mongo_host = config.get(db, 'db-host')
mongo_port = config.get(db, 'db-port')
con = MongoClient(mongo_host,port=int(mongo_port), **{"socketKeepAlive":True})
con.linkedbooks_sandbox.authenticate(mongo_user, mongo_pwd, source=mongo_auth)
db_sand = con.linkedbooks_sandbox
# SOURCE the collection where journal_references is
db = "mongo_source"
mongo_user = config.get(db, 'username')
mongo_pwd = config.get(db, 'password')
mongo_auth = config.get(db, 'auth-db')
mongo_host = config.get(db, 'db-host')
mongo_port = config.get(db, 'db-port')
con = MongoClient(mongo_host,port=int(mongo_port), **{"socketKeepAlive":True})
con.linkedbooks_dev.authenticate(mongo_user, mongo_pwd, source=mongo_auth)
db_source = con.linkedbooks_dev
# DEV the development DB
db = "mongo_dev"
mongo_user = config.get(db, 'username')
mongo_pwd = config.get(db, 'password')
mongo_auth = config.get(db, 'auth-db')
mongo_host = config.get(db, 'db-host')
mongo_port = config.get(db, 'db-port')
con = MongoClient(mongo_host,port=int(mongo_port), **{"socketKeepAlive":True})
con.linkedbooks_refactored.authenticate(mongo_user, mongo_pwd, source=mongo_auth)
db_dev = con.linkedbooks_refactored
# PROD the production DB, only connect if explicitly called
db = "mongo_prod"
mongo_user = config.get(db, 'username')
mongo_pwd = config.get(db, 'password')
mongo_auth = config.get(db, 'auth-db')
mongo_host = config.get(db, 'db-host')
mongo_port = config.get(db, 'db-port')
con = MongoClient(mongo_host,port=int(mongo_port), connect=False, **{"socketKeepAlive":True})
con.linkedbooks_refactored.authenticate(mongo_user, mongo_pwd, source=mongo_auth)
db_prod = con.linkedbooks_refactored
logger.info('Loaded Mongo dbs configs.')
def transfer_collection(destination_db,db):
"""
Transfer the journal_references collection to other databases, after refactoring
:param destination_db: Mongo connector to the right destination database
:param db: config.conf name of the destination database
:return: Nothing.
"""
# IMPORT journal_references collection from SOURCE to new database
references = list()
pages_dict = dict()
# index of items from metadata which are valid
valid_documents = list()
for m in destination_db.metadata.find():
if m["marked_as_removed"]:
continue
if m["type_document"] == "monograph":
continue # we only have journals here
else:
for d in m["issues"]:
if d["marked_as_removed"]:
continue
else:
valid_documents.append((m["bid"], d["foldername"]))
for reference in db_source.journal_references.find(no_cursor_timeout=True):
contents = OrderedDict(sorted(reference["contents"].items(),key=lambda x:int(x[0])))
pages = set([x["page_id"] for x in contents.values()])
for p in pages:
if p not in pages_dict.keys():
try:
items = p.split("-")
bid = items[0]
image = items[-1]
issue = "-".join(items[1:-2])
image = int(image)
except:
print(p)
continue
if (bid,issue) in valid_documents:
document = destination_db.documents.find_one({"bid":bid,"number":issue})
else:
split_issue = issue.split("_")
issue = "_".join(split_issue[:-1])
issue = issue + "." + split_issue[-1]
if (bid, issue) in valid_documents:
document = destination_db.documents.find_one({"bid": bid, "number": issue})
else:
logger.info("MISSING DOCUMENT: %s, %s, %s" % (bid, issue, p))
continue
logger.info("Found a mark as removed: %s, %s" % (bid, issue))
#logger.warning("MISSING DOCUMENT: %s, %s, %s"%(bid,issue,p))
#continue
try:
page = destination_db.pages.find_one({"single_page_file_number":image,"_id":{"$in":document["pages"]}})
except:
logger.warning("MISSING PAGE: %s, %s, %s" % (bid, issue, p))
continue
pages_dict[p] = {"id":page["_id"],"issue":issue}
issue = reference["issue"]
for c in contents.values():
try:
c["page_mongo_id"] = pages_dict[c["page_id"]]["id"]
issue = pages_dict[c["page_id"]]["issue"]
except:
logger.warning("MISSING PAGE IN DICT: %s" % c["page_id"])
c["page_mongo_id"] = ""
r = {"ref_type":reference["ref_type"],
"reference_string":" ".join([x["surface"] for x in contents.values()]),
"in_golden":reference["in_golden"],
"order_in_page":reference["order_in_page"],
"continuation_candidate_in":reference["continuation_candidate_in"],
"continuation_candidate_out":reference["continuation_candidate_out"],
"continuation":reference["continuation"],
"bid":reference["bid"],
"issue":issue,
"contents":contents,
"updated_at":datetime.now()
}
references.append(r)
destination_db.drop_collection("references")
destination_db.references.insert_many(references)
destination_db.references.create_index([('reference_string', TEXT),('bid', TEXT),('issue', TEXT)], default_language='none')
destination_db.references.create_index([('contents.1.single_page_file_number',ASCENDING)],unique=False)
logger.info('Created journal_references collection into database %s'%db)
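# Illustrative note (hypothetical example, not taken from the source data): the
# page_id parsing in transfer_collection() assumes identifiers of the form
# "<bid>-<issue parts>-<token>-<image>", split on "-". For instance an id such
# as "VEA0123456-1950_01-single-0004" would decompose as sketched below.
def _example_page_id_split(page_id="VEA0123456-1950_01-single-0004"):
    items = page_id.split("-")
    bid = items[0]
    image = int(items[-1])
    issue = "-".join(items[1:-2])  # everything between the bid and the last two tokens
    return bid, issue, image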
def updates_checks(destination_db,db):
"""
Checkes the new references collection is properly done, updates the Processing collection.
Note that this assumes the references collection contains objects that have been fully parsed (reason why we do not consider monograph reference lists for now: they have not!)
:param destination_db: Mongo connector to the right destination database
:param db: config.conf name of the destination database
:return: Nothing.
"""
issues_dict = list()
# update processing collection
# get all bids and issues just dumped
for r in destination_db.references.find():
issues_dict.append((r["bid"],r["issue"]))
mongo_db = config.get(db, 'db-name')
mongo_user = config.get(db, 'username')
mongo_pwd = config.get(db, 'password')
mongo_auth = config.get(db, 'auth-db')
mongo_host = config.get(db, 'db-host')
mongo_port = config.get(db, 'db-port')
logger.debug(engineconnect(mongo_db
, username=mongo_user
, password=mongo_pwd
, authentication_source=mongo_auth
, host=mongo_host
, port=int(mongo_port)))
for bid,issue in list(set(issues_dict)):
try:
if not issue or len(issue) == 0:
processing_info = Processing.objects(type_document="monograph", bid=bid).get()
else:
processing_info = Processing.objects(type_document="issue", number=issue, bid=bid).get()
if not processing_info.is_parsed:
processing_info.is_parsed = True
processing_info.updated_at = datetime.now()
processing_info.save()
except:
logger.warning("Missing item in Processing: %s, %s"%(bid,issue))
continue
logger.info('Updated Processing collection into database %s'%db)
# AT THE END, TEST COLLECTION
objects = Reference.objects
logger.info("The database contains %d Reference objects"%len(objects))
transfer_collection(db_sand,"mongo_sand")
updates_checks(db_sand,"mongo_sand")
#transfer_collection(db_dev,"mongo_dev")
#updates_checks(db_dev,"mongo_dev")
#transfer_collection(db_prod,"mongo_prod")
#updates_checks(db_prod,"mongo_prod")
| 43.443925
| 179
| 0.64182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,281
| 0.35291
|
493417e0fff28d76a0a6a6c06dec19aa851fdaf9
| 20,534
|
py
|
Python
|
simpysql/Eloquent/SqlServerBuilder.py
|
wjtxlliubin/simpysql
|
c135ce42d0bda8b11632f4003bb60995d24a7392
|
[
"MIT"
] | 29
|
2019-05-22T08:08:34.000Z
|
2021-11-16T08:15:10.000Z
|
simpysql/Eloquent/SqlServerBuilder.py
|
wjtxlliubin/simpysql
|
c135ce42d0bda8b11632f4003bb60995d24a7392
|
[
"MIT"
] | 4
|
2019-05-20T08:34:07.000Z
|
2019-09-11T11:26:57.000Z
|
simpysql/Eloquent/SqlServerBuilder.py
|
wjtxlliubin/simpysql
|
c135ce42d0bda8b11632f4003bb60995d24a7392
|
[
"MIT"
] | 5
|
2019-05-20T09:15:49.000Z
|
2021-09-04T19:08:59.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import collections
from simpysql.Util.Expression import expression as expr, Expression
from simpysql.Util.Response import Response
from .BaseBuilder import BaseBuilder
from simpysql.Util.Dynamic import Dynamic
class SqlServerBuilder(BaseBuilder):
operators = [
'=', '<', '>', '<=', '>=', '<>', '!=',
'like', 'like binary', 'not like', 'between', 'ilike',
'&', '|', '^', '<<', '>>',
'rlike', 'regexp', 'not regexp',
'~', '~*', '!~', '!~*', 'similar to',
'not similar to', 'not ilike', '~~*', '!~~*', 'in', 'not in', 'not between'
]
def __init__(self, model, alias=None):
self.__model__ = model
self.__alias__ = alias
self.__where__ = []
        self.__orwhere__ = []  # orwhere handling logic
        self.__whereor__ = []  # whereor handling logic
        self.__select__ = []  # columns to select
        self.__limit__ = 0  # number of rows to fetch
        self.__orderby__ = []  # order-by columns
        self.__groupby__ = []  # group-by columns
self.__offset__ = None # offset
self.__lock__ = None # lock
self.__join__ = [] # leftjoin
self.__union__ = [] # union & unionall
self.__on__ = [] # leftjoin
self.__having__ = None # having
self.__subquery__ = [] # subquery
def first(self):
self.__limit__ = 1
data = self.get()
if data:
return data.pop()
return data
def one(self):
data = self.get()
if data:
return data.pop()
return data
def get(self):
return [Dynamic(index) for index in self._get_connection().execute(self._compile_select())]
def lists(self, columns):
return Response(self._get_connection().execute(self._compile_select())).tolist(columns)
def data(self):
return Response(self._get_connection().execute(self._compile_select())).data()
def response(self):
return Response(self._get_connection().execute(self._compile_select()))
def max(self, column):
if isinstance(column, str) and column in self.__model__.columns:
self.__select__ = ['max({}) as aggregate'.format(column)]
data = self.one()
return data['aggregate'] if data else None
raise Exception('param invalid in function max')
def min(self, column):
if isinstance(column, str) and column in self.__model__.columns:
self.__select__ = ['min({}) as aggregate'.format(column)]
data = self.one()
return data['aggregate'] if data else None
raise Exception('param invalid in function min')
def avg(self, column):
if isinstance(column, str) and column in self.__model__.columns:
self.__select__ = ['avg({}) as aggregate'.format(column)]
data = self.one()
return data['aggregate'] if data else None
raise Exception('param invalid in function avg')
def sum(self, column):
if isinstance(column, str) and column in self.__model__.columns:
self.__select__ = ['sum({}) as aggregate'.format(column)]
data = self.one()
return data['aggregate'] if data else None
raise Exception('param invalid in function sum')
def count(self):
self.__select__ = ['count(*) as aggregate']
data = self.one()
return data['aggregate'] if data else None
def exist(self):
return True if self.count() > 0 else False
def update(self, data):
if data and isinstance(data, dict):
data = self._set_update_time(data)
return self._get_connection().execute(self._compile_update(data))
def increment(self, key, amount=1):
if isinstance(amount, int) and amount > 0:
data = collections.defaultdict(dict)
data[key] = '{}+{}'.format(expr.format_column(key), str(amount))
data = self._set_update_time(data)
return self._get_connection().execute(self._compile_increment(data))
def decrement(self, key, amount=1):
if isinstance(amount, int) and amount > 0:
data = collections.defaultdict(dict)
data[key] = '{}-{}'.format(expr.format_column(key), str(amount))
data = self._set_update_time(data)
return self._get_connection().execute(self._compile_increment(data))
def create(self, data):
if data:
if data and isinstance(data, dict):
data = [data]
data = self._set_create_time(data)
self._get_connection().execute(self._compile_create(data))
return self
def insert(self, columns, data):
self._get_connection().execute(self._compile_insert(columns, data))
return self
def lastid(self):
data = self._get_connection().execute(self._compile_lastid())
return data[0][0] if data and data[0] and data[0][0] else None
def delete(self):
return self._get_connection().execute(self._compile_delete())
def take(self, number):
if number <= 0:
raise Exception('take number invalid')
self.__limit__ = int(number)
return self
def select(self, *args):
self.__select__ = self._format_columns(list(args))
return self
def groupby(self, *args):
self.__groupby__ = self._format_columns(list(args))
return self
def offset(self, number):
if number <= 0:
raise Exception('offset number invalid')
self.__offset__ = int(number)
return self
def tosql(self):
return self._compile_select()
def where(self, *args):
length = args.__len__()
if length == 1 and isinstance(args[0], dict):
self.__where__.append(args[0])
elif length == 2:
self.__where__.append({args[0]: self._check_columns_value(args[1])})
elif length == 3:
if args[1] in self.operators:
if args[1] == '=':
self.__where__.append({args[0]: self._check_columns_value(args[2])})
else:
self.__where__.append((args[0], args[1], self._check_columns_value(args[2])))
else:
                raise Exception('operator keyword not found: "{}"'.format(args[1]))
else:
raise Exception('bad parameters in where function')
return self
def orwhere(self, *args):
length = args.__len__()
if length == 1 and isinstance(args[0], dict):
self.__orwhere__.append(args[0])
elif length == 1 and isinstance(args[0], list):
self.__orwhere__.append(args[0])
elif length == 2:
self.__orwhere__.append({args[0]: args[1]})
elif length == 3:
if args[1] in self.operators:
if args[1] == '=':
self.__orwhere__.append({args[0]: args[2]})
else:
self.__orwhere__.append((args[0], args[1], args[2]))
else:
                raise Exception('operator keyword not found: "{}"'.format(args[1]))
else:
raise Exception('bad parameters in where function')
return self
def whereor(self, *args):
length = args.__len__()
if length == 1 and isinstance(args[0], list):
self.__whereor__.append(args[0])
else:
raise Exception('bad parameters in where function')
return self
def orderby(self, column, direction='asc'):
if direction.lower() == 'asc':
self.__orderby__.append(expr.format_column(column))
else:
self.__orderby__.append(expr.format_column(column) + ' desc')
return self
def execute(self, sql):
return self._get_connection().execute(sql)
def having(self, *args):
length = args.__len__()
if length == 2:
self.__having__ = ' having {} {} {}'.format(args[0], '=', expr.format_string(args[1]))
elif length == 3:
self.__having__ = ' having {} {} {}'.format(args[0], args[1], expr.format_string(args[2]))
else:
raise Exception('invalid parameter in having function')
return self
def lock_for_update(self):
self.__lock__ = ' for update'
return self
def lock_for_share(self):
self.__lock__ = ' lock in share mode'
return self
def leftjoin(self, model):
if not (isinstance(model, BaseBuilder)):
raise TypeError('invalid parameter type in leftjoin')
self.__join__.append(('left join', model))
return self
def rightjoin(self, model):
if not (isinstance(model, BaseBuilder)):
raise TypeError('invalid parameter type in rightjoin')
self.__join__.append(('right join', model))
return self
def join(self, model):
return self.innerjoin(model)
def innerjoin(self, model):
if not (isinstance(model, BaseBuilder)):
raise TypeError('invalid parameter type in innerjoin')
self.__join__.append(('inner join', model))
return self
def union(self, model):
if not (isinstance(model, BaseBuilder)):
raise TypeError('invalid parameter type in union')
self.__union__.append(('union', model))
return self
def unionall(self, model):
if not (isinstance(model, BaseBuilder)):
raise TypeError('invalid parameter type in unionall')
self.__union__.append(('union all', model))
return self
def on(self, *args):
length = args.__len__()
if length == 2:
self.__on__.append((args[0], '=', args[1]))
elif length == 3:
self.__on__.append((args[0], args[1], args[2]))
else:
raise Exception('invalid parameter in on function')
return self
def subquery(self, model, alias='tmp'):
self.__subquery__.append((alias, model))
return self
def _compile_select(self):
if len(self.__select__) == 0:
self.__select__.append('*')
subsql = ''.join(
[self._compile_where(), self._compile_whereor(), self._compile_orwhere(), self._compile_groupby(), self._compile_orderby(),
self._compile_having(), self._compile_offset(), self._compile_lock()])
joinsql = ''.join(self._compile_leftjoin())
returnsql = "select {}{} from {}{}{}".format(self._compile_limit(), ','.join(self.__select__), self._tablename(), joinsql, subsql)
if self.__union__:
return '{}'.format(returnsql) + self._compile_union()
return returnsql
def _compile_create(self, data):
return "insert into {} {} values {}".format(self._tablename(), self._columnize(data[0]), self._valueize(data))
def _compile_insert(self, columns, data):
return "insert into {} {} values {}".format(self._tablename(), self._columnize(columns), ','.join([tuple(index).__str__() for index in data]))
def _compile_update(self, data):
return "update {} set {}{}".format(self._tablename(), ','.join(self._compile_dict(data)), self._compile_where())
def _compile_increment(self, data):
subsql = ','.join(['{}={}'.format(expr.format_column(index), value) for index, value in data.items()])
return "update {} set {}{}".format(self._tablename(), subsql, self._compile_where())
def _compile_delete(self):
return 'delete from {}{}'.format(self._tablename(), self._compile_where())
def _compile_lastid(self):
return 'select last_insert_id() as lastid'
def _columnize(self, columns):
return tuple(columns).__str__().replace('\'', '`')
def _valueize(self, data):
return ','.join([tuple(index.values()).__str__() for index in data])
def _compile_groupby(self):
return '' if len(self.__groupby__) == 0 else ' group by ' + ','.join(self.__groupby__)
def _compile_orderby(self):
return '' if len(self.__orderby__) == 0 else ' order by ' + ','.join(self.__orderby__)
def _compile_limit(self):
return '' if self.__limit__ == 0 else 'top ({}) '.format(self.__limit__)
def _compile_offset(self):
if self.__offset__:
if self.__orderby__:
return '' if self.__offset__ is None else ' offset {} rows fetch next {} rows only'.format(self.__offset__, self.__limit__)
raise Exception('orderby function not set exception')
return ''
def _compile_lock(self):
return '' if self.__lock__ is None else self.__lock__
def _compile_leftjoin(self):
if self.__join__:
return ' ' + ' '.join(['{} {} on {}'.format(index, value._tablename(), value._compile_on()) for (index, value) in
self.__join__])
return ''
def _compile_union(self):
if self.__union__:
return ' ' + ' '.join(['{} ({})'.format(index, value.tosql()) for (index, value) in self.__union__])
return ''
def _compile_on(self):
sqlstr = ['{} {} {}'.format(index[0], index[1], index[2]) for index in self.__on__]
return ' and '.join(sqlstr)
def _compile_having(self):
if self.__having__:
return self.__having__
return ''
def _compile_where(self):
if len(self.__where__) > 0:
sqlstr = []
for index in self.__where__:
if isinstance(index, dict):
sqlstr.append(' and '.join(self._compile_dict(index)))
elif isinstance(index, tuple):
sqlstr.append(self._compile_tuple(index))
return ' where {}'.format(' and '.join(sqlstr))
return ''
def _compile_orwhere(self):
if len(self.__orwhere__) > 0:
sqlstr = []
for index in self.__orwhere__:
if isinstance(index, dict):
subsql = self._compile_dict(index)
if len(subsql) == 1:
sqlstr.append(subsql.pop())
else:
sqlstr.append('({})'.format(' and '.join(subsql)))
elif isinstance(index, tuple):
sqlstr.append(self._compile_tuple(index))
elif isinstance(index, list):
subsql = []
for items in index:
if len(items) == 2:
subsql.append(self._compile_keyvalue(items[0], items[1]))
if len(items) == 3:
subsql.append(self._compile_tuple((items[0], items[1], items[2])))
sqlstr.append('({})'.format(' and '.join(subsql)))
else:
raise Exception('undefined query condition {}'.format(index.__str__()))
if len(self.__where__) > 0:
return ' or {}'.format(' or '.join(sqlstr))
return ' where {}'.format(' or '.join(sqlstr))
return ''
def _compile_whereor(self):
if len(self.__whereor__) > 0:
sqlstr = []
for index in self.__whereor__:
subsql = []
for item in index:
if isinstance(item, dict):
if len(item) == 1:
subsql.append(self._compile_dict(item).pop())
else:
subsql.append('(' + ' and '.join(self._compile_dict(item)) + ')')
elif isinstance(item, list):
if isinstance(item[0], str):
subsql.append(self._compile_tuple(tuple(item)))
else:
subsql.append(self._compile_lists(item))
elif isinstance(item, tuple):
subsql.append(self._compile_tuple(item))
else:
raise Exception('whereor param invalid')
sqlstr.append(' or '.join(subsql))
if len(self.__where__) > 0:
return ' and ({})'.format(' or '.join(sqlstr))
return ' where ({})'.format(' or '.join(sqlstr))
return ''
def _compile_dict(self, data):
return ['{}={}'.format(expr.format_column(index), expr.format_string(value)) for index, value in data.items()]
def _compile_tuple(self, data):
if data[1] in ['in', 'not in']:
return self._compile_in((data[0], data[1], data[2]))
elif data[1] in ['between', 'not between']:
return self._compile_between((data[0], data[1], data[2]))
return '{} {} {}'.format(expr.format_column(data[0]), data[1], expr.format_string(data[2]))
def _compile_in(self, data):
return '{} {} {}'.format(expr.format_column(data[0]), data[1], expr.list_to_str(data[2]))
def _compile_list(self, data):
length = len(data)
if length == 2:
return self._compile_keyvalue(data[0], data[1])
if length == 3:
return self._compile_tuple((data[0], data[1], data[2]))
def _compile_lists(self, data):
return_data = []
for index in data:
if isinstance(index, list):
return_data.append(self._compile_list(index))
if isinstance(index, tuple):
return_data.append(self._compile_tuple(index))
return '(' + ' and '.join(return_data) + ')'
def _compile_between(self, data):
if not (len(data) == 3 and len(data[2]) == 2):
raise Exception('between param invalid')
return '{} {} {} and {}'.format(expr.format_column(data[0]), data[1], expr.format_string(data[2][0]),
expr.format_string(data[2][1]))
def _compile_keyvalue(self, key, value):
return '{}={}'.format(expr.format_column(key), expr.format_string(value))
def _compile_subquery(self):
subquery = []
for index, value in self.__subquery__:
if isinstance(value, str):
subquery.append('{} as {}'.format(value, index))
else:
subquery.append('({}) as {}'.format(value.tosql(), index))
return ','.join(subquery)
def _get_connection(self):
return self.connect(self.__model__)
def _check_columns_value(self, value):
if self.__subquery__ and len(self.__subquery__) >= 2 and isinstance(value, str):
tmp = value.split('.')
if len(tmp) == 2 and tmp[0] in self._get_subquery_alias():
return Expression(value)
return value
def _get_subquery_alias(self):
return [index for index, value in self.__subquery__]
def database(self, name):
self.__model__.__database__ = name
return self
def _tablename(self):
if self.__subquery__:
return self._compile_subquery()
if self.__alias__ is None:
return self.__model__.__tablename__
return self.__model__.__tablename__ + ' as {}'.format(self.__alias__)
def _format_columns(self, columns):
return list(map(lambda index: expr.format_column(index), columns))
def _set_create_time(self, data):
currtime = self.__model__.fresh_timestamp()
update_column = self.__model__.update_time_column()
create_column = self.__model__.create_time_column()
for index in data:
if create_column and create_column not in index:
index[create_column] = currtime
if update_column and update_column not in index:
index[update_column] = currtime
return data
def _set_update_time(self, data):
currtime = self.__model__.fresh_timestamp()
update_column = self.__model__.update_time_column()
if update_column and update_column not in data:
data[update_column] = currtime
return data
def transaction(self, callback):
return self._get_connection().transaction(callback)
def transaction_wrapper(self, callback):
return self._get_connection().transaction_wrapper(callback)
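# Illustrative sketch (not part of the original module): exercises the fluent
# query-building surface without touching a database. `_DemoModel` is a
# hypothetical stand-in with only a __tablename__; since just tosql() is
# called, no connection is needed.
class _DemoModel:
    __tablename__ = 'demo_users'


def _example_tosql():
    builder = SqlServerBuilder(_DemoModel)
    return (builder.select('id', 'name')
            .where('status', 'active')
            .where('created_at', '>=', '2020-01-01')
            .orderby('id', 'desc')
            .take(10)
            .tosql())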
| 39.564547
| 150
| 0.564868
| 20,329
| 0.987324
| 0
| 0
| 0
| 0
| 0
| 0
| 2,116
| 0.102768
|
49352eca9c4b127887b5b697dac9363a8f43de19
| 18,955
|
py
|
Python
|
openBMC/terminal_cmd.py
|
kevinkellyspacey/openBMC-rpi
|
468f3ec39a29e7d89e0601ba6d51279cd4617b93
|
[
"MIT"
] | null | null | null |
openBMC/terminal_cmd.py
|
kevinkellyspacey/openBMC-rpi
|
468f3ec39a29e7d89e0601ba6d51279cd4617b93
|
[
"MIT"
] | null | null | null |
openBMC/terminal_cmd.py
|
kevinkellyspacey/openBMC-rpi
|
468f3ec39a29e7d89e0601ba6d51279cd4617b93
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from openBMC.smbpbi import smbpbi_read
import smbus
from openBMC.fan_control import set_dbus_data,get_dbus_data,pwm_reqest_set,TMP451_7bit_ADDR,GPU0_7bit_ADDR,GPU1_7bit_ADDR
import dbus
import argparse
import socket
import sys
import os
def show(string,serial):
if serial:
serial.write(string)
else:
print(string)
class CMDManager(object):
def __init__(self, *args, **kwargs):
# cmd sample:cmd_name:[cmd_function,max_args_length,description,ret]
self.cmd_list ={"help":[self.help_command,
1,
""" Try `help [command]`,\n
Display helpful information about builtin commands.\n
If PATTERN is specified, gives detailed help on all\n
commands matching PATTERN, otherwise a list of the\n
builtins is printed. The -s option restricts the output\n
for each builtin command matching PATTERN to a short usage synopsis""",
False],
"version":[self.version_command,
1,
""" none\n
will show the git hash associated with the firmware version\n
print the version of firmware""",
True],
"dmesg":[self.dmesg_command,
1,
""" none\n
print the system log to shell\n
just type dmesg""",
False],
"smbpbi":[self.smbpbi_command,
3,
""" <dev_addr> <data_in(32bits)> <command(32bit)>\n
SMBPBI read from a specific device\n""",
True],
"power":[self.smbpbi_sub_command,
1,
""" <gpu index>\n
read GPU power for gpu [0-1]\n
example arguments: \n
power 0\n
read specified GPU power""",
True],
"temp":[self.i2c_sub_command,
1,
""" <index>\n
read temperature for gpu/LR10 [0-2]\n
example arguments: 0 for GPU0, 1 for GPU1, 2 for LR10\n
temp 0\n
read specified GPU temperature""",
False],
"get_pwm":[self.dbus_command,
1,
""" <pwm index[0-3]>\n
get current PWM[0-3] duty cycle\n
0:GPU1, 1:GPU2, 2:LR, 3:ALL\n
example arguments: \n
pwm 1\n
get current PWM duty cycle""",
False],
"pwm_restore":[self.dbus_command,
1,
""" <pwm index[0-3]>\n
resotre PWM[0-3] for control based on temperature\n
0:GPU1, 1:GPU2, 2:LR, 3:ALL\n
example arguments: \n
pwm_restore 1\n
restore PWM duty cycle for control based on temperature""",
False],
"pwm":[self.dbus_command,
2,
""" <pwm index[0-3]> <duty cycle percentage[0-100]>\n
specify PWM[0-3] for fans [0-100]\n
0:GPU1, 1:GPU2, 2:LR, 3:ALL\n
example: \n
pwm 0 50\n
set GPU1 to 50% duty cycle
set PWM duty cycle""",
False],
"hsc":[self.i2c_sub_command,
2,
"""hsc number([0-3])> <info type(power,temp,alert)>\n
ead HS power, temperature, alert state\n
example arguments: \n
hsc 1 power\n
hsc 1 temp\n
hsc 1 alert\n
read specified HSC power , temp ,alert""",
False],
"i2c_block_write":[self.i2c_command,
100,
"""<dev_addr> <reg_addr> <byte_count> <reg_val[0]> <reg_val[1]> .... \n
write block data to a specified smbus device's register""",
False],
"i2c_block_read":[self.i2c_command,
3,
"""<dev_addr> <reg_addr> <byte_count>\n
read the block data from a specified device's register""",
True],
"i2c_word_write":[self.i2c_command,
3,
"""<dev_addr> <dev_reg_addr> <reg_val>\n
write word data to a specified smbus device's register""",
False],
"i2c_word_read":[self.i2c_command,
2,
"""<dev_addr> <dev_reg_addr>\n
read the word data from a specified device's register""",
True],
"i2c_byte_write":[self.i2c_command,
3,
"""<dev_addr> <dev_reg_addr> <reg_val>\n
write one byte data to a specified smbus device's register""",
False],
"i2c_byte_read":[self.i2c_command,
2,
"""<dev_addr> <reg_addr>\n
read the byte data from a specified device's register""",
True],
"i2c_dump":[self.i2c_command,
1,
"""<i2c_dev_addr>\n
dump the data from a specified smbus address""",
False],
"i2c_probe":[self.i2c_command,
0,
"""No agrment need\n
probe to find physical addresses that ack""",
False],
"ip":[self.ip_command,
0,
"""No agrment need\n
show the ip address of current openBMC RPI module""",
True],
}
def add_cmd(self,cmd_name,*args):
if not self.search_cmd(cmd_name):
self.cmd_list[cmd_name] = []
for para in args:
self.cmd_list[cmd_name].append(para)
def remove_cmd(self,cmd_name):
if self.search_cmd(cmd_name):
del self.cmd_list[cmd_name]
def update_cmd(self,cmd_name,*args):
if self.search_cmd(cmd_name):
for i,para in enumerate(args):
self.cmd_list[cmd_name][i] = para
return True
else:
return False
def search_cmd(self,cmd_name):
if cmd_name in self.cmd_list.keys():
return True
else:
return False
def apply_cmd(self,cmd_name,serial=None,*args):
if self.search_cmd(cmd_name):
#print(len(args),args)
if len(args) <= self.cmd_list[cmd_name][1]:
f = self.cmd_list[cmd_name][0]
try:
if self.cmd_list[cmd_name][3]:
ret = f(cmd_name,serial,*args)
show(str(ret),serial)
else:
f(cmd_name,serial,*args)
except Exception as err:
logging.error(err)
else:
show("the parametes of the cmd {} is not valid\n".format(cmd_name),serial)
else:
show("this {} is not in the commad list\n".format(cmd_name),serial)
def help_command(self,name,serial=None,cmd_name=None):
def print_command_list(serial=None):
cmd_list = ""
for key in self.cmd_list:
cmd_list += "{0}:{1}\n".format(key,self.cmd_list[key][2])
show(cmd_list,serial)
if not cmd_name:
print_command_list(serial)
return
if self.search_cmd(cmd_name):
show("{0}:{1}\n".format(cmd_name,self.cmd_list[cmd_name][2]),serial)
else:
show("commad list doesn't have the cmd [{}]\n".format(cmd_name),serial)
def version_command(self,name,serial=None):
return 1.0
def dmesg_command(self,name,serial=None):
os.system("dmesg")
def smbpbi_command(self,name,serial=None,*args):
if len(args) <3:
show("Need SMBPBI slave address, data_in(32 bits) ,command(32 bits) \n",serial)
return -1
address = int(args[0],0)
data_in = int(args[1],0)
command_in = int(args[2],0)
dbus_iface = dbus.Interface(dbus.SystemBus().get_object('com.openBMC.RPI','/RPI'),'com.openBMC.RPI')
pre_user_status = []
for i in range(3):
pre_user_status.append(get_dbus_data(i,"user",dbus_iface))
set_dbus_data(i,"user",1,dbus_iface)
val,status = smbpbi_read(address,command_in,None,data_in)
for i in range(3):
set_dbus_data(i,"user",pre_user_status.pop(0),dbus_iface)
if status == 0x1f:
return val
elif status == 0x08:
show("Requested parameter is not supported on given configuration.\n",serial)
return -1
def smbpbi_sub_command(self,name,serial=None,*args):
if name == "power":
if len(args) <1:
show("Error: invalid argument\n",serial)
return -1
gpu_index = int(args[0])
#print(gpu_index,type(gpu_index))
if gpu_index >1 or gpu_index <0:
show("Error: invalid gpu index\n",serial)
return -1
if gpu_index == 0:
address = GPU0_7bit_ADDR
else:
address = GPU1_7bit_ADDR
try:
val = self.smbpbi_command(None,None,str(address),"0x0","0x80000004")
return val
except Exception as err:
logging.error(err)
return -1
def dbus_command(self,name,serial=None,*args):
gpu_index = None
percent = None
bus = smbus.SMBus(1)
dbus_iface = dbus.Interface(dbus.SystemBus().get_object('com.openBMC.RPI','/RPI'),'com.openBMC.RPI')
if name == "pwm":
if len(args) <2:
show("error, must specify PWM index 0-3 and duty cycle 0-100\n",serial)
return -1
gpu_index = int(args[0])
if gpu_index > 3 or gpu_index <0:
show("Error: invalid PWM index\n",serial)
return -1
percent = int(args[1])
if percent > 100:
percent = 100
elif percent < 0:
percent = 0
if gpu_index == 3:
for i in range(3):
set_dbus_data(i,"user",1,dbus_iface)
pwm_reqest_set(i,percent,bus,dbus_iface)
else:
set_dbus_data(gpu_index,"user",1,dbus_iface)
pwm_reqest_set(gpu_index,percent,bus,dbus_iface)
if name == "pwm_restore":
if len(args) <1:
show("error, must specify PWM0/1/2/3\n",serial)
return -1
gpu_index = int(args[0])
if gpu_index > 3 or gpu_index < 0:
show("Error: invalid PWM index\n",serial)
return -1
if gpu_index == 3:
for i in range(3):
set_dbus_data(i,"user",0,dbus_iface)
else:
set_dbus_data(gpu_index,"user",0,dbus_iface)
if name == "get_pwm":
if len(args) <1:
show("Error: invalid argument\n",serial)
return -1
gpu_index = int(args[0])
if gpu_index > 3 or gpu_index < 0:
show("Error: invalid PWM index\n",serial)
return -1
if gpu_index == 3:
for i in range(3):
show("{0}: {1}%\n".format(i,get_dbus_data(i,"percent",dbus_iface)),serial)
else:
show("{}%".format(get_dbus_data(gpu_index,"percent",dbus_iface)),serial)
def i2c_sub_command(self,name,serial=None,*args):
if name == "hsc":
address_list = [0x40,0x42,0x44,0x46]
if len(args) <2:
show("Need HSC Number([0-3]),info_type(power,temp,alert)\n",serial)
return -1
            if int(args[0]) > 3 or int(args[0]) < 0:
show("Error, HSC parameters wrong. Only support [0-3]\n",serial)
return -1
address = address_list[int(args[0])]
if args[1] == "temp":
offset = 0x8d
elif args[1] == "power":
offset = 0x97
elif args[1] == "alert":
offset = 0x79
else:
show("Error, info type parameters wrong. Only support power,temp,alert\n",serial)
return -1
try:
val = self.i2c_command("i2c_word_read",serial,address,offset)
if args[1] == "temp":
show("HSC 0x{0:02x} 0x{1:02x}: {2} C".format(address,offset,(val&0xff)+((val&0xff00)>>8)*256),serial)
elif args[1] == "power":
show("HSC 0x{0:02x} 0x{1:02x}: {2} w".format(address,offset,(val&0xff)+((val&0xff00)>>8)*256),serial)
elif args[1] == "alert":
show("HSC 0x{0:02x} 0x{1:02x}: 0x{3:02X}{2:02X}".format(address,offset,(val&0xff),((val&0xff00)>>8)),serial)
except Exception as err:
show(err,serial)
if name == "temp":
if len(args) <1:
show("Error: invalid argument\n",serial)
return -1
gpu_index = int(args[0])
            if gpu_index > 2 or gpu_index < 0:
show("Error: invalid gpu index\n",serial)
return -1
if gpu_index == 0:
address = GPU0_7bit_ADDR
offset = 0
elif gpu_index == 1:
address = GPU1_7bit_ADDR
offset = 0
else:
address = TMP451_7bit_ADDR
offset = 1
try:
val = self.i2c_command("i2c_byte_read",serial,address,offset)
if gpu_index == 2:
show("{0:3d} C".format(val-64),serial)
else:
show("{0:3d} C".format(val),serial)
except Exception as err:
show(err,serial)
def i2c_command(self,name,serial=None,*args):
# init
val = None
address = None
offset = None
count = None
# get i2c1 bus
bus = smbus.SMBus(1)
if name == "i2c_probe":
if len(args) > 0:
show("error, no need argument\n",serial)
return -1
os.system("i2cdetect -y 1")
if name == "i2c_dump":
if len(args) < 1:
show("Need i2c_dev_addr\n",serial)
return -1
address = int(args[0],0)
os.system("i2cdump -y 1 {}".format(address))
if name == "i2c_byte_read":
if len(args) < 2:
show("Need i2c_dev_addr, dev_reg_addr\n",serial)
address = int(args[0])
offset = int(args[1])
val = bus.read_byte_data(address,offset)
if val < 0:
show("Error reading device register 0x{0:x} from addr 0x{1:x} \nI2c bus collision may detect, please try again\n".format(address,offset),serial)
return val
if name == "i2c_byte_write":
if len(args) < 3:
show("Need i2c_dev_addr, dev_reg_addr, reg_val\n",serial)
address = int(args[0])
offset = int(args[1])
val = int(args[2])
bus.write_byte_data(address,offset,val)
if name == "i2c_word_read":
if len(args) < 2:
show("Need i2c_dev_addr, dev_reg_addr\n",serial)
address = int(args[0])
offset = int(args[1])
val = bus.read_word_data(address,offset)
if val < 0:
show("Error reading device register 0x{0:x} from addr 0x{1:x} \nI2c bus collision may detect, please try again\n".format(address,offset),serial)
return val
if name == "i2c_word_write":
if len(args) < 3:
show("Need i2c_dev_addr, dev_reg_addr, reg_val\n",serial)
address = int(args[0])
offset = int(args[1])
val = int(args[2])
bus.write_word_data(address,offset,val)
if name == "i2c_block_read":
if len(args) < 3:
show("Need i2c_dev_addr, dev_reg_addr, byte_count\n",serial)
address = int(args[0])
offset = int(args[1])
count = int(args[2])
try:
val = bus.read_i2c_block_data(address,offset,count)
except Exception as err:
show("Error reading device register 0x{0:x} from addr 0x{1:x} \nI2c bus collision may detect, please try again\n{2}".format(address,offset,err),serial)
return val
if name == "i2c_block_write":
num = len(args)
if num < 4:
show("Need i2c_dev_addr, dev_reg_addr, byte_count ,reg_val[0] , reg_val[1] ....\n",serial)
address = int(args[0])
offset = int(args[1])
count = int(args[2])
if count == num - 3:
try:
val = args[3:]
bus.write_i2c_block_data(address,offset,val)
except Exception as err:
show("Error writing device register 0x{0:x} from addr 0x{1:x} \nI2c bus collision may detect, please try again\n{2}".format(address,offset,err),serial)
else:
show("the count is not equal to value list num\n",serial)
def ip_command(self,name,serial=None):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
s.connect(('8.8.8.8', 80))
ip = s.getsockname()[0]
finally:
s.close()
return ip
if __name__ == '__main__':
# python terminal_cmd.py cmd_name arg1 arg2 arg3
terminal_command = CMDManager()
if len(sys.argv) == 1:
terminal_command.apply_cmd("help")
sys.exit(0)
cmd_name = sys.argv[1]
args = tuple(sys.argv[2:])
# print(sys.argv)
terminal_command.apply_cmd(cmd_name,None,*args)
| 39.737945
| 171
| 0.482564
| 18,207
| 0.960538
| 0
| 0
| 0
| 0
| 0
| 0
| 5,321
| 0.280717
|
4935f536cd95ba674ac2e9ef0ae15b9cb27cb00e
| 5,436
|
py
|
Python
|
other/wget_files.py
|
arlewis/galaxy_cutouts
|
02c7eac9a6251a36290e7c620ff6a76c012fd53b
|
[
"MIT"
] | null | null | null |
other/wget_files.py
|
arlewis/galaxy_cutouts
|
02c7eac9a6251a36290e7c620ff6a76c012fd53b
|
[
"MIT"
] | null | null | null |
other/wget_files.py
|
arlewis/galaxy_cutouts
|
02c7eac9a6251a36290e7c620ff6a76c012fd53b
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import numpy as np
import astropy.io.fits
import gal_data
import config
import argparse
import os
import sys
from collections import defaultdict
from pdb import set_trace
_WORK_DIR = '/Users/lewis.1590/research/galbase'
_GALDATA_DIR = '/Users/lewis.1590/python/galbase/gal_data'
_OUTPUT_DIR = '/Users/lewis.1590/research/z0mgs'
BAND = 'fuv'
#galex_file_suffixes = ['-int', '-cnt', '-exp', '-rrhr', '-skybg', '-intbgsub', '-wt', '-flags', '-objmask', '-cat']
galex_file_suffixes = ['-int', '-intbgsub', '-rrhr']
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--galaxy_list', nargs='+', help='list of tilenames to gather')
parser.add_argument('--output_file', default='wget_commands.sh', help='file to write commands to')
return parser.parse_args()
def calc_tile_overlap(ra_ctr, dec_ctr, pad=0.0, min_ra=0., max_ra=180., min_dec=-90., max_dec=90.):
"""
Find all tiles that fall within a given overlap (pad) of (ra_ctr, dec_ctr)
Parameters
----------
ra_ctr : float
Central RA
dec_ctr : float
Central Dec
pad : float, optional
Size of region about center (Default: 0.0)
min_ra : float, optional
Min RA of box to search in for overlaps (Default: 0.)
max_ra : float, optional
Max RA of box to search in (Default 180.)
min_dec : float, optional
Min Dec of box to search in (Default: -90.)
max_dec : float, optional
Max Dec of box to search in (Default: 90.)
Returns
-------
overlap : bool array
Bool array indicating which tiles in the index file fall within the given region
"""
overlap = ((min_dec - pad) < dec_ctr) & ((max_dec + pad) > dec_ctr)
#TRAP HIGH LATITUDE CASE AND (I GUESS) TOSS BACK ALL TILES. DO BETTER LATER
mean_dec = (min_dec + max_dec) * 0.5
if np.abs(dec_ctr) + pad > 88.0:
return overlap
ra_pad = pad / np.cos(np.radians(mean_dec))
# MERIDIAN CASES
merid = np.where(max_ra < min_ra)
overlap[merid] = overlap[merid] & ( ((min_ra-ra_pad) < ra_ctr) | ((max_ra+ra_pad) > ra_ctr) )[merid]
# BORING CASE
normal = np.where(max_ra > min_ra)
overlap[normal] = overlap[normal] & ((((min_ra-ra_pad) < ra_ctr) & ((max_ra+ra_pad) > ra_ctr)))[normal]
return overlap
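# Illustrative call (mirrors the usage in main() below); the index column names are the
# ones used later in this file, and passing array-valued tile bounds yields a boolean mask:
#   overlap = calc_tile_overlap(ra_ctr, dec_ctr, pad=0.25,
#                               min_ra=index['MIN_RA'], max_ra=index['MAX_RA'],
#                               min_dec=index['MIN_DEC'], max_dec=index['MAX_DEC'])
#   tile_indices = np.where(overlap)[0]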
def main(**kwargs):
orig_wgetfile = os.path.join(_WORK_DIR, 'code/adam/MyAllSkyTable_akleroy.csv')
outfile = kwargs.get('output_file', 'wget_commands.sh')
with open(orig_wgetfile, 'r') as f:
lines = f.readlines()
galaxy_list = kwargs.get('galaxy_list', None)
if galaxy_list is None:
print('No galaxies requested!')
sys.exit()
gals = gal_data.gal_data(names=kwargs['galaxy_list'], galdata_dir=_GALDATA_DIR)
n_gals = len(gals)
size_deg = 30. * 60. / 3600.
#tile_list = []
for i in range(n_gals):
this_gal = np.rec.fromarrays(gals[i], names=list(config.COLUMNS))
galname = str(this_gal.name).replace(' ', '').upper()
# open the index file which provides information about all the galex tiles
indexfile = os.path.join(_OUTPUT_DIR, 'galex_index_file.fits')
ext = 1
index, hdr = astropy.io.fits.getdata(indexfile, ext, header=True)
# CALCULATE TILE OVERLAP
tile_overlaps = calc_tile_overlap(this_gal.ra_deg, this_gal.dec_deg, pad=size_deg,
min_ra=index['MIN_RA'],
max_ra=index['MAX_RA'],
min_dec=index['MIN_DEC'],
max_dec=index['MAX_DEC'])
# FIND OVERLAPPING TILES WITH RIGHT BAND
# index file set up such that index['fuv'] = 1 if fuv and index['nuv'] = 1 if nuv
ind = np.where((index[BAND]) & tile_overlaps)[0]
# pull out the tilenames of the overlapping tiles
infiles = index[ind]['fname']
tile_list = [os.path.basename(infile.split('.')[0]) for infile in infiles]
# setup the output directory and command file
output_dir = os.path.join(_OUTPUT_DIR, galname)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
newcommandfile = os.path.join(output_dir, galname + '_' + outfile)
# in the original wget file, select out all of the filenames and paths of the retrieved files
goodlines = lines[1:] # lines with a wget command
allfiles = [f.split(' ')[-1].strip('"\n').split('/')[-1].split('.')[0] for f in goodlines]
# find the locations in the allfiles list that correspond to tiles for the given galaxy
inds = np.where(np.in1d(allfiles, tile_list))[0]
names = np.asarray(allfiles)[inds] # just a check to make sure the tiles do match
# now select out the lines with the correct tiles
# we'll use this list to create our list of tiles to grab with various endings
filestarts = np.asarray(goodlines)[inds]
# populate the wget command file by replacing -int at the end of the filename with the desired file type
with open(newcommandfile, 'w') as g:
for s in galex_file_suffixes:
for f in filestarts:
newline = f.replace('-int', s)
g.writelines(newline)
if __name__ == '__main__':
args = get_args()
main(**vars(args))
| 36.979592
| 116
| 0.625276
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,260
| 0.415747
|
49373d99cd60462ee40755d32e9fd17e9129e6bd
| 478
|
py
|
Python
|
jessie_bot/help/help.py
|
KNNCreative/jessie-bot
|
de6994b6a58b742f1e943cdfbd84af6c0c183851
|
[
"MIT"
] | 1
|
2017-08-06T06:08:29.000Z
|
2017-08-06T06:08:29.000Z
|
jessie_bot/help/help.py
|
KNNCreative/jessie-bot
|
de6994b6a58b742f1e943cdfbd84af6c0c183851
|
[
"MIT"
] | null | null | null |
jessie_bot/help/help.py
|
KNNCreative/jessie-bot
|
de6994b6a58b742f1e943cdfbd84af6c0c183851
|
[
"MIT"
] | null | null | null |
import json
import logging
from pathlib import Path
from hermes.common.lex_utils import success, error
logger = logging.getLogger(__name__)
script_path = Path.cwd().joinpath('hermes/help/script.json')
with script_path.open() as f: script = json.load(f)
def handler(event, context):
help_text = '\n'.join(script['help_text'])
return success(message=help_text)
if __name__ == '__main__':
res = handler(event={}, context={})
print(json.dumps(res, indent=3))
| 22.761905
| 60
| 0.719665
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 50
| 0.104603
|
493744bd4e38a4f634bfc5490a7a5a2f5c5b9cc9
| 4,733
|
py
|
Python
|
MyStudy/app/Stateful_Firewall.py
|
OucMan/ryu
|
6ca460ec16f967945643fc7b3846898c571ad6cf
|
[
"Apache-2.0"
] | null | null | null |
MyStudy/app/Stateful_Firewall.py
|
OucMan/ryu
|
6ca460ec16f967945643fc7b3846898c571ad6cf
|
[
"Apache-2.0"
] | null | null | null |
MyStudy/app/Stateful_Firewall.py
|
OucMan/ryu
|
6ca460ec16f967945643fc7b3846898c571ad6cf
|
[
"Apache-2.0"
] | null | null | null |
from ryu.base import app_manager
from ryu.ofproto import ofproto_v1_3
from ryu.controller import ofp_event
from ryu.controller.handler import set_ev_cls, MAIN_DISPATCHER, CONFIG_DISPATCHER
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import arp
from ryu.lib.packet import ipv4
from ryu.lib.packet import tcp
from ryu.lib.packet import in_proto
from ryu.lib.packet import ether_types
from ryu.lib import addrconv
from ryu.lib import mac
import struct
import time
class FireWall(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
def __init__(self,*args,**kwargs):
super(FireWall,self).__init__(*args,**kwargs)
self.mac_to_port = {}
self.ip_to_port = {}
self.internal_host = ['10.0.0.1', '10.0.0.2']
@set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
def switch_features_handler(self, ev):
datapath = ev.msg.datapath
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
match = parser.OFPMatch()
actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
ofproto.OFPCML_NO_BUFFER)]
self.add_flow(datapath, 0, match, actions, 0, 0)
def add_flow(self, datapath, priority, match, actions, idle_timeout, hard_timeout):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
actions)]
mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
idle_timeout=idle_timeout, hard_timeout=hard_timeout,
match=match, instructions=inst)
datapath.send_msg(mod)
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def packet_in_handler(self, ev):
msg = ev.msg
datapath = msg.datapath
ofproto = datapath.ofproto
ofp_parser = datapath.ofproto_parser
in_port = msg.match['in_port']
pkt = packet.Packet(msg.data)
eth = pkt.get_protocols(ethernet.ethernet)[0]
dst = eth.dst
src = eth.src
dpid = datapath.id
self.mac_to_port.setdefault(dpid, {})
self.ip_to_port.setdefault(dpid, {})
self.mac_to_port[dpid][src] = in_port
if dst in self.mac_to_port[dpid]:
out_port = self.mac_to_port[dpid][dst]
else:
out_port = ofproto.OFPP_FLOOD
pkt_arp = pkt.get_protocol(arp.arp)
if pkt_arp:
arp_ip_src = pkt_arp.src_ip
self.ip_to_port[dpid][arp_ip_src] = in_port
actions = [ofp_parser.OFPActionOutput(out_port, ofproto.OFPCML_NO_BUFFER)]
out = ofp_parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id, in_port=in_port,
actions=actions, data=msg.data)
datapath.send_msg(out)
return
pkt_ipv4 = pkt.get_protocol(ipv4.ipv4)
if pkt_ipv4:
ipv4_src = pkt_ipv4.src
ipv4_dst = pkt_ipv4.dst
self.ip_to_port[dpid][ipv4_src] = in_port
ipv4_proto = pkt_ipv4.proto
if ipv4_proto == in_proto.IPPROTO_TCP:
pkt_tcp = pkt.get_protocol(tcp.tcp)
if pkt_tcp.bits == tcp.TCP_SYN and ipv4_src not in self.internal_host:
# Install a higher-priority flow entry (src, dst, tcp, syn) -> drop
actions = []
match = ofp_parser.OFPMatch(eth_type=ether_types.ETH_TYPE_IP, ip_proto=ipv4_proto,
ipv4_src=ipv4_src, ipv4_dst=ipv4_dst, tcp_flags=tcp.TCP_SYN)
self.add_flow(datapath, 10, match, actions, 0, 0)
if ipv4_dst in self.ip_to_port[dpid]:
out_port = self.ip_to_port[dpid][ipv4_dst]
else:
out_port = ofproto.OFPP_FLOOD
actions = [ofp_parser.OFPActionOutput(out_port, ofproto.OFPCML_NO_BUFFER)]
if out_port != ofproto.OFPP_FLOOD:
match = ofp_parser.OFPMatch(eth_type=ether_types.ETH_TYPE_IP, ip_proto=ipv4_proto,
ipv4_src=ipv4_src, ipv4_dst=ipv4_dst)
self.add_flow(datapath, 1, match, actions, 0, 0)
out = ofp_parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id, in_port=in_port,
actions=actions, data=msg.data)
datapath.send_msg(out)
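# Note on the flow priorities used above: the table-miss entry is installed at
# priority 0, learned forwarding entries at priority 1, and the SYN-drop entries
# at priority 10, so once a SYN from an external host has been seen, later SYNs
# for that src/dst pair are dropped in the switch without reaching the controller.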
| 45.509615
| 110
| 0.592225
| 4,246
| 0.891455
| 0
| 0
| 3,395
| 0.712786
| 0
| 0
| 99
| 0.020785
|
493827951fe9c01069f538a18ee56a8c22b8b962
| 1,355
|
py
|
Python
|
screenpy/questions/body_of_the_last_response.py
|
perrygoy/screenpy
|
862c0d7e5ff9f1265e520ab383c04ddbd4d060eb
|
[
"MIT"
] | 39
|
2019-03-22T15:18:23.000Z
|
2022-02-23T17:32:03.000Z
|
screenpy/questions/body_of_the_last_response.py
|
perrygoy/screenpy
|
862c0d7e5ff9f1265e520ab383c04ddbd4d060eb
|
[
"MIT"
] | 63
|
2019-07-17T06:25:19.000Z
|
2022-01-13T07:03:53.000Z
|
screenpy/questions/body_of_the_last_response.py
|
bandophahita/screenpy
|
db0f3ef91a891b9d095016d83fa4b589620808ce
|
[
"MIT"
] | 15
|
2019-07-09T11:02:56.000Z
|
2021-12-24T07:43:56.000Z
|
"""
Investigate the body of the last API response received by the Actor.
"""
from json.decoder import JSONDecodeError
from typing import Union
from screenpy import Actor
from screenpy.abilities import MakeAPIRequests
from screenpy.exceptions import UnableToAnswer
from screenpy.pacing import beat
class BodyOfTheLastResponse:
"""Ask about the body of the last API response received by the Actor.
Abilities Required:
|MakeAPIRequests|
Examples::
the_actor.should(
See.the(BodyOfTheLastResponse(), ContainsTheEntry(play="Hamlet"))
)
the_actor.should(
See.the(BodyOfTheLastResponse(), ReadsExactly("To be, or not to be"))
)
"""
def describe(self) -> str:
"""Describe the Question.."""
return "The body of the last response."
@beat("{} examines the body of the last response they received.")
def answered_by(self, the_actor: Actor) -> Union[dict, str]:
"""Direct the Actor to investigate the body of the last response."""
responses = the_actor.ability_to(MakeAPIRequests).responses
if len(responses) < 1:
raise UnableToAnswer(f"{the_actor} has not yet received any API responses.")
try:
return responses[-1].json()
except JSONDecodeError:
return responses[-1].text
| 30.111111
| 88
| 0.667159
| 1,053
| 0.777122
| 0
| 0
| 518
| 0.382288
| 0
| 0
| 695
| 0.512915
|
493855de80e96da6e183d61540552721c4471e12
| 2,973
|
py
|
Python
|
d3rlpy/algos/torch/td3_impl.py
|
ningyixue/AIPI530_Final_Project
|
b95353ffd003692a37a59042dfcd744a18b7e802
|
[
"MIT"
] | 565
|
2020-08-01T02:44:28.000Z
|
2022-03-30T15:00:54.000Z
|
d3rlpy/algos/torch/td3_impl.py
|
ningyixue/AIPI530_Final_Project
|
b95353ffd003692a37a59042dfcd744a18b7e802
|
[
"MIT"
] | 144
|
2020-08-01T03:45:10.000Z
|
2022-03-30T14:51:16.000Z
|
d3rlpy/algos/torch/td3_impl.py
|
ningyixue/AIPI530_Final_Project
|
b95353ffd003692a37a59042dfcd744a18b7e802
|
[
"MIT"
] | 103
|
2020-08-26T13:27:34.000Z
|
2022-03-31T12:24:27.000Z
|
from typing import Optional, Sequence
import torch
from ...gpu import Device
from ...models.encoders import EncoderFactory
from ...models.optimizers import OptimizerFactory
from ...models.q_functions import QFunctionFactory
from ...preprocessing import ActionScaler, RewardScaler, Scaler
from ...torch_utility import TorchMiniBatch
from .ddpg_impl import DDPGImpl
class TD3Impl(DDPGImpl):
_target_smoothing_sigma: float
_target_smoothing_clip: float
def __init__(
self,
observation_shape: Sequence[int],
action_size: int,
actor_learning_rate: float,
critic_learning_rate: float,
actor_optim_factory: OptimizerFactory,
critic_optim_factory: OptimizerFactory,
actor_encoder_factory: EncoderFactory,
critic_encoder_factory: EncoderFactory,
q_func_factory: QFunctionFactory,
gamma: float,
tau: float,
n_critics: int,
target_reduction_type: str,
target_smoothing_sigma: float,
target_smoothing_clip: float,
use_gpu: Optional[Device],
scaler: Optional[Scaler],
action_scaler: Optional[ActionScaler],
reward_scaler: Optional[RewardScaler],
):
super().__init__(
observation_shape=observation_shape,
action_size=action_size,
actor_learning_rate=actor_learning_rate,
critic_learning_rate=critic_learning_rate,
actor_optim_factory=actor_optim_factory,
critic_optim_factory=critic_optim_factory,
actor_encoder_factory=actor_encoder_factory,
critic_encoder_factory=critic_encoder_factory,
q_func_factory=q_func_factory,
gamma=gamma,
tau=tau,
n_critics=n_critics,
target_reduction_type=target_reduction_type,
use_gpu=use_gpu,
scaler=scaler,
action_scaler=action_scaler,
reward_scaler=reward_scaler,
)
self._target_smoothing_sigma = target_smoothing_sigma
self._target_smoothing_clip = target_smoothing_clip
def compute_target(self, batch: TorchMiniBatch) -> torch.Tensor:
assert self._targ_policy is not None
assert self._targ_q_func is not None
with torch.no_grad():
action = self._targ_policy(batch.next_observations)
# smoothing target
noise = torch.randn(action.shape, device=batch.device)
scaled_noise = self._target_smoothing_sigma * noise
clipped_noise = scaled_noise.clamp(
-self._target_smoothing_clip, self._target_smoothing_clip
)
smoothed_action = action + clipped_noise
clipped_action = smoothed_action.clamp(-1.0, 1.0)
return self._targ_q_func.compute_target(
batch.next_observations,
clipped_action,
reduction=self._target_reduction_type,
)
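# Summary of the target computation above (TD3 target policy smoothing): the target
# policy proposes an action for the next observation, zero-mean Gaussian noise scaled
# by target_smoothing_sigma is clipped to +/- target_smoothing_clip and added, and the
# smoothed action is clamped to the [-1, 1] action range before the target critics are
# queried with the configured reduction.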
| 36.703704
| 73
| 0.670703
| 2,604
| 0.875883
| 0
| 0
| 0
| 0
| 0
| 0
| 18
| 0.006054
|
49389ecac90c405c9c35bd0a48479aa66ba8e1c6
| 9,086
|
py
|
Python
|
mod_modPackInformer/source/mod_modPackInformer.py
|
stealthz67/spoter-mods-1
|
4ebd859fbb705b085ae5c4cb621edfbab476e378
|
[
"WTFPL"
] | null | null | null |
mod_modPackInformer/source/mod_modPackInformer.py
|
stealthz67/spoter-mods-1
|
4ebd859fbb705b085ae5c4cb621edfbab476e378
|
[
"WTFPL"
] | null | null | null |
mod_modPackInformer/source/mod_modPackInformer.py
|
stealthz67/spoter-mods-1
|
4ebd859fbb705b085ae5c4cb621edfbab476e378
|
[
"WTFPL"
] | 1
|
2019-12-10T19:11:55.000Z
|
2019-12-10T19:11:55.000Z
|
# -*- coding: utf-8 -*-
import json
import os
import threading
import urllib
import urllib2
import BigWorld
import ResMgr
from gui.Scaleform.daapi.view.dialogs import DIALOG_BUTTON_ID, ConfirmDialogButtons, SimpleDialogMeta
from gui.Scaleform.daapi.view.lobby.LobbyView import LobbyView
from gui import DialogsInterface, SystemMessages, makeHtmlString
from notification.NotificationListView import NotificationListView
from constants import AUTH_REALM
from helpers import getLanguageCode
from adisp import process
from gui.Scaleform.daapi.view.common.BaseTicker import BaseTicker
from helpers import dependency
from skeletons.gui.game_control import IBrowserController, IExternalLinksController
class Config(object):
def __init__(self):
self.data = {
'version' : '',
'name' : '',
'serverMain' : '',
'serverBackup' : '',
'statistic' : False,
'statisticTid' : '',
'openLinkInGameBrowser': False
}
xml = ResMgr.openSection('scripts/client/gui/mods/mod_modPackInformer.xml')
if xml is not None:
self.data['version'] = '%s' % xml.readString('version', '')
self.data['name'] = '%s' % xml.readString('name', '')
self.data['serverMain'] = '%s' % xml.readString('serverMain', '')
self.data['serverBackup'] = '%s' % xml.readString('serverBackup', '')
self.data['statistic'] = xml.readBool('statistic', False)
self.data['statisticTid'] = '%s' % xml.readString('statisticTid', '')
self.data['openLinkInGameBrowser'] = xml.readBool('openLinkInGameBrowser', False)
class Updater(object):
def __init__(self):
self.show = True
self.count = 0
self.lin1 = ''
def start(self):
if not updater.show: return
try:
f = urllib2.urlopen(config.data['serverMain'])
except StandardError:
f = None
if f is None or f.getcode() != 200:
try:
f = urllib2.urlopen(config.data['serverBackup'])
except StandardError:
f = None
if f is not None and f.getcode() == 200:
mod_text = ''
json_text = json.loads(f.read().decode('utf-8-sig'))
if config.data['version'] != '%s' % json_text['version']:
self.show = False
if json_text['header']:
mod_text += '%s' % json_text['header'].format(**json_text)
if json_text['image']:
try:
image = 'img://gui/html/%s' % json_text['imageName']
path = os.path.realpath(os.path.join('./res/gui/html', '%s' % json_text['imageName']))
if not os.path.exists(path):
urllib.urlretrieve('%s' % json_text['imageLink'], path)
except StandardError:
image = ''
path = ''
if image and path and os.path.exists(path):
mod_text += '<br/><img src=\"%s\" width=\"%s\" height=\"%s\">' % (image, json_text['imageWidth'], json_text['imageHeight'])
if json_text['message']:
mod_text += '<br/>%s' % json_text['message'].format(**json_text)
self.lin1 = '%s' % json_text['link']
DialogsInterface.showDialog(SimpleDialogMeta(json_text['windowName'], mod_text, ConfirmDialogButtons(json_text['buttonNameOpen'], json_text['buttonNameClose']), None), self.click)
link = makeHtmlString('html_templates:lobby/system_messages', 'link', {
'text' : '%s' % json_text['messageLinkName'],
'linkType': '%s' % self.lin1
})
p__msg = '%s<br><br>' % json_text['header'].format(**json_text)
p__msg += '<font color="#E2D2A2" size="15"><b>%s</b></font>' % link
SystemMessages.pushMessage(p__msg, SystemMessages.SM_TYPE.GameGreeting)
def click(self, isConfirmed):
if isConfirmed and self.lin1:
if self.lin1.lower().startswith('http:') or self.lin1.lower().startswith('https:'):
if config.data['openLinkInGameBrowser']:
browser.open(self.lin1)
else:
BigWorld.wg_openWebBrowser(self.lin1)
def openLink(self, action):
if self.lin1 is None or self.lin1 == '': return
if self.lin1 in action:
self.click(True)
class Statistics(object):
def __init__(self):
self.analytics_started = False
self.thread_analytics = None
self.user = None
self.old_user = None
def analytics_start(self):
if not self.analytics_started:
lang = str(getLanguageCode()).upper()
param = urllib.urlencode({
'v' : 1, # Version.
'tid': config.data['statisticTid'],
'cid': self.user, # Anonymous Client ID.
't' : 'screenview', # Screenview hit type.
'an' : 'modPackInformer "%s"' % config.data['name'], # App name.
'av' : 'modPackInformer "%s" %s' % (config.data['name'], config.data['version']),
'cd' : 'Cluster: [%s], lang: [%s]' % (AUTH_REALM, lang), # Screen name / content description.
'ul' : '%s' % lang,
'sc' : 'start'
})
urllib2.urlopen(url='http://www.google-analytics.com/collect?', data=param).read()
self.analytics_started = True
self.old_user = BigWorld.player().databaseID
def start(self):
player = BigWorld.player()
if self.user and self.user != player.databaseID:
self.old_user = player.databaseID
self.thread_analytics = threading.Thread(target=self.end, name='Thread')
self.thread_analytics.start()
self.user = player.databaseID
self.thread_analytics = threading.Thread(target=self.analytics_start, name='Thread')
self.thread_analytics.start()
def end(self):
if self.analytics_started:
lang = str(getLanguageCode()).upper()
param = urllib.urlencode({
'v' : 1, # Version.
'tid': config.data['statisticTid'],
'cid': self.user, # Anonymous Client ID.
't' : 'screenview', # Screenview hit type.
'an' : 'modPackInformer "%s"' % config.data['name'], # App name.
'av' : 'modPackInformer "%s" %s' % (config.data['name'], config.data['version']),
'cd' : 'Cluster: [%s], lang: [%s]' % (AUTH_REALM, lang), # Screen name / content description.
'ul' : '%s' % lang,
'sc' : 'end'
})
urllib2.urlopen(url='http://www.google-analytics.com/collect?', data=param).read()
self.analytics_started = False
class p__Browser(BaseTicker):
externalBrowser = dependency.descriptor(IExternalLinksController)
internalBrowser = dependency.descriptor(IBrowserController)
def __init__(self):
super(p__Browser, self).__init__()
self.__browserID = 'modPackInformer'
return
def _dispose(self):
self.__browserID = 'modPackInformer'
super(p__Browser, self)._dispose()
return
def open(self, link, internal=True):
if internal:
if self.internalBrowser is not None:
self.__showInternalBrowser(link)
else:
self.__showExternalBrowser(link)
else:
self.__showExternalBrowser(link)
return
@process
def __showInternalBrowser(self, link):
self.__browserID = yield self.internalBrowser.load(url=link, browserID=self.__browserID)
def __showExternalBrowser(self, link):
if self.externalBrowser is not None:
self.externalBrowser.open(link)
def hookedGetLabels(self):
return [{
'id' : DIALOG_BUTTON_ID.SUBMIT,
'label' : self._submit,
'focused': True
}, {
'id' : DIALOG_BUTTON_ID.CLOSE,
'label' : self._close,
'focused': False
}]
def hookedLobbyPopulate(self):
hookLobbyPopulate(self)
start = threading.Thread(target=updater.start, name='updater.start')
start.start()
if config.data['statistic']:
stat.start()
def hookedOnClickAction(*args):
updater.openLink(args[3])
hookOnClickAction(*args)
def init():
print '[LOAD_MOD]: [modPackInformer, by spoter]'
def fini():
stat.end()
config = Config()
browser = p__Browser()
updater = Updater()
stat = Statistics()
ConfirmDialogButtons.getLabels = hookedGetLabels
hookLobbyPopulate = LobbyView._populate
LobbyView._populate = hookedLobbyPopulate
hookOnClickAction = NotificationListView.onClickAction
NotificationListView.onClickAction = hookedOnClickAction
| 38.5
| 195
| 0.575171
| 7,395
| 0.81371
| 135
| 0.014855
| 148
| 0.016285
| 0
| 0
| 1,680
| 0.184859
|
49397d33975fc946c23e0dd90e4f51ce16027f86
| 1,978
|
py
|
Python
|
tests/dummypredictor/predictors.py
|
kiconiaworks/igata
|
1d8a4b82a65eb936d5d8f8ff70747ba82ddef31a
|
[
"BSD-2-Clause"
] | 1
|
2021-12-31T14:29:44.000Z
|
2021-12-31T14:29:44.000Z
|
tests/dummypredictor/predictors.py
|
kiconiaworks/igata
|
1d8a4b82a65eb936d5d8f8ff70747ba82ddef31a
|
[
"BSD-2-Clause"
] | 6
|
2019-11-25T04:20:26.000Z
|
2021-12-13T05:23:16.000Z
|
tests/dummypredictor/predictors.py
|
kiconiaworks/igata
|
1d8a4b82a65eb936d5d8f8ff70747ba82ddef31a
|
[
"BSD-2-Clause"
] | null | null | null |
from time import sleep
from igata.predictors import PredictorBase
class DummyPredictorNoInputNoOutput(PredictorBase):
def predict(self, inputs, meta):
result = {"result": 0.222, "class": "car", "is_valid": True}
return result
class DummyPredictorNoInputNoOutputVariableOutput(PredictorBase):
def __init__(self, *args, **kwargs):
default_result = {"result": 0.222, "class": "car", "is_valid": True}
self.result = kwargs.get("result", default_result)
def predict(self, input, meta=None):
return self.result
class DummyPredictorNoOutput(PredictorBase):
def preprocess_input(self, record, meta=None):
return {}
def predict(self, record, meta):
return record
class DummyPredictorNoInputNoOutputWithPredictTimeout5s(PredictorBase):
def predict(self, inputs, meta):
self.set_predict_timeout(3)
sleep(10)
result = {"result": 0.222, "class": "car", "is_valid": True}
return result
class DummyPredictorOptionalValidStaticMethods(PredictorBase):
@staticmethod
def get_pandas_read_csv_kwargs(self):
return {"x": 1}
def predict(self, inputs, meta):
return {"result": 0.222, "class": "car", "is_valid": True}
@staticmethod
def get_pandas_to_csv_kwargs(self):
return {"y": 2}
@staticmethod
def set_additional_dynamodb_request_update_attributes(self):
return {"v": True}
class DummyPredictorOptionalInValidStaticMethods(PredictorBase):
def get_pandas_read_csv_kwargs(self):
return {"x": 1}
def predict(self, inputs, meta):
return {"result": 0.222, "class": "car", "is_valid": True}
def get_pandas_to_csv_kwargs(self):
return {"y": 2}
def set_additional_dynamodb_request_update_attributes(self):
return {"v": True}
class DummyInPandasDataFrameOutPandasCSVPredictor(PredictorBase):
def predict(self, inputs, meta):
raise NotImplementedError
| 27.859155
| 76
| 0.68453
| 1,890
| 0.955511
| 0
| 0
| 261
| 0.131951
| 0
| 0
| 176
| 0.088979
|
493a4dbeaaa60cda9d709c7b5d41a2f09db7205c
| 2,134
|
py
|
Python
|
ML/wrapper.py
|
NVombat/ReadAssist
|
11e107a7387f97024fa9e16d58a5b25e8291d343
|
[
"MIT"
] | 5
|
2021-04-27T09:18:42.000Z
|
2022-03-22T17:24:33.000Z
|
ML/wrapper.py
|
SanahSidhu/ReadAssist
|
abbe910d0583bc504c3f2b318ccf263bb7170900
|
[
"MIT"
] | null | null | null |
ML/wrapper.py
|
SanahSidhu/ReadAssist
|
abbe910d0583bc504c3f2b318ccf263bb7170900
|
[
"MIT"
] | 4
|
2021-03-30T06:18:09.000Z
|
2021-04-05T08:05:16.000Z
|
#Encapsulates all models
#Caches the models and uses the preexisting model instead of reloading it
from .OCR import get_text
from .ptt import get_pdf
import pytesseract
from .question import secondary_pipeline
from transformers import pipeline
class customwrapper():
def __init__(self):
self.questionmodel = None
self.summarizemodel = None
self.tessearct = None
self.generatemodel = None
def question(self, text : str):
if self.questionmodel == None:
self.questionmodel = secondary_pipeline('question-generation')
return self.questionmodel(text)
def summarize(self, text : str, min_length, max_length):
if self.summarizemodel == None:
self.summarizemodel = pipeline('summarization')
return self.summarizemodel(text, min_length=min_length, max_length=max_length)
def generate_text(self, text : str):
if self.generatemodel == None:
self.generatemodel = pipeline('text2text-generation')
return self.generatemodel(text)
if __name__ == '__main__':
# gpt = pipeline('text-generation', model='gpt')
trans = customwrapper()
text = '''
The physical nature of time is addressed by general relativity with respect to events in space-time. Examples of events are the collision or two particles, the explosion of a supernova, or the arrival of a rocket ship. Every event can be assigned four numbers representing its time and position (the event's coordinates). However, the numerical values are different for different observers. In general relativity, the question of what time it is now only has meaning relative to a particular observer. Distance and time are intimately related and the time required for light to travel a specific distance is the same for all observers, as first publicly demonstrated by Michelson and Morley. General relativity does not address the nature of time for extremely small intervals where quantum mechanics holds. At this time, there is no generally accepted theory of quantum general relativity.
'''
print(trans.question(text=text))
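# Illustrative usage of the other wrappers (not executed here; assumes the
# corresponding Hugging Face pipelines can be downloaded on first use):
# wrapper = customwrapper()
# print(wrapper.summarize(text, min_length=30, max_length=120))
# print(wrapper.generate_text('Explain why caching the pipelines helps: '))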
| 54.717949
| 894
| 0.735708
| 825
| 0.386598
| 0
| 0
| 0
| 0
| 0
| 0
| 1,119
| 0.524367
|
493bd803d4c7823847afa2537f0ada612dffc26a
| 154
|
py
|
Python
|
unicode_urls/cms/__init__.py
|
Alexx-G/django-unicode-urls
|
fd4f89181c7172412ddf499efd050119c16c7d43
|
[
"MIT"
] | null | null | null |
unicode_urls/cms/__init__.py
|
Alexx-G/django-unicode-urls
|
fd4f89181c7172412ddf499efd050119c16c7d43
|
[
"MIT"
] | null | null | null |
unicode_urls/cms/__init__.py
|
Alexx-G/django-unicode-urls
|
fd4f89181c7172412ddf499efd050119c16c7d43
|
[
"MIT"
] | null | null | null |
from .urlutils import any_path_re
def patch_djangocms_urls():
import cms.utils.urlutils as cms_urlutils
cms_urlutils.any_path_re = any_path_re
| 19.25
| 45
| 0.792208
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
493bdcbcdb59f9dfe146c2a5250f13347ddc9c85
| 4,242
|
py
|
Python
|
mailpile/plugins/demos.py
|
pyarnold/Mailpile
|
a7c0a0c6257da167207200f3b214b0e66bb93a10
|
[
"Apache-2.0"
] | 2
|
2017-02-03T07:00:57.000Z
|
2020-12-18T01:07:34.000Z
|
mailpile/plugins/demos.py
|
cz8s/Mailpile
|
a7c0a0c6257da167207200f3b214b0e66bb93a10
|
[
"Apache-2.0"
] | null | null | null |
mailpile/plugins/demos.py
|
cz8s/Mailpile
|
a7c0a0c6257da167207200f3b214b0e66bb93a10
|
[
"Apache-2.0"
] | null | null | null |
# This is a collection of very short demo-plugins to illustrate how
# to create and register hooks into the various parts of Mailpile
#
# To start creating a new plugin, it may make sense to copy this file,
# globally search/replace the word "Demo" with your preferred plugin
# name and then go delete sections you aren't going to use.
#
# Happy hacking!
from gettext import gettext as _
import mailpile.plugins
##[ Pluggable configuration ]#################################################
# FIXME
##[ Pluggable keyword extractors ]############################################
# FIXME
##[ Pluggable search terms ]##################################################
# Pluggable search terms allow plugins to enhance the behavior of the
# search engine in various ways. Examples of basic enhanced search terms
# are the date: and size: keywords, which accept human-friendly ranges
# and input, and convert those to a list of "low level" keywords to
# actually search for.
# FIXME
##[ Pluggable vcard functions ]###############################################
from mailpile.vcard import *
class DemoVCardImporter(VCardImporter):
"""
This VCard importer simply generates VCards based on data in the
configuration. This is not particularly useful, but it demonstrates
how each importer can define (and use) its own settings.
"""
FORMAT_NAME = _('Demo Contacts')
FORMAT_DESCRIPTION = _('This is the demo importer')
SHORT_NAME = 'demo'
CONFIG_RULES = {
'active': [_('Activate demo importer'), bool, True],
'name': [_('Contact name'), str, 'Mr. Rogers'],
'email': [_('Contact email'), 'email', 'mr@rogers.com']
}
def get_vcards(self):
"""Returns just a single contact, based on data from the config."""
# Notes to implementors:
#
# - It is important to only return one card per (set of)
# e-mail addresses, as internal overwriting may cause
# unexpected results.
# - If data is to be deleted from the contact list, it
# is important to return a VCard for that e-mail address
# which has the relevant data removed.
#
if not self.config.active:
return []
return [SimpleVCard(
VCardLine(name='fn', value=self.config.name),
VCardLine(name='email', value=self.config.email)
)]
mailpile.plugins.register_vcard_importers(DemoVCardImporter)
##[ Pluggable cron jobs ]#####################################################
def TickJob(session):
"""
This is a very minimal cron job - just a function that runs within
a session.
Note that generally it is a better pattern to create a Command which
is then invoked by the cron job, so power users can access the
functionality directly. It is also a good idea to make the interval
configurable by registering a setting and referencing that instead of
a fixed number. See compose.py for an example of how this is done.
"""
session.ui.notify('Tick!')
mailpile.plugins.register_fast_periodic_job('tick-05', # Job name
5, # Interval in seconds
TickJob) # Callback
mailpile.plugins.register_slow_periodic_job('tick-15', 15, TickJob)
##[ Pluggable commands ]######################################################
from mailpile.commands import Command
from mailpile.util import md5_hex
class md5sumCommand(Command):
"""This command calculates MD5 sums"""
SYNOPSIS = (None, 'md5sum', 'md5sum', '[<data to hash>]')
SPLIT_ARG = False
HTTP_CALLABLE = ('GET', 'POST')
HTTP_QUERY_VARS = {
'data': 'Data to hash'
}
def command(self):
if 'data' in self.data:
data = self.data['data']
else:
data = ''.join(self.args)
if 'gross' in data or not data:
return self._error(_('I refuse to work with empty or gross data'),
info={'data': data})
return self._success(_('I hashed your data for you, yay!'),
result=md5_hex(data))
mailpile.plugins.register_commands(md5sumCommand)
| 32.883721
| 78
| 0.599481
| 1,989
| 0.468883
| 0
| 0
| 0
| 0
| 0
| 0
| 2,631
| 0.620226
|
493d7925c52f4ec18bce691a24e25dd57a737ace
| 2,523
|
py
|
Python
|
cqi_cpp/src/wrapper/cqi_test.py
|
AMR-/Conservative-Q-Improvement
|
f9d47b33fe757475d3216d3c406d147206738c90
|
[
"MIT"
] | null | null | null |
cqi_cpp/src/wrapper/cqi_test.py
|
AMR-/Conservative-Q-Improvement
|
f9d47b33fe757475d3216d3c406d147206738c90
|
[
"MIT"
] | null | null | null |
cqi_cpp/src/wrapper/cqi_test.py
|
AMR-/Conservative-Q-Improvement
|
f9d47b33fe757475d3216d3c406d147206738c90
|
[
"MIT"
] | null | null | null |
import argparse
import gym
import math
from qtree_wrapper import PyBox as Box
from qtree_wrapper import PyDiscrete as Discrete
from qtree_wrapper import PyQTree as QTree
from qtree_wrapper import PyVector as Vector
from train import Train
from utils import convert_to_pybox
env = gym.make('CartPole-v0')
def truncate(number, digits):
stepper = 10.0 ** digits
return math.trunc(stepper * number) / stepper
box = convert_to_pybox(env.observation_space)
discrete = Discrete(env.action_space.n)
# Optional command line args
parser = argparse.ArgumentParser()
parser.add_argument("--gamma")
parser.add_argument("--alpha")
parser.add_argument("--visit_decay")
parser.add_argument("--split_thresh_max")
parser.add_argument("--split_thresh_decay")
parser.add_argument("--num_splits")
parser.add_argument("--grid_search")
parser.add_argument("--steps")
args = parser.parse_args()
gamma = float(args.gamma) if args.gamma else 0.99
alpha = float(args.alpha) if args.alpha else 0.01
visit_decay = float(args.visit_decay) if args.visit_decay else 0.999
split_thresh_max = float(args.split_thresh_max) if args.split_thresh_max else 0.1
split_thresh_decay = float(args.split_thresh_decay) if args.split_thresh_decay else 0.99
num_splits = int(args.num_splits) if args.num_splits else 2
grid_search = bool(args.grid_search) if args.grid_search else False
qfunc = QTree(box, discrete, None,
# Hyperparameters
gamma,
alpha,
visit_decay,
split_thresh_max,
split_thresh_decay,
num_splits)
t = Train(qfunc, env)
eps_func = (lambda step: max(0.05, 1 - step/1e5))
train_steps = int(args.steps) if args.steps else int(3e7)
# Training
history = t.train(train_steps, eps_func, verbose=True, qfunc_hist=None)
# Evaluation:
results, history2, avg_r_per_ep, _ = t.train(50000, lambda step: 0.05, \
verbose=True, eval_only=True, penalty_check=lambda s, r: r <= -1000, \
track_data_per=1, run_tag="some descriptive tag for logging")
qfunc.print_structure()
nodes = f"\nNumber of nodes: {qfunc.num_nodes()}\n"
reward = f"\nAverage reward per episode: {truncate(avg_r_per_ep, 3)}\n"
hparams_str = f"gamma={gamma}, alpha={alpha}, visit_decay={visit_decay}, "
hparams_str += f"split_thresh_max={split_thresh_max}, "
hparams_str += f"split_thresh_decay={split_thresh_decay}, num_splits={num_splits}"
if grid_search:
with open("grid_search_results.txt", "a") as myfile:
myfile.write(nodes + reward + hparams_str)
myfile.close()
else:
print(nodes + reward + hparams_str)
| 31.148148
| 88
| 0.752675
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 528
| 0.209275
|
493e1e1955403325340bec49f18afa1cd0849a0e
| 621
|
py
|
Python
|
mcu-controller/main.py
|
KongoPL/lego-rc-car
|
6e731cd8a6787d69a83d5a92a290bbea074ef588
|
[
"BSD-3-Clause"
] | null | null | null |
mcu-controller/main.py
|
KongoPL/lego-rc-car
|
6e731cd8a6787d69a83d5a92a290bbea074ef588
|
[
"BSD-3-Clause"
] | null | null | null |
mcu-controller/main.py
|
KongoPL/lego-rc-car
|
6e731cd8a6787d69a83d5a92a290bbea074ef588
|
[
"BSD-3-Clause"
] | null | null | null |
# Yea, there is probably some good framework waiting for me,
# but I just want to have fun. Sometimes reinventing the wheel will serve you.
# But...don't do that in professional work :)
import config
import time
from Controller import Controller
print("Hello!")
root = Controller()
# try:
if True:
lastLoopExecution = time.time()
while True:
loopStartExecution = time.time()
deltaTime = loopStartExecution - lastLoopExecution
if deltaTime < 0.001:
continue
for object in root.createdObjects:
object.update(deltaTime)
lastLoopExecution = loopStartExecution
# except:
# print("An error occured")
| 20.7
| 78
| 0.742351
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 234
| 0.376812
|
493f41de8fbe0f2e07f4b04ada75db7783d58023
| 803
|
py
|
Python
|
thread_test.py
|
mrabedini/playground_threading
|
664bcae4a9328779170551d7d0e271707635e85d
|
[
"MIT"
] | null | null | null |
thread_test.py
|
mrabedini/playground_threading
|
664bcae4a9328779170551d7d0e271707635e85d
|
[
"MIT"
] | null | null | null |
thread_test.py
|
mrabedini/playground_threading
|
664bcae4a9328779170551d7d0e271707635e85d
|
[
"MIT"
] | null | null | null |
import concurrent.futures
import logging
from logging import StreamHandler
import time
import timeit
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
logger = logging.getLogger(__name__)
def do_something(wait_time):
logger.info("Waiting for %d seconds.", wait_time)
time.sleep(wait_time)
return f"Wait was done for {wait_time} seconds."
start = timeit.default_timer()
with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executer:
secs = [5, 4, 3, 2, 1]
results = [executer.submit(do_something, sec) for sec in secs]
for f in concurrent.futures.as_completed(results):
logger.info("result %s", f.result())
end = timeit.default_timer()
logger.info("Execution took %f seconds", end - start)
| 25.09375
| 71
| 0.716065
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 158
| 0.196762
|
4940523067b88b49cb6f2210898a5f21bd9601ae
| 1,249
|
py
|
Python
|
iv/Backtracking/combinations.py
|
iamsuman/iv
|
bf68d3fd45455b6041e74b09272f69503bf7a8ac
|
[
"MIT"
] | 2
|
2020-09-19T22:28:15.000Z
|
2020-10-03T01:44:53.000Z
|
iv/Backtracking/combinations.py
|
iamsuman/iv
|
bf68d3fd45455b6041e74b09272f69503bf7a8ac
|
[
"MIT"
] | null | null | null |
iv/Backtracking/combinations.py
|
iamsuman/iv
|
bf68d3fd45455b6041e74b09272f69503bf7a8ac
|
[
"MIT"
] | 1
|
2020-10-03T01:43:30.000Z
|
2020-10-03T01:43:30.000Z
|
class Combo():
def combine(self,n, k):
A = list(range(1,n + 1))
res = self.comb(A, k)
return res
def comb(self, A, n):
if n == 0:
return [[]]
l = []
for i in range(0, len(A)):
m = A[i]
remLst = A[i + 1:]
for p in self.comb(remLst, n - 1):
l.append([m] + p)
return l
def combinations(n, list, combos=None):
# initialize combos on the first pass through (avoids a shared mutable default)
if combos is None:
combos = []
if len(list) == n:
# when list has been dwindeled down to size n
# check to see if the combo has already been found
# if not, add it to our list
if combos.count(list) == 0:
combos.append(list)
combos.sort()
return combos
else:
# for each item in our list, make a recursive
# call to find all possible combos of it and
# the remaining items
for i in range(len(list)):
refined_list = list[:i] + list[i+1:]
combos = combinations(n, refined_list, combos)
return combos
a = Combo()
A = 4
B = 2
# print(a.combine(A,B))
print(a.comb([1,2,3,4], 2))
print(a.comb([1,2,3,4,5], 2))
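# Expected output of the first call:
# [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]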
| 22.709091
| 58
| 0.506005
| 394
| 0.315452
| 0
| 0
| 0
| 0
| 0
| 0
| 305
| 0.244195
|
4941a07b8598fcd71acf4d8decca54a679038504
| 1,257
|
py
|
Python
|
urlbrevity/test_urlconf.py
|
kezabelle/django-urlbrevity
|
a8b779587986c60c4e0597aead908d954480f0f9
|
[
"BSD-2-Clause-FreeBSD"
] | 4
|
2015-02-13T16:20:41.000Z
|
2020-07-02T18:45:50.000Z
|
urlbrevity/test_urlconf.py
|
kezabelle/django-urlbrevity
|
a8b779587986c60c4e0597aead908d954480f0f9
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
urlbrevity/test_urlconf.py
|
kezabelle/django-urlbrevity
|
a8b779587986c60c4e0597aead908d954480f0f9
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
# -*- coding: utf-8 -*-
from pytest import raises
from django.contrib import admin
from django.core.urlresolvers import reverse
from django.core.urlresolvers import resolve
from django.core.urlresolvers import NoReverseMatch
from django.core.urlresolvers import Resolver404
from django.http import HttpResponse
import urlbrevity
try:
from django.conf.urls import patterns, url, include
except ImportError: # pragma: no cover
from django.conf.urls.defaults import patterns, url, include
finally:
def just_a_view(request, pk):
return HttpResponse(str(pk))
urlpatterns = patterns("",
url(regex=r'^test_user/(?P<pk>\d+)/?$',
view=just_a_view),
url(r'redirect/', include(urlbrevity.redirects)),
url(r'admin/', include(admin.site.urls)),
)
def test_reversing():
assert (reverse('urlbrevity:short', kwargs={'encoded_value': 'rQuX'})
== '/redirect/rQuX')
def test_reversing_badchars():
with raises(NoReverseMatch):
reverse('urlbrevity:short', kwargs={'encoded_value': 'rQu1'})
def test_resolving():
assert resolve('/redirect/rQuX').func == urlbrevity.do_redirect
def test_resolving_badchars():
with raises(Resolver404):
resolve('/redirect/rQu1')
| 28.568182
| 73
| 0.703262
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 218
| 0.173429
|
4945214eb5cf61ec5b89774833abf449ace18614
| 7,845
|
py
|
Python
|
test/unittest/datafinder_test/persistence/metadata/value_mapping/custom_format_test.py
|
schlauch/DataFinder
|
958fda4f3064f9f6b2034da396a20ac9d9abd52f
|
[
"BSD-3-Clause"
] | 9
|
2016-05-25T06:12:52.000Z
|
2021-04-30T07:22:48.000Z
|
test/unittest/datafinder_test/persistence/metadata/value_mapping/custom_format_test.py
|
schlauch/DataFinder
|
958fda4f3064f9f6b2034da396a20ac9d9abd52f
|
[
"BSD-3-Clause"
] | 6
|
2016-03-29T13:38:18.000Z
|
2017-01-18T15:57:42.000Z
|
test/unittest/datafinder_test/persistence/metadata/value_mapping/custom_format_test.py
|
schlauch/DataFinder
|
958fda4f3064f9f6b2034da396a20ac9d9abd52f
|
[
"BSD-3-Clause"
] | 7
|
2016-06-15T12:01:22.000Z
|
2022-03-05T08:50:25.000Z
|
# $Filename$
# $Authors$
# Last Changed: $Date$ $Committer$ $Revision-Id$
#
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
#
# All rights reserved.
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are
#
#met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the German Aerospace Center nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Implements test cases for the custom meta data persistence format.
"""
from datetime import datetime
import decimal
import sys
import unicodedata
import unittest
from datafinder.persistence.error import PersistenceError
from datafinder.persistence.metadata.value_mapping import\
MetadataValue, getPersistenceRepresentation
__version__ = "$Revision-Id$"
_AE = unicodedata.lookup("LATIN SMALL LETTER A WITH DIAERESIS")
class MetadataValueTestCase(unittest.TestCase):
def testInvalidPersistenceValue(self):
self.assertRaises(PersistenceError, MetadataValue, None)
def testComparison(self):
self.assertEquals(MetadataValue("a"), MetadataValue("a"))
self.assertEquals(hash(MetadataValue("a")),
hash(MetadataValue("a")))
self.assertNotEquals(MetadataValue("a"), MetadataValue("b"))
self.assertNotEquals(hash(MetadataValue("a")),
hash(MetadataValue("b")))
self.assertNotEquals(MetadataValue("a"), None)
self.assertNotEquals(hash(MetadataValue("a")), hash(None))
def testRepresentation(self):
self.assertEquals(str(MetadataValue("a")), "'a'")
def testBoolValue(self):
self.assertTrue(MetadataValue("1").value)
self.assertFalse(MetadataValue("0").value)
def testStringValue(self):
self.assertEquals(MetadataValue(u"test").value, u"test")
self.assertEquals(MetadataValue("test").value, "test")
# Special escaped sequences
self.assertEquals(MetadataValue("\\____EMPTY____LIST____").value,
"____EMPTY____LIST____")
self.assertEquals(MetadataValue("\\;").value, ";")
def testNumericValue(self):
self.assertEquals(MetadataValue(u"4.5").value, decimal.Decimal("4.5"))
self.assertEquals(MetadataValue(u"5").value, decimal.Decimal("5"))
def testDatetimeValue(self):
# From time stamp
metdataValue = MetadataValue("0", expectedType=datetime)
self.assertEquals(metdataValue.value, datetime(1970, 1, 1, 1, 0))
# From RFC 822.
persistedValue = u"Wed, 02 Oct 2002 13:00:00 GMT"
metdataValue = MetadataValue(persistedValue)
self.assertEquals(metdataValue.value, datetime(2002, 10, 2, 15, 0))
# From Iso8601.
persistedValue = u"2006-10-16T08:19:39Z"
metdataValue = MetadataValue(persistedValue)
self.assertEquals(metdataValue.value, datetime(2006, 10, 16, 10, 19, 39))
def testListValue(self):
# Success
self.assertEquals(MetadataValue("a;b;1").value,
["a", "b", decimal.Decimal(1)])
# Special cases
persistedValue = u"____EMPTY____LIST____"
metdataValue = MetadataValue(persistedValue)
self.assertEquals(metdataValue.value, list())
self.assertEquals(MetadataValue(";").value, ";")
self.assertEquals(MetadataValue("a\\;b;c").value, ["a;b", "c"])
def testDictValues(self):
metdataValue = MetadataValue("{}")
self.assertEquals(metdataValue.value, dict())
def testGuessRepresentation(self):
# Success
self.assertEquals(MetadataValue("").guessRepresentation(), [None])
self.assertEquals(MetadataValue("1").guessRepresentation(),
[True, decimal.Decimal("1"),
datetime(1970, 1, 1, 1, 0, 1), u"1"])
class GetPersistenceRepresentationTestCase(unittest.TestCase):
def testBoolValue(self):
self.assertEquals(getPersistenceRepresentation(True), "1")
self.assertEquals(getPersistenceRepresentation(False), "0")
def testNoneValue(self):
self.assertEquals(getPersistenceRepresentation(None), "")
self.assertRaises(PersistenceError, getPersistenceRepresentation, tuple())
def testStringValue(self):
self.assertEquals(getPersistenceRepresentation(u"test"), u"test")
self.assertEquals(getPersistenceRepresentation("test"), u"test")
# Special escaped sequences
self.assertEquals(getPersistenceRepresentation(";"), "\\;")
self.assertEquals(getPersistenceRepresentation("____EMPTY____LIST____"),
"\\____EMPTY____LIST____")
# Invalid raw string
orignalFunction = sys.getdefaultencoding
sys.getdefaultencoding = lambda: None # Mock encoding determination
try:
self.assertRaises(
PersistenceError, getPersistenceRepresentation, _AE.encode("Latin-1"))
finally:
sys.getdefaultencoding = orignalFunction
def testNumericValue(self):
# Decimals
persistedValue = decimal.Decimal("4.5")
self.assertEquals(getPersistenceRepresentation(persistedValue), u"4.5")
persistedValue = decimal.Decimal("5")
self.assertEquals(getPersistenceRepresentation(persistedValue), u"5")
# Raw integer
self.assertEquals(getPersistenceRepresentation(5), u"5")
#Raw float
self.assertEquals(getPersistenceRepresentation(4.5), u"4.5")
def testDatetimeValue(self):
persistedValue = datetime(2006, 10, 16, 10, 19, 39)
self.assertEquals(getPersistenceRepresentation(persistedValue),
u"2006-10-16T08:19:39Z")
def testListValue(self):
persistedValue = [decimal.Decimal("2006"), decimal.Decimal("10"),
decimal.Decimal("16"), decimal.Decimal("10")]
self.assertEquals(getPersistenceRepresentation(persistedValue),
u"2006;10;16;10;")
persistedValue = list()
self.assertEquals(getPersistenceRepresentation(persistedValue),
u"____EMPTY____LIST____")
def testDictValue(self):
self.assertEquals(getPersistenceRepresentation(dict()), u"{}")
| 42.405405
| 88
| 0.653792
| 5,675
| 0.723391
| 0
| 0
| 0
| 0
| 0
| 0
| 2,511
| 0.320076
|
49477b4b9fb8484c659b6dfe9a98235bbdb4b218
| 3,629
|
py
|
Python
|
programmers/kakao2022/kakao2022/grader.py
|
jiyolla/StudyForCodingTestWithDongbinNa
|
c070829dd9c7b02b139e56511832c4a3b9f5982f
|
[
"MIT"
] | null | null | null |
programmers/kakao2022/kakao2022/grader.py
|
jiyolla/StudyForCodingTestWithDongbinNa
|
c070829dd9c7b02b139e56511832c4a3b9f5982f
|
[
"MIT"
] | null | null | null |
programmers/kakao2022/kakao2022/grader.py
|
jiyolla/StudyForCodingTestWithDongbinNa
|
c070829dd9c7b02b139e56511832c4a3b9f5982f
|
[
"MIT"
] | null | null | null |
import random
from .api import put_change_grade
# grades[id] = grade for user #{id}.
# grades[0] is not used. Since user id starts from 1.
def change_grade_randomshuffle(grades):
changed_users_id = set(range(len(grades)))
changed_users_id.remove(0)
grades = list(range(len(grades)))
random.shuffle(grades)
commands = []
for changed_user_id in changed_users_id:
commands.append({'id': changed_user_id, 'grade': grades[changed_user_id]})
put_change_grade(commands)
def change_grade_simplelinear(grades, game_results):
MAX_TAKEN = 40
changed_users_id = set()
for game_result in game_results:
changed_users_id.add(game_result['win'])
changed_users_id.add(game_result['lose'])
grades[game_result['win']] += MAX_TAKEN - game_result['taken']
grades[game_result['lose']] -= MAX_TAKEN - game_result['taken']
commands = []
for changed_user_id in changed_users_id:
commands.append({'id': changed_user_id, 'grade': grades[changed_user_id]})
put_change_grade(commands)
def change_grade_discountedlinear(grades, game_results):
BASE_SCORE = 100
MIN_TAKEN = 3
MAX_TAKEN = 40
changed_users_id = set()
for game_result in game_results:
changed_users_id.add(game_result['win'])
changed_users_id.add(game_result['lose'])
grades[game_result['win']] += BASE_SCORE * (2 - 1.6*(game_result['taken'] - MIN_TAKEN)/(MAX_TAKEN - MIN_TAKEN))
grades[game_result['lose']] -= BASE_SCORE * (2 - 1.6*(game_result['taken'] - MIN_TAKEN)/(MAX_TAKEN - MIN_TAKEN))
commands = []
for changed_user_id in changed_users_id:
commands.append({'id': changed_user_id, 'grade': grades[changed_user_id]})
put_change_grade(commands)
def change_grade_simplequadratic(grades, game_results):
MAX_TAKEN = 40
changed_users_id = set()
for game_result in game_results:
changed_users_id.add(game_result['win'])
changed_users_id.add(game_result['lose'])
grades[game_result['win']] += (MAX_TAKEN - game_result['taken'])**2
grades[game_result['lose']] -= (MAX_TAKEN - game_result['taken'])**2
commands = []
for changed_user_id in changed_users_id:
commands.append({'id': changed_user_id, 'grade': grades[changed_user_id]})
put_change_grade(commands)
def change_grade_preventabusediscountedlinear(grades, game_results, suspicion_marks):
BASE_SCORE = 4000
MIN_TAKEN = 3
MAX_TAKEN = 40
changed_users_id = set()
for game_result in game_results:
winner = game_result['win']
loser = game_result['lose']
game_time = game_result['taken']
changed_users_id.add(winner)
changed_users_id.add(loser)
if game_time < 11:
expected_game_time = 40 - abs(grades[winner] - grades[loser])/99000*35
tolerance = 5 + 5
if game_time < expected_game_time - tolerance:
suspicion_marks[loser] += 1
if suspicion_marks[loser] > 2:
continue
expected_win_rate = grades[winner]/(grades[winner] + grades[loser])
win_rate_modifier = expected_win_rate # (expected_win_rate - 0.3)*2 + 0.2
grades[winner] += win_rate_modifier*BASE_SCORE*(3 - 2.5*(game_time - MIN_TAKEN)/(MAX_TAKEN - MIN_TAKEN))
grades[loser] -= win_rate_modifier*BASE_SCORE*(3 - 2.5*(game_time - MIN_TAKEN)/(MAX_TAKEN - MIN_TAKEN))
commands = []
for changed_user_id in changed_users_id:
commands.append({'id': changed_user_id, 'grade': grades[changed_user_id]})
put_change_grade(commands)
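# Note on the anti-abuse variant above: very fast wins (under 11 time units) are
# compared against an expected game time derived from the grade gap; wins that are
# much faster than expected add a suspicion mark to the loser, and once a loser has
# collected more than two marks the result can be skipped. The awarded score then
# scales with how quickly the game was won and with the winner's share of the
# combined grade of the two players.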
| 36.656566
| 120
| 0.673188
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 305
| 0.084045
|
4947e1cc23f3c7930219fe180c751c514d914052
| 2,188
|
py
|
Python
|
resources/benchmark.py
|
HPI-SWA-Lab/TargetSpecific-ICOOOLPS
|
2936fe010103cfbe4b0131313abcee3a59bb8fbc
|
[
"MIT"
] | 1
|
2015-04-10T17:25:56.000Z
|
2015-04-10T17:25:56.000Z
|
resources/benchmark.py
|
HPI-SWA-Lab/TargetSpecific-ICOOOLPS
|
2936fe010103cfbe4b0131313abcee3a59bb8fbc
|
[
"MIT"
] | null | null | null |
resources/benchmark.py
|
HPI-SWA-Lab/TargetSpecific-ICOOOLPS
|
2936fe010103cfbe4b0131313abcee3a59bb8fbc
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
N = 4
ind = np.arange(N) # the x locations for the groups
width = 0.4 # the width of the bars
fig, ax = plt.subplots()
ax.set_ylim(0,11) # outliers only
#ax2.set_ylim(0,35) # most of the data
#ax.spines['bottom'].set_visible(False)
#ax2.spines['top'].set_visible(False)
ax.xaxis.tick_top()
#ax.tick_params(labeltop='off') # don't put tick labels at the top
ax.xaxis.tick_bottom()
fig.subplots_adjust(hspace=0.1)
# call-site-specific
noneV = (5.729, 6.966, 7.953, 8.524)
rectsNone = ax.bar(ind, noneV, width, color='w', hatch=' ')
#ax2.bar(ind, noneV, width, color='w')
# call-target-specific uncached
classCached = (2.560, 3.616, 5.357, 6.846)
rectsClassCached = ax.bar(ind+width, classCached, width, color='w', hatch='o')
#ax2.bar(ind+width, classCached, width, color='w', hatch='/')
# call-target-specific cached
#classUncached = (2.634, 3.358, 5.583, 6.838)
#rectsClassUncached = ax.bar(ind+2*width, classUncached, width, color='w', hatch='o')
#ax2.bar(ind+2*width, classUncached, width, color='w', hatch='o')
# add some text for labels, title and axes ticks
#ax2.set_ylabel('Runtime (ms)')
#ax.set_title('Average rendering runtime per frame')
ax.set_ylabel('Runtime (s) / 100.000 invocations')
ax.set_xticks(ind+width+0.14)
ax.set_xticklabels( ('(a) 1 target \n (10 kwargs)', '(b) 2 targets \n (10 kwargs; \n 10 kwargs)', '(c) 2 targets \n (10 kwargs; \n 5 kwargs + rest kwargs)', '(d) 1 target \n (5 kwargs + rest kwargs)') )
#ax2.set_yticks(ax2.get_yticks()[:-1])
ax.set_yticks(ax.get_yticks()[1:])
ax.legend( (rectsNone[0], rectsClassCached[0]), ('call-site-specific', 'call-target-specific') , loc=4)
def autolabel(rects):
# attach some text labels
for rect in rects:
height = rect.get_height()
if height == 0:
ax.text(rect.get_x()+rect.get_width()/2., height+2, 'n/a',
ha='center', va='bottom', rotation='vertical')
else:
ax.text(rect.get_x()+rect.get_width()/2., height+0.2, '%.2f'%float(height),
ha='center', va='bottom', rotation='vertical')
autolabel(rectsNone)
autolabel(rectsClassCached)
plt.show()
| 32.656716
| 202
| 0.66042
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,142
| 0.521938
|
4948d466851a602b9bcffdd7a6596bee89b1c959
| 3,829
|
py
|
Python
|
talktracker/analysis.py
|
alTeska/talktracker
|
7d2c507bda78a6faf92568291190ea9300e878dc
|
[
"MIT"
] | 1
|
2018-08-22T09:07:04.000Z
|
2018-08-22T09:07:04.000Z
|
talktracker/analysis.py
|
alTeska/talktracker
|
7d2c507bda78a6faf92568291190ea9300e878dc
|
[
"MIT"
] | 1
|
2018-10-14T20:06:51.000Z
|
2018-10-14T20:06:51.000Z
|
talktracker/analysis.py
|
alTeska/talktracker
|
7d2c507bda78a6faf92568291190ea9300e878dc
|
[
"MIT"
] | 3
|
2018-09-21T15:09:26.000Z
|
2018-10-13T13:58:06.000Z
|
from datetime import timedelta
from random import sample, randint
import talktracker as tt
def time_diff(time1, time2):
"""calculate the time different"""
time1_info = timedelta(hours=time1[0], minutes=time1[1], seconds=time1[2])
time2_info = timedelta(hours=time2[0], minutes=time2[1], seconds=time2[2])
diff_in_sec = (time1_info - time2_info).seconds
diff_hours, diff_minutes, diff_seconds = dissect_time(diff_in_sec)
return diff_hours, diff_minutes, diff_seconds
def time_add(time1, time2):
"""calculate the time different"""
time1_info = timedelta(hours=time1[0], minutes=time1[1], seconds=time1[2])
time2_info = timedelta(hours=time2[0], minutes=time2[1], seconds=time2[2])
add_in_sec = (time1_info + time2_info).seconds
add_hours, add_minutes, add_seconds = dissect_time(add_in_sec)
return add_hours, add_minutes, add_seconds
def dissect_time(sec):
"""changes total seconds into hours, minutes, seconds"""
seconds = sec % 60
minutes = (sec // 60) % 60
hours = (sec // 60) // 60
return hours, minutes, seconds
def to_seconds(*args):
"""Converts (hour, min, sec) to seconds only"""
if len(args) == 3:
return args[0] * 60 * 60 + args[1] * 60 + args[2]
elif len(args) == 1:
return args[0][0] * 60 * 60 + args[0][1] * 60 + args[0][2]
else:
raise ValueError("Input must be either three integers, or a tuple of three integers")
def gen_fake_data(teams_n=0, members_n=[], duration=(2, 30, 0)):
    """Generate data for a fake session.
    Args:
        teams_n (int): number of teams
        members_n (int or list): a single number or a list of numbers. If a single
            number is passed, every team gets the same number of members.
        duration (tuple): target total session time as (hours, minutes, seconds).
    Returns:
        a session object with fake data
    Pseudo code:
    1. create teams_n teams with randomly generated names
    2. for each team create the corresponding number of members with randomly generated attributes.
       attributes might include:
       - age (int)
       - country (str, category)
       - batch (int)
    3. create a session and add the teams to the session
    4. randomly pick a team
    5. randomly pick a member and assign a time to him/her
    6. repeat 4 and 5 until the total session time (the sum of the teams' total times) exceeds the given duration
    """
team_names = team_name_list.copy()
member_names = member_name_list.copy()
teams = []
for ind in range(teams_n):
members = []
for _ in range(members_n[ind]):
name = sample(member_names, 1)[0]
member_names.remove(name) # remove this name from the list (without replacement)
age = randint(1, 40)
batch = randint(1, 3)
country = 'Germany'
members.append(tt.Member(name, age=age, batch=batch, country=country))
name = sample(team_names, 1)[0]
team_names.remove(name)
teams.append(tt.Team(name, members=members))
session = tt.Session('Untitled', teams=teams)
return session
""" Generates data for a fake session
Args:
teams_n (int): number of teams
members_n (int or a list): a single number or a list of numbers. of a single number os passed all the team will have similar number of members.
Returns:
a session object with fake data
"""
team_name_list = ["RockStars", "ShadowWalkers", "MiddleEasterns", "Newrons", "Persians",
"Baghalies", "Golabies", "Loosers"]
member_name_list = ["Mohammad", "Annika", "Amir", "Yasaman", "Arman", "Nick", "Nicholas" ,
"Michael", "Aleksndra", "Fati", "Rasoul", "Janne", "Yagmur", "Raja",
"Abdallah", "Viktorja", "Alex", "James", "Marie", "Auguste", "Nora",
"Mathew", "Stefan", "Steffen", "Darya", "Tamara", "Ali", "Niloufar",
"Christoph", "Werner", "Florian", "Bernhard", "Samuel", "Karan", "Elisa",
"Atena", "Milad", "Nazanin", "Rahaa", "Amin", "Ehsan", "Shahab", "Sepideh"]
| 36.122642
| 154
| 0.629146
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,639
| 0.428049
|
494aa2d4e0d2a060a9ff51a1b37123a087e51342
| 2,692
|
py
|
Python
|
app/internal/module/video_del/queue.py
|
penM000/eALPluS-video-api
|
4ec8f850bd98450b76279f5e30da854dbfaed776
|
[
"MIT"
] | null | null | null |
app/internal/module/video_del/queue.py
|
penM000/eALPluS-video-api
|
4ec8f850bd98450b76279f5e30da854dbfaed776
|
[
"MIT"
] | null | null | null |
app/internal/module/video_del/queue.py
|
penM000/eALPluS-video-api
|
4ec8f850bd98450b76279f5e30da854dbfaed776
|
[
"MIT"
] | null | null | null |
import asyncio
from dataclasses import dataclass, field
from typing import Any
from .encode import encoder
from .database import database
from ..logger import logger
@dataclass(order=True)
class QueueItem:
"""
    Queue item, ordered by priority.
"""
priority: int
item: Any = field(compare=False)
queue = None
encode_tasks = []
async def encode_worker(queue: QueueItem):
while True:
# Get a "work item" out of the queue.
queue_item = await queue.get()
encode_config = queue_item.item
        # Update the encode progress in the database
result = await encoder.encode(
folderpath=encode_config["folderpath"],
filename=encode_config["filename"],
resolution=encode_config["height"])
await database.encode_result(encode_config["folderpath"],
encode_config["height"],
result)
        # Decide whether the original input video should be deleted
        # await filemanager.delete_original_video()
        # Mark the job as done in the database
queue.task_done()
async def add_encode_queue(folderpath, filename, encode_resolution="Auto"):
global queue
    global encode_tasks
if queue is None:
queue = asyncio.PriorityQueue()
await encoder.encode_test()
for i in range(encoder.encode_worker):
task = asyncio.create_task(encode_worker(queue))
encode_tasks.append(task)
    # Check the input video format
input_video_info = await encoder.get_video_info(folderpath, filename)
    # Error out if the file has no video stream
if not input_video_info.is_video:
logger.warning(f"{folderpath} not video file")
await database.encode_error(folderpath, "not video file")
return
else:
await encoder.thumbnail(folderpath, filename)
    # Queue one encode job per target resolution
if encode_resolution == "Auto":
video_size = [360, 480, 720, 1080]
for height in video_size:
            # Queue this height if the input is at least as tall (360p is always queued)
if input_video_info.height >= height or height == 360:
await database.encode_task(folderpath, height)
encode_config = {
"folderpath": folderpath,
"filename": filename,
"height": height
}
queue_item = QueueItem(priority=height, item=encode_config)
queue.put_nowait(queue_item)
else:
        # Re-queue a single resolution (used when retrying an encode)
height = int(encode_resolution)
await database.encode_task(folderpath, height)
encode_config = {
"folderpath": folderpath,
"filename": filename,
"height": height
}
queue_item = QueueItem(priority=height, item=encode_config)
queue.put_nowait(queue_item)
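# A hedged usage sketch; the folder and file names are hypothetical, and the calls must
# run inside an event loop because the worker tasks are created lazily on first use.
#     await add_encode_queue("/videos/lecture01", "input.mp4")         # full resolution ladder
#     await add_encode_queue("/videos/lecture01", "input.mp4", "720")  # re-queue one resolution
#     await queue.join()                                               # wait for all encodes to finish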
| 29.26087
| 75
| 0.609955
| 113
| 0.039566
| 0
| 0
| 136
| 0.047619
| 2,510
| 0.878852
| 542
| 0.189776
|
494b2faa96115baf8681d111a98a087de5ebcb59
| 476
|
py
|
Python
|
lib/dyson/utils/module.py
|
luna-test/luna
|
6d94439f2747daf96e295837684bdc6607f507dc
|
[
"Apache-2.0"
] | 3
|
2018-05-21T14:35:11.000Z
|
2021-03-25T12:32:25.000Z
|
lib/dyson/utils/module.py
|
dyson-framework/dyson
|
e5a2e12c7bb0ba21ff274feff34c184576d08ff5
|
[
"Apache-2.0"
] | 13
|
2018-05-22T01:01:08.000Z
|
2018-09-16T22:12:10.000Z
|
lib/dyson/utils/module.py
|
luna-test/luna
|
6d94439f2747daf96e295837684bdc6607f507dc
|
[
"Apache-2.0"
] | 1
|
2018-05-21T14:35:17.000Z
|
2018-05-21T14:35:17.000Z
|
import os
from dyson import constants
from abc import abstractmethod
import sys
from dyson.constants import to_boolean
class DysonModule:
def __init__(self):
pass
@abstractmethod
def run(self, webdriver, params):
pass
def fail(self, msg):
print(msg, file=sys.stderr)
if not to_boolean(constants.DEFAULT_SELENIUM_PERSIST):
exit(2)
def get_module_path():
return os.path.dirname(os.path.realpath(__file__))
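# A hedged sketch of a concrete module: "TitleModule" and its "equals" parameter are
# invented here purely to illustrate the DysonModule contract (subclass it, implement
# run(), and report problems through fail()).
class TitleModule(DysonModule):
    def run(self, webdriver, params):
        expected = params.get("equals")
        if expected is not None and webdriver.title != expected:
            self.fail(f"expected title {expected!r}, got {webdriver.title!r}")
        return webdriver.title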
| 18.307692
| 62
| 0.686975
| 272
| 0.571429
| 0
| 0
| 66
| 0.138655
| 0
| 0
| 0
| 0
|
494cefc1f9462c0538e6c405bcec6cc75cbab494
| 1,136
|
py
|
Python
|
misc/texteditor.py
|
disc0nnctd/myPythonCodesDC
|
378b0cf749124ef8b7f8d70f6f298faa6c9f73de
|
[
"MIT"
] | 1
|
2017-04-30T18:20:32.000Z
|
2017-04-30T18:20:32.000Z
|
misc/texteditor.py
|
disc0nnctd/myPythonCodesDC
|
378b0cf749124ef8b7f8d70f6f298faa6c9f73de
|
[
"MIT"
] | 1
|
2017-04-30T10:09:45.000Z
|
2017-04-30T12:39:19.000Z
|
misc/texteditor.py
|
disc0nnctd/myPythonCodesDC
|
378b0cf749124ef8b7f8d70f6f298faa6c9f73de
|
[
"MIT"
] | 1
|
2017-04-30T09:54:08.000Z
|
2017-04-30T09:54:08.000Z
|
"""A simple text editor made in Python 2.7."""
from os import path, chdir
workingdir = path.join(path.dirname(__file__), 'texts')
chdir(workingdir)
from Tkinter import Tk, Text, Button
import tkFileDialog
root = Tk("Text Editor")
text = Text(root)
text.grid()
def saveas():
"""Save file."""
try:
t = text.get("1.0", "end-1c") # "1.0" means read from beginning
# "end-1c" means delete last character
savelocation = tkFileDialog.asksaveasfilename()
file1 = open(savelocation, "w")
file1.write(t)
        file1.close()
except IOError:
pass
def openfile():
"""Open file."""
try:
location = tkFileDialog.askopenfilename()
file1 = open(location, "r")
fileContents = file1.read()
text.delete(1.0, "end")
text.insert(1.0, fileContents)
except IOError:
pass
button = Button(root, text="Open", command=openfile)
button.grid()
button = Button(root, text="Save As", command=saveas)
button.grid()
root.mainloop()
workingdir = path.join(path.dirname(__file__))
chdir(workingdir)
| 25.244444
| 73
| 0.611796
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 210
| 0.184859
|
494e2056309882919070e0989e358cb4f828bbd0
| 397
|
py
|
Python
|
21_DivdeConquer/Step05/gamjapark.py
|
StudyForCoding/BEAKJOON
|
84e1c5e463255e919ccf6b6a782978c205420dbf
|
[
"MIT"
] | null | null | null |
21_DivdeConquer/Step05/gamjapark.py
|
StudyForCoding/BEAKJOON
|
84e1c5e463255e919ccf6b6a782978c205420dbf
|
[
"MIT"
] | 3
|
2020-11-04T05:38:53.000Z
|
2021-03-02T02:15:19.000Z
|
21_DivdeConquer/Step05/gamjapark.py
|
StudyForCoding/BEAKJOON
|
84e1c5e463255e919ccf6b6a782978c205420dbf
|
[
"MIT"
] | null | null | null |
import sys
n, k= map(int, sys.stdin.readline().split())
def power(a, b):
if b == 0:
return 1
if b % 2:
return (power(a, b//2) ** 2 * a) % P
else:
return (power(a, b//2) ** 2) % P
P = 1000000007
f = [1 for _ in range(n + 1)]
for i in range(2, n + 1):
f[i] = (f[i - 1] * i) % P
A = f[n]
B = (f[n-k]*f[k])%P
print((A % P) * (power(B, P-2) % P) % P)  # Fermat's little theorem: C(N,K) = N! * (K!(N-K)!)^(P-2) mod P
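# Why the last line works: C(N, K) = N! / (K! * (N-K)!) is needed modulo the prime P.
# Division is undefined mod P, but Fermat's little theorem gives a^(P-1) = 1 (mod P)
# whenever gcd(a, P) = 1, hence a^(-1) = a^(P-2) (mod P). With A = N! and B = K!(N-K)!,
# C(N, K) = A * B^(P-2) (mod P), which power(B, P-2) computes in O(log P) multiplications.
# Python's built-in three-argument pow gives the same result:
#     assert power(B, P - 2) == pow(B, P - 2, P)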
| 18.045455
| 76
| 0.476071
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 50
| 0.121655
|
494e7be275c169f4f4b49f4a379016a1594a2a8b
| 135
|
py
|
Python
|
quicksilver.py
|
binaryflesh/quicksilver
|
0d65259f305beb05efe00f096e48c41b62bfdf57
|
[
"MIT"
] | 1
|
2018-12-01T07:52:13.000Z
|
2018-12-01T07:52:13.000Z
|
quicksilver.py
|
binaryflesh/quicksilver
|
0d65259f305beb05efe00f096e48c41b62bfdf57
|
[
"MIT"
] | 7
|
2018-12-02T23:31:38.000Z
|
2018-12-03T07:44:41.000Z
|
quicksilver.py
|
binaryflesh/quicksilver
|
0d65259f305beb05efe00f096e48c41b62bfdf57
|
[
"MIT"
] | null | null | null |
# Quicksilver.py - Agnostic project analyzer that generates resourceful diagrams. WIP
# Copyright (C) 2018 Logan Campos - @binaryflesh
| 45
| 85
| 0.792593
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 133
| 0.985185
|
494e987723a6b2a0236a0a1b1f66efc147868431
| 4,830
|
py
|
Python
|
action_controller/scripts/ActionControllerNode.py
|
FablabHome/The_Essense_of_the_Grey_Region
|
6385ada0879bdc6c00cb707192841fdab9ab7bf1
|
[
"MIT"
] | 1
|
2021-09-23T09:42:32.000Z
|
2021-09-23T09:42:32.000Z
|
action_controller/scripts/ActionControllerNode.py
|
FablabHome/The_Essense_of_the_Grey_Region
|
6385ada0879bdc6c00cb707192841fdab9ab7bf1
|
[
"MIT"
] | null | null | null |
action_controller/scripts/ActionControllerNode.py
|
FablabHome/The_Essense_of_the_Grey_Region
|
6385ada0879bdc6c00cb707192841fdab9ab7bf1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
MIT License
Copyright (c) 2020 rootadminWalker
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import json
import rospy
from core.Nodes import ActionEvaluator
class ActionControllerNode(ActionEvaluator):
def __init__(self):
        # Initialize the intent-to-callback map; it must include a NotRecognized entry
self.intent2callback = {
'Introduce': self.__introduce,
'GiveMenu': self.__show_menu,
'OrderFood': self.__order_food,
'OrderFoodTakeOut': self.__order_food,
'NotRecognized': self.__not_recognized
}
super(ActionControllerNode, self).__init__()
def __introduce(self, intent, slots, raw_text, session):
introduce_dialog = '''
Ah, Forgive me for not introducing myself, masters.
I'm snippy, your virtual assistant in this restaurant,
I'm still under development, so you could only see me talking
right now.
'''
self.speaker.say_until_end(introduce_dialog)
@staticmethod
def __show_menu(intent, slots, raw_text, session):
menu = '''
Menu Price
-------------------------------------
French Fries $7
meat salad $20
spaghetti $23
hot chocolate $14
cappucino $19
tea $0
water $0
Hamburger $19
Ketchup $0
Tacos $15
Marshmellos $10
Steak $27
hot dog $10
'''
print(f"Sorry for your inconvenience, here's the menu\n\n{menu}")
def __order_food(self, intent, slots, raw_text, session):
order_what = False
orders = {}
i = 0
if session is not None:
rospy.loginfo(json.loads(session.custom_data))
while i < len(slots):
if slots[i]['slotName'] == 'amount':
amount = int(slots[i]['value']['value'])
try:
next_slot = slots[i + 1]
if next_slot['slotName'] == 'food':
orders[next_slot['value']['value']] = amount
i += 2
elif next_slot['slotName'] == 'amount':
orders[f'Unknown{i}'] = amount
i += 1
order_what = True
except IndexError:
order_what = True
orders[f'Unknown{i}'] = amount
i += 1
elif slots[i]['slotName'] == 'food':
orders[slots[i]['value']['value']] = 1
i += 1
if order_what or len(slots) == 0:
self.speaker.say_until_end("I'm sorry, but could you repeat it again?")
self.start_session(next_intents=['OrderFood', 'NotRecognized'], custom_data=orders)
return
if self.on_session():
if set(session.possible_next_intents) == {'OrderFood'}:
if not order_what:
self.stop_session()
self.speaker.say_until_end('Ok, Gotcha')
print(orders)
def __not_recognized(self, intent, slots, raw_text, session):
if len(session) == 0:
rospy.loginfo(f"Currently there isn't an action for '{raw_text}'")
elif session[0] == 'OrderFood':
rospy.loginfo('Sorry, I could not understand what do you want to order, please say it again')
self.stop_session()
def reset(self):
pass
if __name__ == '__main__':
node = ActionControllerNode()
| 37.44186
| 105
| 0.561698
| 3,594
| 0.744099
| 0
| 0
| 822
| 0.170186
| 0
| 0
| 2,568
| 0.531677
|
494ed3dffeb5ac99649e1cc394c891c7296dc5fc
| 5,174
|
py
|
Python
|
model.py
|
Wentao-Shi/Molecule-RNN
|
e00d89c7a6c0c341fb790da800087b9e34be5ab8
|
[
"MIT"
] | 3
|
2021-08-22T21:26:38.000Z
|
2022-01-09T11:16:40.000Z
|
model.py
|
shiwentao00/Molecule-RNN
|
e00d89c7a6c0c341fb790da800087b9e34be5ab8
|
[
"MIT"
] | null | null | null |
model.py
|
shiwentao00/Molecule-RNN
|
e00d89c7a6c0c341fb790da800087b9e34be5ab8
|
[
"MIT"
] | null | null | null |
# Copyright: Wentao Shi, 2021
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence
from torch.nn.functional import softmax
class RNN(torch.nn.Module):
def __init__(self, rnn_config):
super(RNN, self).__init__()
self.embedding_layer = nn.Embedding(
num_embeddings=rnn_config['num_embeddings'],
embedding_dim=rnn_config['embedding_dim'],
padding_idx=rnn_config['num_embeddings'] - 1
)
if rnn_config['rnn_type'] == 'LSTM':
self.rnn = nn.LSTM(
input_size=rnn_config['input_size'],
hidden_size=rnn_config['hidden_size'],
num_layers=rnn_config['num_layers'],
batch_first=True,
dropout=rnn_config['dropout']
)
elif rnn_config['rnn_type'] == 'GRU':
self.rnn = nn.GRU(
input_size=rnn_config['input_size'],
hidden_size=rnn_config['hidden_size'],
num_layers=rnn_config['num_layers'],
batch_first=True,
dropout=rnn_config['dropout']
)
else:
raise ValueError(
"rnn_type should be either 'LSTM' or 'GRU'."
)
# output does not include <sos> and <pad>, so
# decrease the num_embeddings by 2
self.linear = nn.Linear(
rnn_config['hidden_size'], rnn_config['num_embeddings'] - 2
)
def forward(self, data, lengths):
embeddings = self.embedding_layer(data)
# pack the padded input
# the lengths are decreased by 1 because we don't
# use <eos> for input and we don't need <sos> for
        # output during training.
embeddings = pack_padded_sequence(
input=embeddings,
lengths=lengths,
batch_first=True,
enforce_sorted=False
)
# recurrent network, discard (h_n, c_n) in output.
        # Teacher forcing is used here, so we directly feed
        # the whole sequence to the model.
embeddings, _ = self.rnn(embeddings)
# linear layer to generate input of softmax
embeddings = self.linear(embeddings.data)
# return the packed representation for backpropagation,
# the targets will also be packed.
return embeddings
def sample(self, batch_size, vocab, device, max_length=140):
"""Use this function if device is GPU"""
# get integer of "start of sequence"
start_int = vocab.vocab['<sos>']
# create a tensor of shape [batch_size, seq_step=1]
sos = torch.ones(
[batch_size, 1],
dtype=torch.long,
device=device
)
sos = sos * start_int
# sample first output
output = []
x = self.embedding_layer(sos)
x, hidden = self.rnn(x)
x = self.linear(x)
x = softmax(x, dim=-1)
x = torch.multinomial(x.squeeze(), 1)
output.append(x)
# a tensor to indicate if the <eos> token is found
# for all data in the mini-batch
finish = torch.zeros(batch_size, dtype=torch.bool).to(device)
# sample until every sequence in the mini-batch
# has <eos> token
for _ in range(max_length):
# forward rnn
x = self.embedding_layer(x)
x, hidden = self.rnn(x, hidden)
x = self.linear(x)
x = softmax(x, dim=-1)
# sample
x = torch.multinomial(x.squeeze(), 1)
output.append(x)
# terminate if <eos> is found for every data
eos_sampled = (x == vocab.vocab['<eos>']).data
finish = torch.logical_or(finish, eos_sampled.squeeze())
if torch.all(finish):
return torch.cat(output, -1)
return torch.cat(output, -1)
def sample_cpu(self, vocab):
"""Use this function if device is CPU"""
output = []
# get integer of "start of sequence"
start_int = vocab.vocab['<sos>']
# create a tensor of shape [batch_size=1, seq_step=1]
sos = torch.tensor(
start_int,
dtype=torch.long
).unsqueeze(dim=0
).unsqueeze(dim=0)
# sample first output
x = self.embedding_layer(sos)
x, hidden = self.rnn(x)
x = self.linear(x)
x = softmax(x, dim=-1)
x = torch.multinomial(x.squeeze(), 1)
output.append(x.item())
# use first output to iteratively sample until <eos> occurs
while output[-1] != vocab.vocab['<eos>']:
x = x.unsqueeze(dim=0)
x = self.embedding_layer(x)
x, hidden = self.rnn(x, hidden)
x = self.linear(x)
x = softmax(x, dim=-1)
x = torch.multinomial(x.squeeze(), 1)
output.append(x.item())
# convert integers to tokens
output = [vocab.int2tocken[x] for x in output]
        # pop off the trailing <eos>
output.pop()
# convert to a single string
output = vocab.combine_list(output)
return output
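# A hedged construction sketch: the keys mirror what RNN.__init__ reads above, and the
# concrete numbers are made-up placeholders that would normally come from a config file.
#     rnn_config = {
#         "num_embeddings": 47,   # vocabulary size incl. <sos>/<eos>/<pad>
#         "embedding_dim": 128,
#         "input_size": 128,      # must equal embedding_dim
#         "hidden_size": 512,
#         "num_layers": 3,
#         "dropout": 0.2,
#         "rnn_type": "GRU",      # or "LSTM"
#     }
#     model = RNN(rnn_config)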
| 32.3375
| 71
| 0.55547
| 5,014
| 0.969076
| 0
| 0
| 0
| 0
| 0
| 0
| 1,426
| 0.275609
|
49503dbeb658d944f139ed75ff92cfc671b7acd3
| 86
|
py
|
Python
|
day5.py
|
GuiltyD/Python_code
|
db03c491824b66d842a7b4ff8aa45644233526a6
|
[
"MIT"
] | null | null | null |
day5.py
|
GuiltyD/Python_code
|
db03c491824b66d842a7b4ff8aa45644233526a6
|
[
"MIT"
] | null | null | null |
day5.py
|
GuiltyD/Python_code
|
db03c491824b66d842a7b4ff8aa45644233526a6
|
[
"MIT"
] | null | null | null |
f = open('./day4.py')
for chunk in iter(lambda: f.read(10), ''):
print(chunk)
| 17.2
| 41
| 0.55814
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 13
| 0.151163
|
4950672898e66d691bf307ac9e23c9fb67819cb1
| 701
|
py
|
Python
|
Latest/venv/Lib/site-packages/apptools/io/h5/utils.py
|
adamcvj/SatelliteTracker
|
49a8f26804422fdad6f330a5548e9f283d84a55d
|
[
"Apache-2.0"
] | 1
|
2022-01-09T20:04:31.000Z
|
2022-01-09T20:04:31.000Z
|
Latest/venv/Lib/site-packages/apptools/io/h5/utils.py
|
adamcvj/SatelliteTracker
|
49a8f26804422fdad6f330a5548e9f283d84a55d
|
[
"Apache-2.0"
] | 1
|
2022-02-15T12:01:57.000Z
|
2022-03-24T19:48:47.000Z
|
Latest/venv/Lib/site-packages/apptools/io/h5/utils.py
|
adamcvj/SatelliteTracker
|
49a8f26804422fdad6f330a5548e9f283d84a55d
|
[
"Apache-2.0"
] | null | null | null |
from contextlib import contextmanager
from .file import H5File
@contextmanager
def open_h5file(filename, mode='r+', **kwargs):
"""Context manager for reading an HDF5 file as an H5File object.
Parameters
----------
filename : str
HDF5 file name.
mode : str
Mode to open the file:
'r' : Read-only
'w' : Write; create new file (an existing file would be deleted).
'a' : Read and write to file; create if not existing
'r+': Read and write to file; must already exist
See `H5File` for additional keyword arguments.
"""
h5 = H5File(filename, mode=mode, **kwargs)
try:
yield h5
finally:
h5.close()
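# A minimal usage sketch; 'results.h5' is a hypothetical file name, and any extra
# keyword arguments are passed straight through to H5File.
#     with open_h5file('results.h5', mode='a') as h5:
#         ...  # read or write nodes through the H5File API
#     # the file is closed even if the body raises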
| 24.172414
| 73
| 0.611983
| 0
| 0
| 618
| 0.881598
| 634
| 0.904422
| 0
| 0
| 465
| 0.663338
|
4950f58ba8e9dd8055eb104d658977925fab01b1
| 202
|
py
|
Python
|
src/success_backup_check/tests/test_success_backup_check.py
|
linuxluigi/success-backup-check
|
aa3be2dbd8b0106b931bf226614e05af68034077
|
[
"MIT"
] | null | null | null |
src/success_backup_check/tests/test_success_backup_check.py
|
linuxluigi/success-backup-check
|
aa3be2dbd8b0106b931bf226614e05af68034077
|
[
"MIT"
] | 7
|
2017-10-20T08:14:08.000Z
|
2017-10-31T10:04:19.000Z
|
src/success_backup_check/tests/test_success_backup_check.py
|
linuxluigi/success-backup-check
|
aa3be2dbd8b0106b931bf226614e05af68034077
|
[
"MIT"
] | null | null | null |
import pytest
import success_backup_check
def test_project_defines_author_and_version():
assert hasattr(success_backup_check, '__author__')
assert hasattr(success_backup_check, '__version__')
| 25.25
| 55
| 0.826733
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 25
| 0.123762
|
4953b0d0a882cec4862d24ffe94ed3594bc14dec
| 1,816
|
py
|
Python
|
insighioNode/lib/networking/modem/modem_sequans.py
|
insighio/insighioNode
|
396b0858ffb265ac66075e8b9d90713ffae7ffb8
|
[
"MIT"
] | 5
|
2021-06-11T09:03:12.000Z
|
2021-12-22T09:04:57.000Z
|
insighioNode/lib/networking/modem/modem_sequans.py
|
insighio/insighioNode
|
396b0858ffb265ac66075e8b9d90713ffae7ffb8
|
[
"MIT"
] | 1
|
2021-06-11T14:15:05.000Z
|
2021-06-11T14:15:33.000Z
|
insighioNode/lib/networking/modem/modem_sequans.py
|
insighio/insighioNode
|
396b0858ffb265ac66075e8b9d90713ffae7ffb8
|
[
"MIT"
] | null | null | null |
from modem_base import Modem
from network import LTE
import logging
import utime
class ModemSequans(Modem):
    def __init__(self):
        self.lte = LTE()
        self.ppp = None
        self.connected = False
def power_on(self):
self.lte.init()
def power_off(self):
self.lte.deinit(dettach=True, reset=True)
def init(self):
return True
def connect(self, timeoutms=30000):
(status, lines) = self.send_at_cmd('AT+CGDATA="PPP",1', 30000, "CONNECT")
if not status:
return False
import network
self.ppp = network.PPP(self.uart)
self.ppp.active(True)
self.ppp.connect()
start_timestamp = utime.ticks_ms()
timeout_timestamp = start_timestamp + timeoutms
while utime.ticks_ms() < timeout_timestamp:
self.connected = self.is_connected()
if self.connected:
break
utime.sleep_ms(100)
return self.connected
def is_connected(self):
return self.lte.isconnected()
def disconnect(self):
if self.ppp:
self.ppp.active(False)
self.connected = False
(status, _) = self.send_at_cmd("AT+CGACT=0,1")
return status
    # to be overridden by children
def set_gps_state(self, poweron=True):
pass
    # to be overridden by children
def is_gps_on(self):
return False
def get_gps_position(self, timeoutms=300000):
return None
def send_at_cmd(self, command, timeoutms=30000, success_condition="OK"):
response = ""
status = False
logging.debug(command)
response = self.lte.send_at_cmd(command)
if response:
response = response.strip().splitlines()
logging.debug(response)
            status = any(success_condition in line for line in response)
return (status, response)
| 24.540541
| 81
| 0.601872
| 1,745
| 0.960903
| 0
| 0
| 0
| 0
| 0
| 0
| 110
| 0.060573
|
4953cc8e9258070e70193f6a8e92ffeda65bac35
| 1,824
|
py
|
Python
|
ravendb/tests/jvm_migrated_tests/crud_tests/test_track_entity.py
|
ravendb/RavenDB-Python-Client
|
6286b459b501e755fe8e8591a48acf8616605ccd
|
[
"MIT"
] | 8
|
2016-10-08T17:45:44.000Z
|
2018-05-29T12:16:43.000Z
|
ravendb/tests/jvm_migrated_tests/crud_tests/test_track_entity.py
|
ravendb/RavenDB-Python-Client
|
6286b459b501e755fe8e8591a48acf8616605ccd
|
[
"MIT"
] | 5
|
2017-02-12T15:50:53.000Z
|
2017-09-18T12:25:01.000Z
|
ravendb/tests/jvm_migrated_tests/crud_tests/test_track_entity.py
|
ravendb/RavenDB-Python-Client
|
6286b459b501e755fe8e8591a48acf8616605ccd
|
[
"MIT"
] | 8
|
2016-07-03T07:59:12.000Z
|
2017-09-18T11:22:23.000Z
|
from ravendb.exceptions.exceptions import NonUniqueObjectException, InvalidOperationException
from ravendb.tests.test_base import UserWithId, TestBase
class TestTrackEntity(TestBase):
def setUp(self):
super(TestTrackEntity, self).setUp()
def test_storing_document_with_the_same_id_in_the_same_session_should_throw(self):
with self.store.open_session() as session:
user = UserWithId("User1", None, "users/1")
session.store(user)
session.save_changes()
new_user = UserWithId("User2", None, "users/1")
ex_message = "Attempted to associate a different object with id 'users/1'."
self.assertRaisesWithMessage(session.store, NonUniqueObjectException, ex_message, new_user)
def test_deleting_entity_that_is_not_tracked_should_throw(self):
with self.store.open_session() as session:
user = UserWithId(None, None)
ex_message = f"{user} is not associated with the session, cannot delete unknown entity instance."
self.assertRaisesWithMessage(session.delete, InvalidOperationException, ex_message, user)
def test_loading_deleted_document_should_return_null(self):
with self.store.open_session() as session:
user1 = UserWithId("John", None, "users/1")
user2 = UserWithId("Jonathan", None, "users/2")
session.store(user1)
session.store(user2)
session.save_changes()
with self.store.open_session() as session:
session.delete("users/1")
session.delete("users/2")
session.save_changes()
with self.store.open_session() as session:
self.assertIsNone(session.load("users/1", UserWithId))
self.assertIsNone(session.load("users/2", UserWithId))
| 44.487805
| 109
| 0.679276
| 1,670
| 0.91557
| 0
| 0
| 0
| 0
| 0
| 0
| 248
| 0.135965
|
4953e3a0846206727edbbb495ede380b618ab266
| 1,781
|
py
|
Python
|
PluginSDK/PythonRecon/Python/excel_helper.py
|
PengJinFa/YAPNew
|
fafee8031669b24d0cc74876a477c97d0d7ebadc
|
[
"MIT"
] | 20
|
2016-07-05T05:23:04.000Z
|
2021-11-07T14:25:59.000Z
|
PluginSDK/PythonRecon/Python/excel_helper.py
|
PengJinFa/YAPNew
|
fafee8031669b24d0cc74876a477c97d0d7ebadc
|
[
"MIT"
] | 20
|
2016-06-08T06:36:55.000Z
|
2018-04-25T09:52:18.000Z
|
PluginSDK/PythonRecon/Python/excel_helper.py
|
PengJinFa/YAPNew
|
fafee8031669b24d0cc74876a477c97d0d7ebadc
|
[
"MIT"
] | 21
|
2016-05-31T15:34:09.000Z
|
2021-11-07T14:26:03.000Z
|
from openpyxl import Workbook
from openpyxl.utils import get_column_letter
import numbers
wb = Workbook()
def XL_Location(row, column):
return get_column_letter(column) + str(row)
def Save_Column_Title(file_dir, features, row_index, column_start):
ws = wb.active
keys = [x[0] for x in features.items()]
try:
for index in range(len(keys)):
loc = XL_Location(index + row_index, column_start)
if ws[loc].value is None:
ws[loc] = keys[index]
else:
assert (ws[loc].value == keys[index])
wb.save(file_dir)
except:
return False
return True
# Save feature values into a worksheet column
def Save_Column_Exel(file_dir, features, row_index, column_start):
ws = wb.active
vals = [x[1] for x in features.items()]
try:
for index in range(len(vals)):
loc = XL_Location(index + row_index, column_start)
if isinstance(vals[index], numbers.Number):
ws[XL_Location(index + row_index, column_start)] = vals[index]
else:
ws[XL_Location(index + row_index, column_start)] = str(vals[index])
wb.save(file_dir)
except:
return False
return True
def Save_Row_Exel(file_dir, features, row_index, column_start):
ws = wb.active
vals = [x[1] for x in features.items()]
try:
for index in range(len(vals)):
loc = XL_Location(index + row_index, column_start)
if isinstance(vals[index], numbers.Number):
ws[XL_Location(row_index, index + column_start)] = vals[index]
else:
ws[XL_Location(row_index, index + column_start)] = str(vals[index])
wb.save(file_dir)
except:
return False
return True
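# A minimal usage sketch; the file name and the feature dict are made up. Keys go down
# one column, values down another, one Save_Column_Exel call per sample/column.
#     feats = {"mean": 1.23, "std": 0.45, "label": "tumor"}
#     Save_Column_Title("features.xlsx", feats, row_index=1, column_start=1)
#     Save_Column_Exel("features.xlsx", feats, row_index=1, column_start=2)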
| 30.706897
| 83
| 0.60977
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 20
| 0.01123
|
4955e96c4c0b436986516c1c0b6010caa5dbeec8
| 1,971
|
py
|
Python
|
src/semantic_segmentation/utils/image.py
|
alteia-ai/ICSS
|
088ddb7a8b92c71cc0b95e55d186069b8af50b0a
|
[
"MIT"
] | 7
|
2022-01-10T19:04:34.000Z
|
2022-03-16T03:19:48.000Z
|
src/semantic_segmentation/utils/image.py
|
alteia-ai/ICSS
|
088ddb7a8b92c71cc0b95e55d186069b8af50b0a
|
[
"MIT"
] | null | null | null |
src/semantic_segmentation/utils/image.py
|
alteia-ai/ICSS
|
088ddb7a8b92c71cc0b95e55d186069b8af50b0a
|
[
"MIT"
] | null | null | null |
import colorsys
import itertools
import numpy as np
import torch
def sliding_window(top, step=10, window_size=(20, 20)):
""" Slide a window_shape window across the image with a stride of step """
for x in range(0, top.shape[1], step):
if x + window_size[0] > top.shape[1]:
x = top.shape[1] - window_size[0]
for y in range(0, top.shape[2], step):
if y + window_size[1] > top.shape[2]:
y = top.shape[2] - window_size[1]
yield x, y, window_size[0], window_size[1]
def grouper(n, iterable):
""" Browse an iterator by chunk of n elements """
it = iter(iterable)
while True:
chunk = tuple(itertools.islice(it, n))
if not chunk:
return
yield chunk
def random_colors(n, bright=True):
"""
Generate random colors.
To get visually distinct colors, generate them in HSV space then
convert to RGB.
"""
brightness = 1.0 if bright else 0.7
hsv = [(i / n, 1, brightness) for i in range(n)]
colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
np.random.shuffle(colors)
return colors
def apply_mask(image, mask, color, i=1, alpha=0.5):
"""Apply the given mask(==i) to the image. Binary mask.
"""
target = image.copy()
for c in range(3):
target[:, :, c] = np.where(
mask == i, image[:, :, c] * (1 - alpha) + alpha * color[c], image[:, :, c]
)
return target
def from_coord_to_patch(img, coords, device):
"""Returns patches of the input image. coors is an output of grouper(n, sliding window(...))"""
if isinstance(img, np.ndarray):
img = torch.from_numpy(img)
image_patches = [img[:, x : x + w, y : y + h] for x, y, w, h in coords]
# image_patches = np.asarray(image_patches)
# image_patches = torch.from_numpy(image_patches).type(torch.FloatTensor)
image_patches = torch.stack(image_patches).to(device)
return image_patches
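# A hedged end-to-end sketch of how these helpers combine; the shapes are illustrative
# and 'net' stands for any patch-wise segmentation model.
#     img = np.zeros((3, 512, 512), dtype=np.float32)  # C x H x W
#     device = torch.device("cpu")
#     for coords in grouper(8, sliding_window(img, step=128, window_size=(256, 256))):
#         batch = from_coord_to_patch(img, coords, device)  # (<=8, 3, 256, 256)
#         # preds = net(batch)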
| 31.790323
| 99
| 0.609335
| 0
| 0
| 698
| 0.354135
| 0
| 0
| 0
| 0
| 525
| 0.266362
|
495701e22432966b0a7ba235c137600209fc7f19
| 578
|
py
|
Python
|
website/error.py
|
TWoolhouse/Libraries
|
26079ed387cb800cb97f20980720ae094008c7bf
|
[
"MIT"
] | 1
|
2020-10-11T15:34:56.000Z
|
2020-10-11T15:34:56.000Z
|
website/error.py
|
TWoolhouse/Libraries
|
26079ed387cb800cb97f20980720ae094008c7bf
|
[
"MIT"
] | null | null | null |
website/error.py
|
TWoolhouse/Libraries
|
26079ed387cb800cb97f20980720ae094008c7bf
|
[
"MIT"
] | null | null | null |
class WebsiteBaseError(Exception):
pass
class TreeTraversal(WebsiteBaseError):
def __init__(self, tree, request, segment, req=None):
super().__init__()
self.tree, self.request, self.segment, self.req = tree, request, segment, req
def __str__(self) -> str:
return f"{self.tree} > {self.request}[{self.segment}] {'' if self.req is None else self.req}"
class BufferRead(WebsiteBaseError):
def __init__(self, buffer):
super().__init__()
self.buffer = buffer
def __str__(self) -> str:
return f"{self.buffer}"
| 27.52381
| 101
| 0.647059
| 573
| 0.991349
| 0
| 0
| 0
| 0
| 0
| 0
| 102
| 0.176471
|
4958f00172a7990bcba76c17970e13446ea6dcfc
| 8,498
|
py
|
Python
|
backend/src/contaxy/operations/deployment.py
|
ml-tooling/contaxy
|
3317a866c2ef641667a2d318885c8b0f5096b56a
|
[
"MIT"
] | 3
|
2021-10-17T23:25:05.000Z
|
2022-02-03T21:40:59.000Z
|
backend/src/contaxy/operations/deployment.py
|
ml-tooling/contaxy
|
3317a866c2ef641667a2d318885c8b0f5096b56a
|
[
"MIT"
] | 14
|
2021-11-09T15:24:29.000Z
|
2022-03-11T13:26:04.000Z
|
backend/src/contaxy/operations/deployment.py
|
ml-tooling/contaxy
|
3317a866c2ef641667a2d318885c8b0f5096b56a
|
[
"MIT"
] | 3
|
2022-01-27T08:31:57.000Z
|
2022-02-11T13:38:00.000Z
|
from abc import ABC, abstractmethod
from datetime import datetime
from typing import Any, List, Literal, Optional
from contaxy.schema import Job, JobInput, ResourceAction, Service, ServiceInput
from contaxy.schema.deployment import DeploymentType
# TODO: update_service functionality
class ServiceOperations(ABC):
@abstractmethod
def list_services(
self,
project_id: str,
deployment_type: Literal[
DeploymentType.SERVICE, DeploymentType.EXTENSION
] = DeploymentType.SERVICE,
) -> List[Service]:
"""Lists all services associated with the given project.
Args:
project_id (str): The project ID to filter the services.
            deployment_type (One of [DeploymentType.SERVICE, DeploymentType.EXTENSION]): The deployment type of either Service or Extension (which is a subtype of Service).
Returns:
List[Service]: The list of services associated with the project.
"""
pass
@abstractmethod
def deploy_service(
self,
project_id: str,
service: ServiceInput,
action_id: Optional[str] = None,
deployment_type: Literal[
DeploymentType.SERVICE, DeploymentType.EXTENSION
] = DeploymentType.SERVICE,
) -> Service:
"""Deploys a service for the specified project.
If no `action_id` is provided, the system will automatically select the best deployment option.
Available deployment options (actions) can be requested via the [list_deploy_service_actions](#services/list_deploy_service_actions) operation.
If the action is from an extension, the `action_id` must be a composite ID with the following format: `{extension_id}~{action_id}`.
The action mechanism is further explained in the description of the [list_deploy_service_actions](#services/list_deploy_service_actions).
Args:
            project_id (str): The ID of the project to deploy the service in.
            service (ServiceInput): The configuration of the service to deploy.
            action_id (Optional[str], optional): The ID of the selected action. Defaults to `None`.
            deployment_type (One of [DeploymentType.SERVICE, DeploymentType.EXTENSION]): The deployment type of either Service or Extension (which is a subtype of Service).
Returns:
Service: The metadata of the deployed service.
"""
pass
@abstractmethod
def list_deploy_service_actions(
self,
project_id: str,
service: ServiceInput,
) -> List[ResourceAction]:
"""Lists all available service deployment options (actions).
Args:
project_id (str): The project ID associated with the service.
            service (ServiceInput): The service configuration to evaluate deployment options for.
Returns:
List[ResourceAction]: Available deployment actions.
"""
pass
@abstractmethod
def get_service_metadata(
self,
project_id: str,
service_id: str,
) -> Service:
"""Returns the metadata of a single service.
Args:
project_id (str): The project ID associated with the service.
service_id (str): The ID of the service.
Returns:
Service: The service metadata.
"""
pass
@abstractmethod
def delete_service(
self,
project_id: str,
service_id: str,
delete_volumes: bool = False,
) -> None:
"""Deletes a service.
Args:
project_id (str): The project ID associated with the service.
service_id (str): The ID of the service.
delete_volumes (bool, optional): If `True`, all attached volumes will be deleted. Defaults to `False`.
Raises:
RuntimeError: If an error occurs during the deletion of the service.
"""
pass
@abstractmethod
def delete_services(
self,
project_id: str,
) -> None:
"""Deletes all services associated with a project.
Args:
project_id (str): The project ID.
"""
pass
@abstractmethod
def get_service_logs(
self,
project_id: str,
service_id: str,
lines: Optional[int],
since: Optional[datetime],
) -> str:
"""Returns the logs of a service.
Args:
project_id (str): The ID of the project into which the service is deployed in.
service_id (str): The ID of the service.
lines (Optional[int]): If provided, just the last `n` lines are returned from the log. Defaults to `None`.
since (Optional[datetime]): If provided, just the logs since the given timestamp are returned. Defaults to `None`.
Raises:
NotImplementedError: [description]
RuntimeError: If reading the logs of the given service fails.
Returns:
str: The logs of the service.
"""
pass
@abstractmethod
def suggest_service_config(
self,
project_id: str,
container_image: str,
) -> ServiceInput:
"""Suggests an input configuration based on the provided `container_image`.
The suggestion is based on metadata extracted from the container image (e.g. labels)
as well as suggestions based on previous project deployments with the same image.
Args:
project_id (str): The project ID associated with the service.
container_image (str): The container image to use as context for the suggestion.
Returns:
ServiceInput: The suggested service configuration.
"""
pass
@abstractmethod
def list_service_actions(
self,
project_id: str,
service_id: str,
) -> List[ResourceAction]:
"""Lists all actions available for the specified service.
See the endpoint documentation for more information on the action mechanism.
Args:
project_id (str): The project ID associated with the service.
service_id (str): The ID of the service.
Returns:
List[ResourceAction]: Available actions for given services.
"""
pass
@abstractmethod
def execute_service_action(
self,
project_id: str,
service_id: str,
action_id: str,
) -> Any:
"""Executes the selected service action.
The actions need to be first requested from the list_service_actions operation.
If the action is from an extension, the `action_id` must be a composite ID with the following format: `{extension_id}~{action_id}`.
Args:
project_id (str): The project ID associated with the service.
service_id (str): The ID of the service.
action_id (str): The ID of the selected action.
Returns:
`None` or a redirect response to another URL.
"""
pass
class JobOperations(ABC):
@abstractmethod
def list_jobs(self, project_id: str) -> List[Job]:
pass
@abstractmethod
def deploy_job(
self,
project_id: str,
job: JobInput,
action_id: Optional[str] = None,
) -> Job:
pass
@abstractmethod
def list_deploy_job_actions(
self,
project_id: str,
job: JobInput,
) -> List[ResourceAction]:
pass
@abstractmethod
def suggest_job_config(self, project_id: str, container_image: str) -> JobInput:
pass
@abstractmethod
def get_job_metadata(self, project_id: str, job_id: str) -> Job:
pass
@abstractmethod
def delete_job(self, project_id: str, job_id: str) -> None:
pass
@abstractmethod
def delete_jobs(
self,
project_id: str,
) -> None:
"""Deletes all jobs associated with a project.
Args:
project_id (str): The project ID.
"""
pass
@abstractmethod
def get_job_logs(
self,
project_id: str,
job_id: str,
lines: Optional[int] = None,
since: Optional[datetime] = None,
) -> str:
pass
@abstractmethod
def list_job_actions(
self,
project_id: str,
job_id: str,
) -> List[ResourceAction]:
pass
@abstractmethod
def execute_job_action(self, project_id: str, job_id: str, action_id: str) -> Any:
pass
class DeploymentOperations(ServiceOperations, JobOperations, ABC):
pass
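# A hedged caller-side sketch: 'ops' stands for any concrete DeploymentOperations
# implementation (not shown here) and "my-project" is a hypothetical project ID.
#     for service in ops.list_services("my-project"):
#         print(service)
#     ops.delete_services("my-project")  # remove every service in the project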
| 29.922535
| 166
| 0.619087
| 8,203
| 0.965286
| 0
| 0
| 7,956
| 0.93622
| 0
| 0
| 4,820
| 0.567192
|
4959390b2ca88e67ed8b8674132fbee54a9cccd4
| 15,604
|
py
|
Python
|
perfkitbenchmarker/providers/openstack/os_virtual_machine.py
|
Nowasky/PerfKitBenchmarker
|
cfa88e269eb373780910896ed4bdc8db09469753
|
[
"Apache-2.0"
] | 3
|
2018-04-28T13:06:14.000Z
|
2020-06-09T02:39:44.000Z
|
perfkitbenchmarker/providers/openstack/os_virtual_machine.py
|
Nowasky/PerfKitBenchmarker
|
cfa88e269eb373780910896ed4bdc8db09469753
|
[
"Apache-2.0"
] | 1
|
2018-03-15T21:01:27.000Z
|
2018-03-15T21:01:27.000Z
|
perfkitbenchmarker/providers/openstack/os_virtual_machine.py
|
Nowasky/PerfKitBenchmarker
|
cfa88e269eb373780910896ed4bdc8db09469753
|
[
"Apache-2.0"
] | 6
|
2019-06-11T18:59:57.000Z
|
2021-03-02T19:14:42.000Z
|
# Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class to represent an OpenStack Virtual Machine.
Regions:
User defined
Machine types, or flavors:
run 'openstack flavor list'
Images:
run 'openstack image list'
"""
import json
import logging
import threading
from absl import flags
from perfkitbenchmarker import errors
from perfkitbenchmarker import providers
from perfkitbenchmarker import virtual_machine, linux_virtual_machine
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.openstack import os_disk
from perfkitbenchmarker.providers.openstack import os_network
from perfkitbenchmarker.providers.openstack import utils as os_utils
from six.moves import range
NONE = 'None'
VALIDATION_ERROR_MESSAGE = '{0} {1} could not be found.'
FLAGS = flags.FLAGS
class OpenStackVirtualMachine(virtual_machine.BaseVirtualMachine):
"""Object representing an OpenStack Virtual Machine"""
CLOUD = providers.OPENSTACK
DEFAULT_IMAGE = None
_lock = threading.Lock() # _lock guards the following:
command_works = False
validated_resources_set = set()
uploaded_keypair_set = set()
deleted_keypair_set = set()
created_server_group_dict = {}
deleted_server_group_set = set()
floating_network_id = None
def __init__(self, vm_spec):
"""Initialize an OpenStack virtual machine.
Args:
vm_spec: virtual_machine.BaseVirtualMachineSpec object of the vm.
"""
super(OpenStackVirtualMachine, self).__init__(vm_spec)
self.key_name = 'perfkit_key_%s' % FLAGS.run_uri
self.user_name = FLAGS.openstack_image_username
self.image = self.image or self.DEFAULT_IMAGE
# FIXME(meteorfox): Remove --openstack_public_network and
    # --openstack_private_network once the deprecation period has expired
self.network_name = (FLAGS.openstack_network or
FLAGS.openstack_private_network)
self.floating_ip_pool_name = (FLAGS.openstack_floating_ip_pool or
FLAGS.openstack_public_network)
self.id = None
self.boot_volume_id = None
self.server_group_id = None
self.floating_ip = None
self.firewall = None
self.public_network = None
self.subnet_id = None
self.post_provisioning_script = FLAGS.openstack_post_provisioning_script
@property
def group_id(self):
"""Returns the security group ID of this VM."""
return 'perfkit_sc_group'
def _CreateDependencies(self):
"""Validate and Create dependencies prior creating the VM."""
self._CheckPrerequisites()
self.firewall = os_network.OpenStackFirewall.GetFirewall()
self.public_network = os_network.OpenStackFloatingIPPool(
OpenStackVirtualMachine.floating_network_id)
self._UploadSSHPublicKey()
source_range = self._GetInternalNetworkCIDR()
self.firewall.AllowPort(self, os_network.MIN_PORT, os_network.MAX_PORT,
source_range)
self.firewall.AllowICMP(self) # Allowing ICMP traffic (i.e. ping)
self.AllowRemoteAccessPorts()
def _Create(self):
"""Creates an OpenStack VM instance and waits until it is ACTIVE."""
if FLAGS.openstack_boot_from_volume:
vol_name = '%s_volume' % self.name
disk_resp = os_disk.CreateBootVolume(self, vol_name, self.image)
self.boot_volume_id = disk_resp['id']
os_disk.WaitForVolumeCreation(self, self.boot_volume_id)
self._CreateInstance()
@vm_util.Retry(max_retries=4, poll_interval=2)
def _PostCreate(self):
self._SetIPAddresses()
def _Delete(self):
if self.id is None:
return
self._DeleteInstance()
if self.floating_ip:
self.public_network.release(self, self.floating_ip)
if self.server_group_id:
self._DeleteServerGroup()
if self.boot_volume_id:
os_disk.DeleteVolume(self, self.boot_volume_id)
self.boot_volume_id = None
def _DeleteDependencies(self):
"""Delete dependencies that were needed for the VM after the VM has been
deleted."""
self._DeleteSSHPublicKey()
def _Exists(self):
if self.id is None:
return False
show_cmd = os_utils.OpenStackCLICommand(self, 'server', 'show', self.id)
stdout, _, _ = show_cmd.Issue(suppress_warning=True)
try:
resp = json.loads(stdout)
return resp
except ValueError:
return False
def _Suspend(self):
"""Suspends the vm."""
raise NotImplementedError()
def _Resume(self):
"""Resumes the VM."""
raise NotImplementedError()
def _CheckCanaryCommand(self):
if OpenStackVirtualMachine.command_works: # fast path
return
with self._lock:
if OpenStackVirtualMachine.command_works:
return
logging.info('Testing OpenStack CLI command is installed and working')
cmd = os_utils.OpenStackCLICommand(self, 'image', 'list')
stdout, stderr, _ = cmd.Issue()
if stderr:
raise errors.Config.InvalidValue(
'OpenStack CLI test command failed. Please make sure the OpenStack '
'CLI client is installed and properly configured')
OpenStackVirtualMachine.command_works = True
def _CheckPrerequisites(self):
"""Checks prerequisites are met otherwise aborts execution."""
self._CheckCanaryCommand()
if self.zone in self.validated_resources_set:
return # No need to check again
with self._lock:
if self.zone in self.validated_resources_set:
return
logging.info('Validating prerequisites.')
self._CheckImage()
self._CheckFlavor()
self._CheckNetworks()
self.validated_resources_set.add(self.zone)
logging.info('Prerequisites validated.')
def _CheckImage(self):
"""Tries to get image, if found continues execution otherwise aborts."""
cmd = os_utils.OpenStackCLICommand(self, 'image', 'show', self.image)
err_msg = VALIDATION_ERROR_MESSAGE.format('Image', self.image)
self._IssueCommandCheck(cmd, err_msg)
def _CheckFlavor(self):
"""Tries to get flavor, if found continues execution otherwise aborts."""
cmd = os_utils.OpenStackCLICommand(self, 'flavor', 'show',
self.machine_type)
err_msg = VALIDATION_ERROR_MESSAGE.format('Machine type', self.machine_type)
self._IssueCommandCheck(cmd, err_msg)
def _CheckNetworks(self):
"""Tries to get network, if found continues execution otherwise aborts."""
if not self.network_name:
if self.floating_ip_pool_name:
msg = ('Cannot associate floating-ip address from pool %s without '
'an internally routable network. Make sure '
'--openstack_network flag is set.')
else:
msg = ('Cannot build instance without a network. Make sure to set '
'either just --openstack_network or both '
'--openstack_network and --openstack_floating_ip_pool flags.')
raise errors.Error(msg)
self._CheckNetworkExists(self.network_name)
if self.floating_ip_pool_name:
floating_network_dict = self._CheckFloatingIPNetworkExists(
self.floating_ip_pool_name)
OpenStackVirtualMachine.floating_network_id = floating_network_dict['id']
def _CheckFloatingIPNetworkExists(self, floating_network_name_or_id):
network = self._CheckNetworkExists(floating_network_name_or_id)
if network['router:external'] not in ('External', True):
raise errors.Config.InvalidValue('Network "%s" is not External'
% self.floating_ip_pool_name)
return network
def _CheckNetworkExists(self, network_name_or_id):
cmd = os_utils.OpenStackCLICommand(self, 'network', 'show',
network_name_or_id)
err_msg = VALIDATION_ERROR_MESSAGE.format('Network', network_name_or_id)
stdout = self._IssueCommandCheck(cmd, err_msg)
network = json.loads(stdout)
return network
def _IssueCommandCheck(self, cmd, err_msg=None):
"""Issues command and, if stderr is non-empty, raises an error message
Args:
cmd: The command to be issued.
err_msg: string. Error message if command fails.
"""
if err_msg is None:
err_msg = ''
stdout, stderr, _ = cmd.Issue()
if stderr:
raise errors.Config.InvalidValue(err_msg)
return stdout
def _UploadSSHPublicKey(self):
"""Uploads SSH public key to the VM's region."""
with self._lock:
if self.zone in self.uploaded_keypair_set:
return
cmd = os_utils.OpenStackCLICommand(self, 'keypair', 'create',
self.key_name)
cmd.flags['public-key'] = self.ssh_public_key
cmd.IssueRetryable()
self.uploaded_keypair_set.add(self.zone)
if self.zone in self.deleted_keypair_set:
self.deleted_keypair_set.remove(self.zone)
def _DeleteSSHPublicKey(self):
"""Deletes SSH public key used for the VM."""
with self._lock:
if self.zone in self.deleted_keypair_set:
return
cmd = os_utils.OpenStackCLICommand(self, 'keypair', 'delete',
self.key_name)
del cmd.flags['format'] # keypair delete does not support json output
cmd.Issue()
self.deleted_keypair_set.add(self.zone)
if self.zone in self.uploaded_keypair_set:
self.uploaded_keypair_set.remove(self.zone)
def _CreateInstance(self):
"""Execute command for creating an OpenStack VM instance."""
create_cmd = self._GetCreateCommand()
stdout, stderr, _ = create_cmd.Issue()
if stderr:
raise errors.Error(stderr)
resp = json.loads(stdout)
self.id = resp['id']
def _GetCreateCommand(self):
cmd = os_utils.OpenStackCLICommand(self, 'server', 'create', self.name)
cmd.flags['flavor'] = self.machine_type
cmd.flags['security-group'] = self.group_id
cmd.flags['key-name'] = self.key_name
cmd.flags['availability-zone'] = self.zone
cmd.flags['nic'] = 'net-id=%s' % self.network_name
cmd.flags['wait'] = True
if FLAGS.openstack_config_drive:
cmd.flags['config-drive'] = 'True'
hints = self._GetSchedulerHints()
if hints:
cmd.flags['hint'] = hints
if FLAGS.openstack_boot_from_volume:
cmd.flags['volume'] = self.boot_volume_id
else:
cmd.flags['image'] = self.image
if self.post_provisioning_script:
cmd.flags['user-data'] = self.post_provisioning_script
return cmd
def _GetSchedulerHints(self):
if FLAGS.openstack_scheduler_policy == NONE:
return None
with self._lock:
group_name = 'perfkit_server_group_%s' % FLAGS.run_uri
hint_temp = 'group=%s'
if self.zone in self.created_server_group_dict:
hint = hint_temp % self.created_server_group_dict[self.zone]['id']
return hint
server_group = self._CreateServerGroup(group_name)
self.server_group_id = server_group['id']
self.created_server_group_dict[self.zone] = server_group
if self.zone in self.deleted_server_group_set:
self.deleted_server_group_set.remove(self.zone)
return hint_temp % server_group['id']
def _CreateServerGroup(self, group_name):
cmd = os_utils.OpenStackCLICommand(self, 'server group', 'create',
group_name)
cmd.flags['policy'] = FLAGS.openstack_scheduler_policy
stdout, stderr, _ = cmd.Issue()
if stderr:
raise errors.Error(stderr)
server_group = json.loads(stdout)
return server_group
def _DeleteServerGroup(self):
with self._lock:
if self.zone in self.deleted_server_group_set:
return
cmd = os_utils.OpenStackCLICommand(self, 'server group', 'delete',
self.server_group_id)
del cmd.flags['format'] # delete does not support json output
cmd.Issue()
self.deleted_server_group_set.add(self.zone)
if self.zone in self.created_server_group_dict:
del self.created_server_group_dict[self.zone]
def _DeleteInstance(self):
cmd = os_utils.OpenStackCLICommand(self, 'server', 'delete', self.id)
del cmd.flags['format'] # delete does not support json output
cmd.flags['wait'] = True
cmd.Issue(suppress_warning=True)
def _SetIPAddresses(self):
show_cmd = os_utils.OpenStackCLICommand(self, 'server', 'show', self.name)
stdout, _, _ = show_cmd.Issue()
server_dict = json.loads(stdout)
self.ip_address = self._GetNetworkIPAddress(server_dict, self.network_name)
self.internal_ip = self.ip_address
if self.floating_ip_pool_name:
self.floating_ip = self._AllocateFloatingIP()
self.internal_ip = self.ip_address
self.ip_address = self.floating_ip.floating_ip_address
def _GetNetworkIPAddress(self, server_dict, network_name):
addresses = server_dict['addresses'].split(',')
for address in addresses:
if network_name in address:
_, ip = address.split('=')
return ip
def _GetInternalNetworkCIDR(self):
"""Returns IP addresses source range of internal network."""
net_cmd = os_utils.OpenStackCLICommand(self, 'network', 'show',
self.network_name)
net_stdout, _, _ = net_cmd.Issue()
network = json.loads(net_stdout)
if isinstance(network['subnets'], list):
self.subnet_id = network['subnets'][0]
else:
self.subnet_id = network['subnets']
subnet_cmd = os_utils.OpenStackCLICommand(self, 'subnet', 'show',
self.subnet_id)
stdout, _, _ = subnet_cmd.Issue()
subnet_dict = json.loads(stdout)
return subnet_dict['cidr']
def _AllocateFloatingIP(self):
floating_ip = self.public_network.associate(self)
logging.info('floating-ip associated: {}'.format(
floating_ip.floating_ip_address))
return floating_ip
def CreateScratchDisk(self, disk_spec):
disks_names = ('%s_data_%d_%d'
% (self.name, len(self.scratch_disks), i)
for i in range(disk_spec.num_striped_disks))
disks = [os_disk.OpenStackDisk(disk_spec, name, self.zone)
for name in disks_names]
self._CreateScratchDiskFromDisks(disk_spec, disks)
def GetResourceMetadata(self):
"""Returns a dict containing metadata about the VM.
Returns:
dict mapping string property key to value.
"""
result = super(OpenStackVirtualMachine, self).GetResourceMetadata()
if self.post_provisioning_script:
result['post_provisioning_script'] = self.post_provisioning_script
return result
class Rhel7BasedOpenStackVirtualMachine(OpenStackVirtualMachine,
linux_virtual_machine.Rhel7Mixin):
DEFAULT_IMAGE = 'rhel-7.2'
class CentOs7BasedOpenStackVirtualMachine(OpenStackVirtualMachine,
linux_virtual_machine.CentOs7Mixin):
DEFAULT_IMAGE = 'centos7'
class ClearBasedOpenStackVirtualMachine(OpenStackVirtualMachine,
linux_virtual_machine.ClearMixin):
DEFAULT_IMAGE = 'upstream-clear'
| 36.543326
| 80
| 0.692771
| 14,237
| 0.912394
| 0
| 0
| 211
| 0.013522
| 0
| 0
| 3,713
| 0.237952
|
49594197e0bf3a8d4220d5dcfdcb6644bf95fa9a
| 5,933
|
py
|
Python
|
model.py
|
kevincho840430/CarND-Behavioral-Cloning-P3-master-1
|
042707a17c6dffb19717737b3f78169428bf31f6
|
[
"MIT"
] | null | null | null |
model.py
|
kevincho840430/CarND-Behavioral-Cloning-P3-master-1
|
042707a17c6dffb19717737b3f78169428bf31f6
|
[
"MIT"
] | null | null | null |
model.py
|
kevincho840430/CarND-Behavioral-Cloning-P3-master-1
|
042707a17c6dffb19717737b3f78169428bf31f6
|
[
"MIT"
] | null | null | null |
## Self-driving car project based on NVIDIA's CNN model
## Package
#torch = 0.4.1.post2
#torchvision = 0.2.1
#numpy = 1.15.2
#opencv-python =3.4.3
# -*- coding: utf-8 -*-
# Import the Stuff
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils import data
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
import cv2
import numpy as np
import csv
# Step1: Read from the log file
samples = []
with open('../data/driving_log.csv') as csvfile:
reader = csv.reader(csvfile)
next(reader, None)
for line in reader:
samples.append(line)
# Step2: Divide the data into training set and validation set
train_len = int(0.8*len(samples))
valid_len = len(samples) - train_len
train_samples, validation_samples = data.random_split(samples, lengths=[train_len, valid_len])
# Step3a: Define the augmentation, transformation processes, parameters and dataset for dataloader
def augment(imgName, angle):
name = '../data/IMG/' + imgName.split('/')[-1]
current_image = cv2.imread(name)
current_image = current_image[65:-25, :, :]
if np.random.rand() < 0.5:
current_image = cv2.flip(current_image, 1)
angle = angle * -1.0
return current_image, angle
class Dataset(data.Dataset):
def __init__(self, samples, transform=None):
self.samples = samples
self.transform = transform
def __getitem__(self, index):
batch_samples = self.samples[index]
steering_angle = float(batch_samples[3])
center_img, steering_angle_center = augment(batch_samples[0], steering_angle)
left_img, steering_angle_left = augment(batch_samples[1], steering_angle + 0.4)
right_img, steering_angle_right = augment(batch_samples[2], steering_angle - 0.4)
center_img = self.transform(center_img)
left_img = self.transform(left_img)
right_img = self.transform(right_img)
return (center_img, steering_angle_center), (left_img, steering_angle_left), (right_img, steering_angle_right)
def __len__(self):
return len(self.samples)
# Step3b: Create generators using the DataLoader to parallelize data loading
transformations = transforms.Compose([transforms.Lambda(lambda x: (x / 255.0) - 0.5)])
params = {'batch_size': 32,
'shuffle': True,
'num_workers': 4}
training_set = Dataset(train_samples, transformations)
training_generator = data.DataLoader(training_set, **params)
validation_set = Dataset(validation_samples, transformations)
validation_generator = data.DataLoader(validation_set, **params)
# Step4: Define the network
class NetworkLight(nn.Module):
def __init__(self):
super(NetworkLight, self).__init__()
self.conv_layers = nn.Sequential(
nn.Conv2d(3, 24, 3, stride=2),
nn.ELU(),
nn.Conv2d(24, 48, 3, stride=2),
nn.MaxPool2d(4, stride=4),
nn.Dropout(p=0.25)
)
self.linear_layers = nn.Sequential(
nn.Linear(in_features=48*4*19, out_features=50),
nn.ELU(),
nn.Linear(in_features=50, out_features=10),
nn.Linear(in_features=10, out_features=1)
)
def forward(self, input):
input = input.view(input.size(0), 3, 70, 320)
output = self.conv_layers(input)
output = output.view(output.size(0), -1)
output = self.linear_layers(output)
return output
# Step5: Define optimizer
model = NetworkLight()
optimizer = optim.Adam(model.parameters(), lr=0.0001)
criterion = nn.MSELoss()
# Step6: Check the device and define function to move tensors to that device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('device is: ', device)
def toDevice(datas, device):
imgs, angles = datas
return imgs.float().to(device), angles.float().to(device)
# Step7: Train and validate network based on maximum epochs defined
max_epochs = 22
for epoch in range(max_epochs):
model.to(device)
# Training
train_loss = 0
model.train()
for local_batch, (centers, lefts, rights) in enumerate(training_generator):
# Transfer to GPU
centers, lefts, rights = toDevice(centers, device), toDevice(lefts, device), toDevice(rights, device)
# Model computations
optimizer.zero_grad()
datas = [centers, lefts, rights]
for data in datas:
imgs, angles = data
# print("training image: ", imgs.shape)
outputs = model(imgs)
loss = criterion(outputs, angles.unsqueeze(1))
loss.backward()
optimizer.step()
            train_loss += loss.item()
if local_batch % 100 == 0:
print('Loss: %.3f '
% (train_loss/(local_batch+1)))
# Validation
model.eval()
valid_loss = 0
with torch.set_grad_enabled(False):
for local_batch, (centers, lefts, rights) in enumerate(validation_generator):
# Transfer to GPU
centers, lefts, rights = toDevice(centers, device), toDevice(lefts, device), toDevice(rights, device)
# Model computations
optimizer.zero_grad()
datas = [centers, lefts, rights]
for data in datas:
imgs, angles = data
# print("Validation image: ", imgs.shape)
outputs = model(imgs)
loss = criterion(outputs, angles.unsqueeze(1))
                valid_loss += loss.item()
if local_batch % 100 == 0:
print('Valid Loss: %.3f '
% (valid_loss/(local_batch+1)))
# Step8: Build the state dict and save the model
state = {
    'model': model.module if isinstance(model, nn.DataParallel) else model,
}
torch.save(state, 'model.h5')
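# A hedged loading sketch that mirrors the save format above (a dict with a 'model' key);
# the expected input shape follows the view() call in NetworkLight.forward.
#     checkpoint = torch.load('model.h5', map_location='cpu')
#     net = checkpoint['model']
#     net.eval()
#     # steering = net(image_tensor)  # image_tensor: (N, 3, 70, 320), normalized to [-0.5, 0.5]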
| 31.062827
| 118
| 0.632901
| 1,712
| 0.288556
| 0
| 0
| 0
| 0
| 0
| 0
| 1,054
| 0.17765
|